Dataset columns:
  hip_filename   (string, 5 to 84 characters)
  hip_content    (string, 79 to 9.69M characters)
  cuda_filename  (string, 4 to 83 characters)
  cuda_content   (string, 19 to 9.69M characters)
bb0c30e5eb1b49c0422d1d8dd358a7d8cedf6d72.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cuda_arrays.h" template <int N> __device__ float read_surface(hipSurfaceObject_t s, int x, int y, int z); template <> __device__ float read_surface<1>(hipSurfaceObject_t s, int x, int y, int z) { return surf1Dread<float>(s, x * (int)sizeof(float), hipBoundaryModeClamp); } template <> __device__ float read_surface<2>(hipSurfaceObject_t s, int x, int y, int z) { return surf2Dread<float>(s, x * (int)sizeof(float), y, hipBoundaryModeClamp); } template <> __device__ float read_surface<3>(hipSurfaceObject_t s, int x, int y, int z) { return surf3Dread<float>(s, x * (int)sizeof(float), y, z, hipBoundaryModeClamp); } template <int N> __device__ void write_surface(float f, hipSurfaceObject_t s, int x, int y, int z); template <> __device__ void write_surface<1>(float f, hipSurfaceObject_t s, int x, int y, int z) { #if CUDART_VERSION <= 8000 surf1Dwrite(f, s, x * sizeof(float)); #else surf1Dwrite<float>(f, s, x * sizeof(float)); #endif } template <> __device__ void write_surface<2>(float f, hipSurfaceObject_t s, int x, int y, int z) { #if CUDART_VERSION <= 8000 surf2Dwrite(f, s, x * sizeof(float), y); #else surf2Dwrite<float>(f, s, x * sizeof(float), y); #endif } template <> __device__ void write_surface<3>(float f, hipSurfaceObject_t s, int x, int y, int z) { #if CUDART_VERSION <= 8000 surf3Dwrite(f, s, x * sizeof(float), y, z); #else surf3Dwrite<float>(f, s, x * sizeof(float), y, z); #endif } template <int N, typename T> __global__ void smooth(Realm::Rect<N,T> extent, int nx, int ny, int nz, float alpha, hipSurfaceObject_t surf_in, hipSurfaceObject_t surf_out) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; int z = blockDim.z * blockIdx.z + threadIdx.z; if((x >= nx) || (y >= ny) || (z >= nz)) return; // 6-point stencil float f = (1 - alpha) * read_surface<N>(surf_in, x, y, z); f += alpha/6.0f * read_surface<N>(surf_in, x-1, y, z); f += alpha/6.0f * read_surface<N>(surf_in, x+1, y, z); f += alpha/6.0f * read_surface<N>(surf_in, x, y-1, z); f += alpha/6.0f * read_surface<N>(surf_in, x, y+1, z); f += alpha/6.0f * read_surface<N>(surf_in, x, y, z-1); f += alpha/6.0f * read_surface<N>(surf_in, x, y, z+1); write_surface<N>(f, surf_out, x, y, z); } void smooth_kernel(Realm::Rect<1> extent, float alpha, hipSurfaceObject_t surf_in, hipSurfaceObject_t surf_out) { size_t tx = extent.hi.x - extent.lo.x + 1; size_t bx = std::min<size_t>(1024, tx); size_t gx = 1 + ((tx - 1) / bx); dim3 gridDim(gx, 1, 1); dim3 blockDim(bx, 1, 1); hipLaunchKernelGGL(( smooth), dim3(gridDim), dim3(blockDim), 0, 0, extent, tx, 1, 1, alpha, surf_in, surf_out); } void smooth_kernel(Realm::Rect<2> extent, float alpha, hipSurfaceObject_t surf_in, hipSurfaceObject_t surf_out) { size_t tx = extent.hi.x - extent.lo.x + 1; size_t bx = std::min<size_t>(1024, tx); size_t gx = 1 + ((tx - 1) / bx); size_t ty = extent.hi.y - extent.lo.y + 1; size_t by = std::min<size_t>(1024 / bx, ty); size_t gy = 1 + ((ty - 1) / by); dim3 gridDim(gx, gy, 1); dim3 blockDim(bx, by, 1); hipLaunchKernelGGL(( smooth), dim3(gridDim), dim3(blockDim), 0, 0, extent, tx, ty, 1, alpha, surf_in, surf_out); } void smooth_kernel(Realm::Rect<3> extent, float alpha, hipSurfaceObject_t surf_in, hipSurfaceObject_t surf_out) { size_t tx = extent.hi.x - extent.lo.x + 1; size_t bx = std::min<size_t>(1024, tx); size_t gx = 1 + ((tx - 1) / bx); size_t ty = extent.hi.y - extent.lo.y + 1; size_t by = std::min<size_t>(1024 / bx, ty); size_t gy 
= 1 + ((ty - 1) / by); size_t tz = extent.hi.z - extent.lo.z + 1; size_t bz = std::min<size_t>(1024 / (bx * by), tz); size_t gz = 1 + ((tz - 1) / bz); dim3 gridDim(gx, gy, gz); dim3 blockDim(bx, by, bz); hipLaunchKernelGGL(( smooth), dim3(gridDim), dim3(blockDim), 0, 0, extent, tx, ty, tz, alpha, surf_in, surf_out); }
bb0c30e5eb1b49c0422d1d8dd358a7d8cedf6d72.cu
#include "cuda_arrays.h" template <int N> __device__ float read_surface(cudaSurfaceObject_t s, int x, int y, int z); template <> __device__ float read_surface<1>(cudaSurfaceObject_t s, int x, int y, int z) { return surf1Dread<float>(s, x * (int)sizeof(float), cudaBoundaryModeClamp); } template <> __device__ float read_surface<2>(cudaSurfaceObject_t s, int x, int y, int z) { return surf2Dread<float>(s, x * (int)sizeof(float), y, cudaBoundaryModeClamp); } template <> __device__ float read_surface<3>(cudaSurfaceObject_t s, int x, int y, int z) { return surf3Dread<float>(s, x * (int)sizeof(float), y, z, cudaBoundaryModeClamp); } template <int N> __device__ void write_surface(float f, cudaSurfaceObject_t s, int x, int y, int z); template <> __device__ void write_surface<1>(float f, cudaSurfaceObject_t s, int x, int y, int z) { #if CUDART_VERSION <= 8000 surf1Dwrite(f, s, x * sizeof(float)); #else surf1Dwrite<float>(f, s, x * sizeof(float)); #endif } template <> __device__ void write_surface<2>(float f, cudaSurfaceObject_t s, int x, int y, int z) { #if CUDART_VERSION <= 8000 surf2Dwrite(f, s, x * sizeof(float), y); #else surf2Dwrite<float>(f, s, x * sizeof(float), y); #endif } template <> __device__ void write_surface<3>(float f, cudaSurfaceObject_t s, int x, int y, int z) { #if CUDART_VERSION <= 8000 surf3Dwrite(f, s, x * sizeof(float), y, z); #else surf3Dwrite<float>(f, s, x * sizeof(float), y, z); #endif } template <int N, typename T> __global__ void smooth(Realm::Rect<N,T> extent, int nx, int ny, int nz, float alpha, cudaSurfaceObject_t surf_in, cudaSurfaceObject_t surf_out) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; int z = blockDim.z * blockIdx.z + threadIdx.z; if((x >= nx) || (y >= ny) || (z >= nz)) return; // 6-point stencil float f = (1 - alpha) * read_surface<N>(surf_in, x, y, z); f += alpha/6.0f * read_surface<N>(surf_in, x-1, y, z); f += alpha/6.0f * read_surface<N>(surf_in, x+1, y, z); f += alpha/6.0f * read_surface<N>(surf_in, x, y-1, z); f += alpha/6.0f * read_surface<N>(surf_in, x, y+1, z); f += alpha/6.0f * read_surface<N>(surf_in, x, y, z-1); f += alpha/6.0f * read_surface<N>(surf_in, x, y, z+1); write_surface<N>(f, surf_out, x, y, z); } void smooth_kernel(Realm::Rect<1> extent, float alpha, cudaSurfaceObject_t surf_in, cudaSurfaceObject_t surf_out) { size_t tx = extent.hi.x - extent.lo.x + 1; size_t bx = std::min<size_t>(1024, tx); size_t gx = 1 + ((tx - 1) / bx); dim3 gridDim(gx, 1, 1); dim3 blockDim(bx, 1, 1); smooth<<<gridDim, blockDim>>>(extent, tx, 1, 1, alpha, surf_in, surf_out); } void smooth_kernel(Realm::Rect<2> extent, float alpha, cudaSurfaceObject_t surf_in, cudaSurfaceObject_t surf_out) { size_t tx = extent.hi.x - extent.lo.x + 1; size_t bx = std::min<size_t>(1024, tx); size_t gx = 1 + ((tx - 1) / bx); size_t ty = extent.hi.y - extent.lo.y + 1; size_t by = std::min<size_t>(1024 / bx, ty); size_t gy = 1 + ((ty - 1) / by); dim3 gridDim(gx, gy, 1); dim3 blockDim(bx, by, 1); smooth<<<gridDim, blockDim>>>(extent, tx, ty, 1, alpha, surf_in, surf_out); } void smooth_kernel(Realm::Rect<3> extent, float alpha, cudaSurfaceObject_t surf_in, cudaSurfaceObject_t surf_out) { size_t tx = extent.hi.x - extent.lo.x + 1; size_t bx = std::min<size_t>(1024, tx); size_t gx = 1 + ((tx - 1) / bx); size_t ty = extent.hi.y - extent.lo.y + 1; size_t by = std::min<size_t>(1024 / bx, ty); size_t gy = 1 + ((ty - 1) / by); size_t tz = extent.hi.z - extent.lo.z + 1; size_t bz = std::min<size_t>(1024 / (bx * by), tz); size_t gz = 1 + ((tz - 1) 
/ bz); dim3 gridDim(gx, gy, gz); dim3 blockDim(bx, by, bz); smooth<<<gridDim, blockDim>>>(extent, tx, ty, tz, alpha, surf_in, surf_out); }
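This first pair shows the two mechanical rewrites hipify applied to the file: the cuda* surface types and enums become their hip* counterparts (cudaSurfaceObject_t -> hipSurfaceObject_t, cudaBoundaryModeClamp -> hipBoundaryModeClamp), and every triple-chevron launch becomes a hipLaunchKernelGGL call with the grid, block, shared-memory and stream arguments made explicit. A minimal sketch of the same launch mapping on a trivial kernel (the kernel name "scale" and its arguments are illustrative, not taken from the dataset):

// CUDA side
__global__ void scale(float *data, float s, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= s;
}
// CUDA launch:
//   scale<<<gridDim, blockDim>>>(d_data, s, n);
// HIP launch emitted by hipify (0 bytes of dynamic shared memory, default stream):
//   hipLaunchKernelGGL(scale, gridDim, blockDim, 0, 0, d_data, s, n);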
b620ba372d43c8c0c3f51eef77192b98d77390ac.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" /* ============================================================================ Name : Author : Peter Whidden Version : Copyright : Description : ============================================================================ */ static void CheckCudaErrorAux (const char *, unsigned, const char *, hipError_t); #define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value) /* * Device kernel that compares the provided PSF distribution to the distribution * around each pixel in the provided image */ __global__ void convolvePSF(int width, int height, int imageCount, short *image, short *results, float *psf, int psfRad, int psfDim) { // Find bounds of image const int x = blockIdx.x*32+threadIdx.x; const int y = blockIdx.y*32+threadIdx.y; const int minX = max(x-psfRad, 0); const int minY = max(y-psfRad, 0); const int maxX = min(x+psfRad, width); const int maxY = min(y+psfRad, height); const int dx = maxX-minX; const int dy = maxY-minY; if (dx < 1 || dy < 1) return; // Read Image /*__shared__*/ float convArea[13][13]; //convArea[dx][dy]; int xCorrection = x-psfRad < 0 ? 0 : psfDim-dx; int yCorrection = y-psfRad < 0 ? 0 : psfDim-dy; float sum = 0.0; for (int i=0; i<dx; ++i) { for (int j=0; j<dy; ++j) { float value = float(image[0*width*height+(minX+i)*height+minY+j]); sum += value; convArea[i][j] = value; } } float sumDifference = 0.0; for (int i=0; i<dx; ++i) { for (int j=0; j<dy; ++j) { sumDifference += abs(convArea[i][j]/sum - psf[(i+xCorrection)*psfDim+j+yCorrection] ); } } results[0*width*height+x*height+y] = int(1000.0*sumDifference);//*/convArea[psfRad][psfRad]); }
b620ba372d43c8c0c3f51eef77192b98d77390ac.cu
#include "includes.h" /* ============================================================================ Name : Author : Peter Whidden Version : Copyright : Description : ============================================================================ */ static void CheckCudaErrorAux (const char *, unsigned, const char *, cudaError_t); #define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value) /* * Device kernel that compares the provided PSF distribution to the distribution * around each pixel in the provided image */ __global__ void convolvePSF(int width, int height, int imageCount, short *image, short *results, float *psf, int psfRad, int psfDim) { // Find bounds of image const int x = blockIdx.x*32+threadIdx.x; const int y = blockIdx.y*32+threadIdx.y; const int minX = max(x-psfRad, 0); const int minY = max(y-psfRad, 0); const int maxX = min(x+psfRad, width); const int maxY = min(y+psfRad, height); const int dx = maxX-minX; const int dy = maxY-minY; if (dx < 1 || dy < 1) return; // Read Image /*__shared__*/ float convArea[13][13]; //convArea[dx][dy]; int xCorrection = x-psfRad < 0 ? 0 : psfDim-dx; int yCorrection = y-psfRad < 0 ? 0 : psfDim-dy; float sum = 0.0; for (int i=0; i<dx; ++i) { for (int j=0; j<dy; ++j) { float value = float(image[0*width*height+(minX+i)*height+minY+j]); sum += value; convArea[i][j] = value; } } float sumDifference = 0.0; for (int i=0; i<dx; ++i) { for (int j=0; j<dy; ++j) { sumDifference += abs(convArea[i][j]/sum - psf[(i+xCorrection)*psfDim+j+yCorrection] ); } } results[0*width*height+x*height+y] = int(1000.0*sumDifference);//*/convArea[psfRad][psfRad]); }
4ba6471d4ea8dc3186f4fe2d9a765162889e91c6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void __floatToInt(float *A, int *B, int N) { int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) { B[i] = (int)(A[i]); } }
4ba6471d4ea8dc3186f4fe2d9a765162889e91c6.cu
#include "includes.h" __global__ void __floatToInt(float *A, int *B, int N) { int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) { B[i] = (int)(A[i]); } }
76acff2ce9bfdcb5c59660d559bea09bbb748961.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // Created by raver119 on 19.01.18. // @author Yurii Shyrma ([email protected]) // #include <ops/declarable/helpers/s_t_b.h> #include <PointersManager.h> namespace nd4j { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void batchToSpaceCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint cropBottom, const uint cropLeft) { // input [bS, H * blockSize, W * blockSize, iC] // output [bS, H * blockSize - cropBottom - cropTop, W * blockSize - cropLeft - cropRight, iC] // if (cropTop = cropBottom = cropRight = cropLeft = 0) shapes are the same // else: // oH -> [cropBottom, iH - cropTop] // oW -> [cropLeft, iH - cropRight] // xLen >= zLen const auto x = reinterpret_cast<const T*>(vx); auto z = reinterpret_cast<T*>(vz); __shared__ int rank; __shared__ Nd4jLong zLen, *sharedMem; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); rank = shape::rank(zShapeInfo); zLen = shape::length(zShapeInfo); } __syncthreads(); auto coords = sharedMem + threadIdx.x * rank; const auto i = blockIdx.x * blockDim.x + threadIdx.x; if(i >= zLen) return; shape::index2coords(rank, zShapeInfo + 1, i, zLen, coords); const auto zOffset = shape::getOffset(0, zShapeInfo + 1, zShapeInfo + rank + 1, coords, rank); coords[1] += cropBottom; coords[2] += cropLeft; const auto xOffset = shape::getOffset(0, xShapeInfo + 1, xShapeInfo + rank + 1, coords, rank); z[zOffset] = x[xOffset]; } /////////////////////////////////////////////////////////////////// template<typename T> static void batchToSpaceCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint cropBottom, const uint cropLeft) { hipLaunchKernelGGL(( batchToSpaceCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, cropBottom, cropLeft); } BUILD_SINGLE_TEMPLATE(template void batchToSpaceCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint cropBottom, const uint cropLeft), LIBND4J_TYPES); /////////////////////////////////////////////////////////////////// void batchToSpace(nd4j::LaunchContext* context, const NDArray& input, NDArray& output, const uint cropBottom, const uint cropTop, const uint cropLeft, const uint cropRight, const uint blockSize) { // [bS*blockSize*blockSize, H/blockSize, W/blockSize, iC] is 
rearranged/permuted to [bS, oH, oW, iC] // oH = H - cropTop - cropBottom // oW = W - cropLeft - cropRight NDArray inputRearranged0 = input.reshape(input.ordering(), {blockSize, blockSize, output.sizeAt(0), input.sizeAt(1), input.sizeAt(2), input.sizeAt(3)}); inputRearranged0.permutei({2, 3,0, 4,1, 5}); if(input.lengthOf() == output.lengthOf()) { output.assign(inputRearranged0); } else { NDArray inputRearranged1 = inputRearranged0.reshape(input.ordering(), {output.sizeAt(0), input.sizeAt(1) * blockSize, input.sizeAt(2) * blockSize, input.sizeAt(3)}); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = threadsPerBlock * sizeof(Nd4jLong) * output.rankOf() + 128; PointersManager manager(context, "batchToSpace"); NDArray::prepareSpecialUse({&output}, {&inputRearranged1}); BUILD_SINGLE_SELECTOR(input.dataType(), batchToSpaceCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), inputRearranged1.getSpecialBuffer(), inputRearranged1.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), cropBottom, cropLeft), LIBND4J_TYPES); NDArray::registerSpecialUse({&output}, {&inputRearranged1}); manager.synchronize(); } } /////////////////////////////////////////////////////////////////// template<typename X, typename Y> __global__ static void batchToSpaceNDCuda(const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint numOfSpatialDims) { // 4D example, numOfSpatialDims = 2 // input [bS, H * blockShape[0], W * blockShape[1], iC] // output [bS, H * blockShape[0] - cropBottom - cropTop, W * blockShape[1] - cropLeft - cropRight, iC] // if (cropTop = cropBottom = cropRight = cropLeft = 0) shapes are the same // else: // oH -> [cropBottom, iH - cropTop] // oW -> [cropLeft, iH - cropRight] // xLen >= zLen const auto x = reinterpret_cast<const X*>(vx); const auto y = reinterpret_cast<const Y*>(vy); auto z = reinterpret_cast<X*>(vz); __shared__ int rank; __shared__ Nd4jLong zLen, *sharedMem; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); rank = shape::rank(zShapeInfo); zLen = shape::length(zShapeInfo); } __syncthreads(); auto coords = sharedMem + threadIdx.x * rank; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < zLen; i += gridDim.x * blockDim.x) { shape::index2coords(rank, zShapeInfo + 1, i, zLen, coords); const auto zOffset = shape::getOffset(0, zShapeInfo + 1, zShapeInfo + rank + 1, coords, rank); // evaluate spatial coordinates for x for(uint j = 1; j <= numOfSpatialDims; ++j) { const auto yOffset = (j - 1) * yShapeInfo[3]; // yRank = 2, calculate offset manually coords[j] += y[yOffset]; // add crop left } const auto xOffset = shape::getOffset(0, xShapeInfo + 1, xShapeInfo + rank + 1, coords, rank); z[zOffset] = x[xOffset]; } } /////////////////////////////////////////////////////////////////// template<typename X,typename Y> static void batchToSpaceNDCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint numOfSpatialDims) { hipLaunchKernelGGL(( batchToSpaceNDCuda<X,Y>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, numOfSpatialDims); } 
BUILD_DOUBLE_TEMPLATE(template void batchToSpaceNDCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint numOfSpatialDims), LIBND4J_TYPES, INTEGER_TYPES); ////////////////////////////////////////////////////////////////////////// void batchToSpaceND(nd4j::LaunchContext* context, const NDArray& input, const NDArray& blockShape, const NDArray& crop, NDArray& output) { // 4D example, numOfSpatialDims = 2 - two spatial dimensions // [bS*blockShape[0]*blockShape[1], iH, iW, iC] is rearranged/permuted to [bS, iH*blockShape[0] - cropTop - cropBottom, iW*blockShape[1] - cropLeft - cropRight, iC] const uint rank = input.rankOf(); const uint numOfSpatialDims = blockShape.sizeAt(0); //*** construct reshaping std::vector for first reshape of input array ***// std::vector<Nd4jLong> temp(numOfSpatialDims + rank); int i; for(i = 0; i < numOfSpatialDims; ++i) temp[i] = blockShape.e<Nd4jLong>(i); temp[i++] = output.sizeAt(0); for(int j = 1; j < rank; ++i, ++j) temp[i] = input.sizeAt(j); NDArray inputRearranged0 = input.reshape(input.ordering(), temp); //*** construct permuting std::vector for permutation of input array ***// temp[0] = numOfSpatialDims; for(i = 1; i <= numOfSpatialDims; ++i) { temp[2*i - 1] = numOfSpatialDims + i; temp[2*i] = i - 1; } for(i = 2 * numOfSpatialDims + 1; i < temp.size(); ++i) temp[i] = i; inputRearranged0.permutei(temp); if(input.lengthOf() == output.lengthOf()) { output.assign(inputRearranged0); } else { //*** construct reshaping std::vector for second reshape of input array ***// temp.resize(rank); temp[0] = output.sizeAt(0); for(i = 1; i < rank; ++i) temp[i] = (i <= numOfSpatialDims) ? 
input.sizeAt(i) * blockShape.e<Nd4jLong>(i - 1) : input.sizeAt(i); NDArray inputRearranged1 = inputRearranged0.reshape(input.ordering(), temp); const int threadsPerBlock = MAX_NUM_THREADS / 4; const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = threadsPerBlock * sizeof(Nd4jLong) * output.rankOf() + 128; PointersManager manager(context, "batchToSpaceND"); NDArray::prepareSpecialUse({&output}, {&inputRearranged1, &crop}); BUILD_DOUBLE_SELECTOR(input.dataType(), crop.dataType(), batchToSpaceNDCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), inputRearranged1.getSpecialBuffer(), inputRearranged1.getSpecialShapeInfo(), crop.getSpecialBuffer(), crop.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), numOfSpatialDims), LIBND4J_TYPES, INTEGER_TYPES); NDArray::registerSpecialUse({&output}, {&inputRearranged1, &crop}); manager.synchronize(); } } /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void spaceToBatchCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint padBottom, const uint padTop, const uint padLeft, const uint padRight) { // input [bS, H * blockSize - padBottom - padTop, W * blockSize - padLeft - padRight, iC] // output [bs, H * blockSize, W * blockSize, iC] // if (padTop = padBottom = padRight = padLeft = 0) shapes are the same // else: // iH -> [padBottom, oH - padTop] // iW -> [padLeft, oW - padRight] // zLen > xLen const auto x = reinterpret_cast<const T*>(vx); auto z = reinterpret_cast<T*>(vz); __shared__ int rank; __shared__ Nd4jLong zLen, *sharedMem; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); rank = shape::rank(zShapeInfo); zLen = shape::length(zShapeInfo); } __syncthreads(); auto coords = sharedMem + threadIdx.x * rank; const auto i = blockIdx.x * blockDim.x + threadIdx.x; if(i >= zLen) return; shape::index2coords(rank, zShapeInfo + 1, i, zLen, coords); const auto zOffset = shape::getOffset(0, zShapeInfo + 1, zShapeInfo + rank + 1, coords, rank); if(coords[1] >= padBottom && coords[1] < zShapeInfo[2] - padTop && coords[2] >= padLeft && coords[2] < zShapeInfo[3] - padRight) { coords[1] -= padBottom; coords[2] -= padLeft; const auto xOffset = shape::getOffset(0, xShapeInfo + 1, xShapeInfo + rank + 1, coords, rank); z[zOffset] = x[xOffset]; } else z[zOffset] = 0.f; } /////////////////////////////////////////////////////////////////// template<typename T> static void spaceToBatchCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint padBottom, const uint padTop, const uint padLeft, const uint padRight) { hipLaunchKernelGGL(( spaceToBatchCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, padBottom, padTop, padLeft, padRight); } BUILD_SINGLE_TEMPLATE(template void spaceToBatchCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint padBottom, const uint padTop, const uint padLeft, const uint padRight), LIBND4J_TYPES); /////////////////////////////////////////////////////////////////// void spaceToBatch(nd4j::LaunchContext* context, const NDArray& input, 
NDArray& output, const uint padBottom, const uint padTop, const uint padLeft, const uint padRight, const uint blockSize) { // [bS, iH, iW, iC] is rearranged/permuted to [bS*blockSize*blockSize, (iH + padBottom + padTop)/blockSize, (iW + padLeft + padRight)/blockSize, iC] NDArray outputRearranged0 = output.reshape(output.ordering(), {blockSize, blockSize, input.sizeAt(0), output.sizeAt(1), output.sizeAt(2), input.sizeAt(3)}); outputRearranged0.permutei({2, 3,0, 4,1, 5}); if(input.lengthOf() == output.lengthOf()) { outputRearranged0.assign(input); } else { NDArray outputRearranged1 = outputRearranged0.reshape(output.ordering(), {input.sizeAt(0), output.sizeAt(1) * blockSize, output.sizeAt(2) * blockSize, input.sizeAt(3)}); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = threadsPerBlock * sizeof(Nd4jLong) * output.rankOf() + 128; PointersManager manager(context, "spaceToBatch"); NDArray::prepareSpecialUse({&outputRearranged1}, {&input}); BUILD_SINGLE_SELECTOR(input.dataType(), spaceToBatchCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), outputRearranged1.specialBuffer(), outputRearranged1.specialShapeInfo(), padBottom, padTop, padLeft, padRight), LIBND4J_TYPES); NDArray::registerSpecialUse({&outputRearranged1}, {&input}); manager.synchronize(); if(output.getSpecialBuffer() != outputRearranged1.getSpecialBuffer()) outputRearranged0.assign(outputRearranged1); } } /////////////////////////////////////////////////////////////////// template<typename X, typename Y> __global__ static void spaceToBatchNDCuda(const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint numOfSpatialDims) { // x - input, y - padding, z - output // 4D example // input [bS, H * blockShape[0] - padBottom - padTop, W * blockShape[1] - padLeft - padRight, iC] // output [bS, H * blockShape[0], W * blockShape[1], iC] // if (padTop = padBottom = padRight = padLeft = 0) shapes are the same // else: // iH -> [padBottom, oH - padTop] // iW -> [padLeft, oW - padRight] // zLen > xLen const auto x = reinterpret_cast<const X*>(vx); const auto y = reinterpret_cast<const Y*>(vy); auto z = reinterpret_cast<X*>(vz); __shared__ int rank; // xRank = zRank, yRank = 2; __shared__ Nd4jLong zLen, totalThreads, *sharedMem; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); rank = shape::rank(zShapeInfo); zLen = shape::length(zShapeInfo); totalThreads = gridDim.x * blockDim.x; } __syncthreads(); auto coords = sharedMem + threadIdx.x * rank; for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < zLen; i += totalThreads) { shape::index2coords(rank, zShapeInfo + 1, i, zLen, coords); const auto zOffset = shape::getOffset(0, zShapeInfo + 1, zShapeInfo + rank + 1, coords, rank); bool within = true; for(uint j = 1; j <= numOfSpatialDims; ++j) { // yRank = 2, calculate offset manually const auto yOffset = (j - 1) * yShapeInfo[3]; const auto padLeft = y[yOffset]; const auto padRight = y[yOffset + yShapeInfo[4]]; within &= (coords[j] >= padLeft && coords[j] < shape::shapeOf(const_cast<Nd4jLong*>(zShapeInfo))[j] - padRight); if(!within) break; coords[j] -= padLeft; // get coordinates for x } if(within) z[zOffset] = x[shape::getOffset(0, xShapeInfo + 1, xShapeInfo + rank + 1, coords, rank)]; else z[zOffset] = 0.f; } } 
/////////////////////////////////////////////////////////////////// template<typename X, typename Y> static void spaceToBatchNDCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint numOfSpatialDims) { hipLaunchKernelGGL(( spaceToBatchNDCuda<X,Y>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, numOfSpatialDims); } BUILD_DOUBLE_TEMPLATE(template void spaceToBatchNDCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint numOfSpatialDims), LIBND4J_TYPES, INTEGER_TYPES); ////////////////////////////////////////////////////////////////////////// void spaceToBatchND(nd4j::LaunchContext* context, const NDArray& input, const NDArray& blockShape, const NDArray& padding, NDArray& output ) { // 4D example with two spatial dimensions // [bS, iH, iW, iC] is rearranged/permuted to [bS*blockShape[0]*blockShape[1], (iH + padBottom + padTop)/blockShape[0], (iW + padLeft + padRight)/blockShape[1], iC] const uint rank = input.rankOf(); const uint numOfSpatialDims = blockShape.sizeAt(0); //*** construct reshaping std::vector for first reshape of output array ***// std::vector<Nd4jLong> temp(numOfSpatialDims + rank); int i; for(i = 0; i < numOfSpatialDims; ++i) temp[i] = blockShape.e<Nd4jLong>(i); temp[i++] = input.sizeAt(0); for(int j = 1; j < rank; ++i, ++j) temp[i] = output.sizeAt(j); NDArray outputRearranged0 = output.reshape(output.ordering(), temp); //*** construct permuting std::vector for permutation of output array ***// temp[0] = numOfSpatialDims; for(i = 1; i <= numOfSpatialDims; ++i) { temp[2*i - 1] = numOfSpatialDims + i; temp[2*i] = i - 1; } for(i = 2 * numOfSpatialDims + 1; i < temp.size(); ++i) temp[i] = i; outputRearranged0.permutei(temp); // ****** // if(input.lengthOf() == output.lengthOf()) { outputRearranged0.assign(input); } else { //*** construct reshaping std::vector for second reshape of output array ***// temp.resize(rank); temp[0] = input.sizeAt(0); for(i = 1; i < rank; ++i) temp[i] = (i <= numOfSpatialDims) ? 
output.sizeAt(i) * blockShape.e<Nd4jLong>(i - 1) : output.sizeAt(i); NDArray outputRearranged1 = outputRearranged0.reshape(output.ordering(), temp); const int threadsPerBlock = MAX_NUM_THREADS / 4; const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = threadsPerBlock * sizeof(Nd4jLong) * output.rankOf() + 128; PointersManager manager(context, "spaceToBatchND"); NDArray::prepareSpecialUse({&outputRearranged1}, {&input, &padding}); BUILD_DOUBLE_SELECTOR(input.dataType(), padding.dataType(), spaceToBatchNDCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), padding.getSpecialBuffer(), padding.getSpecialShapeInfo(), outputRearranged1.specialBuffer(), outputRearranged1.specialShapeInfo(), numOfSpatialDims), LIBND4J_TYPES, INTEGER_TYPES); NDArray::registerSpecialUse({&outputRearranged1}, {&input, &padding}); manager.synchronize(); if(output.getSpecialBuffer() != outputRearranged1.getSpecialBuffer()) outputRearranged0.assign(outputRearranged1); } } /* template <int N, bool B2S> struct SpaceToBatchHelper { template <typename T> static void run(T *ptrSpace, const Nd4jLong *space_shape, const Nd4jLong *space_strides, const Nd4jLong *block_shape, const Nd4jLong *pad_start, const Nd4jLong *block_offsets, T *ptrBatch, const Nd4jLong *batch_shape, const Nd4jLong *batch_strides) { for (int batch_pos = 0; batch_pos < batch_shape[0]; ++batch_pos) { const int space_pos = batch_pos * block_shape[0] + block_offsets[0] - pad_start[0]; if (space_pos >= 0 && space_pos < space_shape[0]) { SpaceToBatchHelper<N - 1, B2S>::run(ptrSpace + space_pos * space_strides[0], space_shape + 1, space_strides + 1, block_shape + 1, pad_start + 1, block_offsets + 1, ptrBatch, batch_shape + 1, batch_strides + 1); } else { if (!B2S) for (int i = 0; i < batch_strides[0]; i++) ptrBatch[i] = (T) 0.f; } ptrBatch += batch_strides[0]; } } }; template <bool B2S> struct SpaceToBatchHelper<0, B2S> { template <typename T> static void run(T *ptrSpace, const Nd4jLong *space_shape, const Nd4jLong *space_strides, const Nd4jLong *block_shape, const Nd4jLong *pad_start, const Nd4jLong *block_offsets, T *ptrBatch, const Nd4jLong *batch_shape, const Nd4jLong *batch_strides) { int str = batch_strides[-1]; for (int i = 0; i < str; i++) if (B2S) ptrSpace[i] = ptrBatch[i]; else ptrBatch[i] = ptrSpace[i]; } }; template <typename T, int NUM_BLOCK_DIMS, bool B2S> void _execute(nd4j::LaunchContext * context, void *vptrSpace, const Nd4jLong *space_shape, const Nd4jLong *space_strides, const Nd4jLong *block_shape, const Nd4jLong *pad_start, const Nd4jLong *block_offsets, void *vptrBatch, const Nd4jLong *batch_shape, const Nd4jLong *batch_strides) { auto ptrSpace = reinterpret_cast<T *>(vptrSpace); auto ptrBatch = reinterpret_cast<T *>(vptrBatch); SpaceToBatchHelper<NUM_BLOCK_DIMS, B2S>::run(ptrSpace, space_shape, space_strides, block_shape, pad_start, block_offsets, ptrBatch, batch_shape, batch_strides); }; Nd4jStatus _batchToSpace(nd4j::LaunchContext * context, int internal_block_dims, NDArray *input, NDArray *output, std::vector<Nd4jLong> &internal_input_shape, std::vector<Nd4jLong> &internal_output_shape, Nd4jLong *block_shape, Nd4jLong *crops) { return Status::OK(); } #define STB_DIM (0, 1),\ (1, 2),\ (2, 3),\ (3, 4) #define STB_BOOL (0, false),\ (1, true) BUILD_TRIPLE_TEMPLATE(template void _execute, (nd4j::LaunchContext * context, void *ptrSpace, const Nd4jLong *space_shape, const Nd4jLong *space_strides, const 
Nd4jLong *block_shape, const Nd4jLong *pad_start, const Nd4jLong *block_offsets, void *ptrBatch, const Nd4jLong *batch_shape, const Nd4jLong *batch_strides), LIBND4J_TYPES, STB_DIM, STB_BOOL); #undef STB_BOOL #undef STB_DIM */ } } }
76acff2ce9bfdcb5c59660d559bea09bbb748961.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // Created by raver119 on 19.01.18. // @author Yurii Shyrma ([email protected]) // #include <ops/declarable/helpers/s_t_b.h> #include <PointersManager.h> namespace nd4j { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void batchToSpaceCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint cropBottom, const uint cropLeft) { // input [bS, H * blockSize, W * blockSize, iC] // output [bS, H * blockSize - cropBottom - cropTop, W * blockSize - cropLeft - cropRight, iC] // if (cropTop = cropBottom = cropRight = cropLeft = 0) shapes are the same // else: // oH -> [cropBottom, iH - cropTop] // oW -> [cropLeft, iH - cropRight] // xLen >= zLen const auto x = reinterpret_cast<const T*>(vx); auto z = reinterpret_cast<T*>(vz); __shared__ int rank; __shared__ Nd4jLong zLen, *sharedMem; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); rank = shape::rank(zShapeInfo); zLen = shape::length(zShapeInfo); } __syncthreads(); auto coords = sharedMem + threadIdx.x * rank; const auto i = blockIdx.x * blockDim.x + threadIdx.x; if(i >= zLen) return; shape::index2coords(rank, zShapeInfo + 1, i, zLen, coords); const auto zOffset = shape::getOffset(0, zShapeInfo + 1, zShapeInfo + rank + 1, coords, rank); coords[1] += cropBottom; coords[2] += cropLeft; const auto xOffset = shape::getOffset(0, xShapeInfo + 1, xShapeInfo + rank + 1, coords, rank); z[zOffset] = x[xOffset]; } /////////////////////////////////////////////////////////////////// template<typename T> static void batchToSpaceCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint cropBottom, const uint cropLeft) { batchToSpaceCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, cropBottom, cropLeft); } BUILD_SINGLE_TEMPLATE(template void batchToSpaceCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint cropBottom, const uint cropLeft), LIBND4J_TYPES); /////////////////////////////////////////////////////////////////// void batchToSpace(nd4j::LaunchContext* context, const NDArray& input, NDArray& output, const uint cropBottom, const uint cropTop, const uint cropLeft, const uint cropRight, const uint blockSize) { // [bS*blockSize*blockSize, H/blockSize, W/blockSize, iC] is rearranged/permuted to [bS, oH, oW, iC] // oH = H - cropTop - cropBottom // oW = W - cropLeft - cropRight NDArray 
inputRearranged0 = input.reshape(input.ordering(), {blockSize, blockSize, output.sizeAt(0), input.sizeAt(1), input.sizeAt(2), input.sizeAt(3)}); inputRearranged0.permutei({2, 3,0, 4,1, 5}); if(input.lengthOf() == output.lengthOf()) { output.assign(inputRearranged0); } else { NDArray inputRearranged1 = inputRearranged0.reshape(input.ordering(), {output.sizeAt(0), input.sizeAt(1) * blockSize, input.sizeAt(2) * blockSize, input.sizeAt(3)}); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = threadsPerBlock * sizeof(Nd4jLong) * output.rankOf() + 128; PointersManager manager(context, "batchToSpace"); NDArray::prepareSpecialUse({&output}, {&inputRearranged1}); BUILD_SINGLE_SELECTOR(input.dataType(), batchToSpaceCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), inputRearranged1.getSpecialBuffer(), inputRearranged1.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), cropBottom, cropLeft), LIBND4J_TYPES); NDArray::registerSpecialUse({&output}, {&inputRearranged1}); manager.synchronize(); } } /////////////////////////////////////////////////////////////////// template<typename X, typename Y> __global__ static void batchToSpaceNDCuda(const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint numOfSpatialDims) { // 4D example, numOfSpatialDims = 2 // input [bS, H * blockShape[0], W * blockShape[1], iC] // output [bS, H * blockShape[0] - cropBottom - cropTop, W * blockShape[1] - cropLeft - cropRight, iC] // if (cropTop = cropBottom = cropRight = cropLeft = 0) shapes are the same // else: // oH -> [cropBottom, iH - cropTop] // oW -> [cropLeft, iH - cropRight] // xLen >= zLen const auto x = reinterpret_cast<const X*>(vx); const auto y = reinterpret_cast<const Y*>(vy); auto z = reinterpret_cast<X*>(vz); __shared__ int rank; __shared__ Nd4jLong zLen, *sharedMem; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); rank = shape::rank(zShapeInfo); zLen = shape::length(zShapeInfo); } __syncthreads(); auto coords = sharedMem + threadIdx.x * rank; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < zLen; i += gridDim.x * blockDim.x) { shape::index2coords(rank, zShapeInfo + 1, i, zLen, coords); const auto zOffset = shape::getOffset(0, zShapeInfo + 1, zShapeInfo + rank + 1, coords, rank); // evaluate spatial coordinates for x for(uint j = 1; j <= numOfSpatialDims; ++j) { const auto yOffset = (j - 1) * yShapeInfo[3]; // yRank = 2, calculate offset manually coords[j] += y[yOffset]; // add crop left } const auto xOffset = shape::getOffset(0, xShapeInfo + 1, xShapeInfo + rank + 1, coords, rank); z[zOffset] = x[xOffset]; } } /////////////////////////////////////////////////////////////////// template<typename X,typename Y> static void batchToSpaceNDCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint numOfSpatialDims) { batchToSpaceNDCuda<X,Y><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, numOfSpatialDims); } BUILD_DOUBLE_TEMPLATE(template void batchToSpaceNDCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t 
*stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint numOfSpatialDims), LIBND4J_TYPES, INTEGER_TYPES); ////////////////////////////////////////////////////////////////////////// void batchToSpaceND(nd4j::LaunchContext* context, const NDArray& input, const NDArray& blockShape, const NDArray& crop, NDArray& output) { // 4D example, numOfSpatialDims = 2 - two spatial dimensions // [bS*blockShape[0]*blockShape[1], iH, iW, iC] is rearranged/permuted to [bS, iH*blockShape[0] - cropTop - cropBottom, iW*blockShape[1] - cropLeft - cropRight, iC] const uint rank = input.rankOf(); const uint numOfSpatialDims = blockShape.sizeAt(0); //*** construct reshaping std::vector for first reshape of input array ***// std::vector<Nd4jLong> temp(numOfSpatialDims + rank); int i; for(i = 0; i < numOfSpatialDims; ++i) temp[i] = blockShape.e<Nd4jLong>(i); temp[i++] = output.sizeAt(0); for(int j = 1; j < rank; ++i, ++j) temp[i] = input.sizeAt(j); NDArray inputRearranged0 = input.reshape(input.ordering(), temp); //*** construct permuting std::vector for permutation of input array ***// temp[0] = numOfSpatialDims; for(i = 1; i <= numOfSpatialDims; ++i) { temp[2*i - 1] = numOfSpatialDims + i; temp[2*i] = i - 1; } for(i = 2 * numOfSpatialDims + 1; i < temp.size(); ++i) temp[i] = i; inputRearranged0.permutei(temp); if(input.lengthOf() == output.lengthOf()) { output.assign(inputRearranged0); } else { //*** construct reshaping std::vector for second reshape of input array ***// temp.resize(rank); temp[0] = output.sizeAt(0); for(i = 1; i < rank; ++i) temp[i] = (i <= numOfSpatialDims) ? input.sizeAt(i) * blockShape.e<Nd4jLong>(i - 1) : input.sizeAt(i); NDArray inputRearranged1 = inputRearranged0.reshape(input.ordering(), temp); const int threadsPerBlock = MAX_NUM_THREADS / 4; const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = threadsPerBlock * sizeof(Nd4jLong) * output.rankOf() + 128; PointersManager manager(context, "batchToSpaceND"); NDArray::prepareSpecialUse({&output}, {&inputRearranged1, &crop}); BUILD_DOUBLE_SELECTOR(input.dataType(), crop.dataType(), batchToSpaceNDCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), inputRearranged1.getSpecialBuffer(), inputRearranged1.getSpecialShapeInfo(), crop.getSpecialBuffer(), crop.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), numOfSpatialDims), LIBND4J_TYPES, INTEGER_TYPES); NDArray::registerSpecialUse({&output}, {&inputRearranged1, &crop}); manager.synchronize(); } } /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void spaceToBatchCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint padBottom, const uint padTop, const uint padLeft, const uint padRight) { // input [bS, H * blockSize - padBottom - padTop, W * blockSize - padLeft - padRight, iC] // output [bs, H * blockSize, W * blockSize, iC] // if (padTop = padBottom = padRight = padLeft = 0) shapes are the same // else: // iH -> [padBottom, oH - padTop] // iW -> [padLeft, oW - padRight] // zLen > xLen const auto x = reinterpret_cast<const T*>(vx); auto z = reinterpret_cast<T*>(vz); __shared__ int rank; __shared__ Nd4jLong zLen, *sharedMem; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); rank = shape::rank(zShapeInfo); zLen = shape::length(zShapeInfo); } 
__syncthreads(); auto coords = sharedMem + threadIdx.x * rank; const auto i = blockIdx.x * blockDim.x + threadIdx.x; if(i >= zLen) return; shape::index2coords(rank, zShapeInfo + 1, i, zLen, coords); const auto zOffset = shape::getOffset(0, zShapeInfo + 1, zShapeInfo + rank + 1, coords, rank); if(coords[1] >= padBottom && coords[1] < zShapeInfo[2] - padTop && coords[2] >= padLeft && coords[2] < zShapeInfo[3] - padRight) { coords[1] -= padBottom; coords[2] -= padLeft; const auto xOffset = shape::getOffset(0, xShapeInfo + 1, xShapeInfo + rank + 1, coords, rank); z[zOffset] = x[xOffset]; } else z[zOffset] = 0.f; } /////////////////////////////////////////////////////////////////// template<typename T> static void spaceToBatchCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint padBottom, const uint padTop, const uint padLeft, const uint padRight) { spaceToBatchCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, padBottom, padTop, padLeft, padRight); } BUILD_SINGLE_TEMPLATE(template void spaceToBatchCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint padBottom, const uint padTop, const uint padLeft, const uint padRight), LIBND4J_TYPES); /////////////////////////////////////////////////////////////////// void spaceToBatch(nd4j::LaunchContext* context, const NDArray& input, NDArray& output, const uint padBottom, const uint padTop, const uint padLeft, const uint padRight, const uint blockSize) { // [bS, iH, iW, iC] is rearranged/permuted to [bS*blockSize*blockSize, (iH + padBottom + padTop)/blockSize, (iW + padLeft + padRight)/blockSize, iC] NDArray outputRearranged0 = output.reshape(output.ordering(), {blockSize, blockSize, input.sizeAt(0), output.sizeAt(1), output.sizeAt(2), input.sizeAt(3)}); outputRearranged0.permutei({2, 3,0, 4,1, 5}); if(input.lengthOf() == output.lengthOf()) { outputRearranged0.assign(input); } else { NDArray outputRearranged1 = outputRearranged0.reshape(output.ordering(), {input.sizeAt(0), output.sizeAt(1) * blockSize, output.sizeAt(2) * blockSize, input.sizeAt(3)}); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = threadsPerBlock * sizeof(Nd4jLong) * output.rankOf() + 128; PointersManager manager(context, "spaceToBatch"); NDArray::prepareSpecialUse({&outputRearranged1}, {&input}); BUILD_SINGLE_SELECTOR(input.dataType(), spaceToBatchCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), outputRearranged1.specialBuffer(), outputRearranged1.specialShapeInfo(), padBottom, padTop, padLeft, padRight), LIBND4J_TYPES); NDArray::registerSpecialUse({&outputRearranged1}, {&input}); manager.synchronize(); if(output.getSpecialBuffer() != outputRearranged1.getSpecialBuffer()) outputRearranged0.assign(outputRearranged1); } } /////////////////////////////////////////////////////////////////// template<typename X, typename Y> __global__ static void spaceToBatchNDCuda(const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint numOfSpatialDims) { // x - input, y - padding, z - 
output // 4D example // input [bS, H * blockShape[0] - padBottom - padTop, W * blockShape[1] - padLeft - padRight, iC] // output [bS, H * blockShape[0], W * blockShape[1], iC] // if (padTop = padBottom = padRight = padLeft = 0) shapes are the same // else: // iH -> [padBottom, oH - padTop] // iW -> [padLeft, oW - padRight] // zLen > xLen const auto x = reinterpret_cast<const X*>(vx); const auto y = reinterpret_cast<const Y*>(vy); auto z = reinterpret_cast<X*>(vz); __shared__ int rank; // xRank = zRank, yRank = 2; __shared__ Nd4jLong zLen, totalThreads, *sharedMem; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); rank = shape::rank(zShapeInfo); zLen = shape::length(zShapeInfo); totalThreads = gridDim.x * blockDim.x; } __syncthreads(); auto coords = sharedMem + threadIdx.x * rank; for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < zLen; i += totalThreads) { shape::index2coords(rank, zShapeInfo + 1, i, zLen, coords); const auto zOffset = shape::getOffset(0, zShapeInfo + 1, zShapeInfo + rank + 1, coords, rank); bool within = true; for(uint j = 1; j <= numOfSpatialDims; ++j) { // yRank = 2, calculate offset manually const auto yOffset = (j - 1) * yShapeInfo[3]; const auto padLeft = y[yOffset]; const auto padRight = y[yOffset + yShapeInfo[4]]; within &= (coords[j] >= padLeft && coords[j] < shape::shapeOf(const_cast<Nd4jLong*>(zShapeInfo))[j] - padRight); if(!within) break; coords[j] -= padLeft; // get coordinates for x } if(within) z[zOffset] = x[shape::getOffset(0, xShapeInfo + 1, xShapeInfo + rank + 1, coords, rank)]; else z[zOffset] = 0.f; } } /////////////////////////////////////////////////////////////////// template<typename X, typename Y> static void spaceToBatchNDCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint numOfSpatialDims) { spaceToBatchNDCuda<X,Y><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, numOfSpatialDims); } BUILD_DOUBLE_TEMPLATE(template void spaceToBatchNDCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint numOfSpatialDims), LIBND4J_TYPES, INTEGER_TYPES); ////////////////////////////////////////////////////////////////////////// void spaceToBatchND(nd4j::LaunchContext* context, const NDArray& input, const NDArray& blockShape, const NDArray& padding, NDArray& output ) { // 4D example with two spatial dimensions // [bS, iH, iW, iC] is rearranged/permuted to [bS*blockShape[0]*blockShape[1], (iH + padBottom + padTop)/blockShape[0], (iW + padLeft + padRight)/blockShape[1], iC] const uint rank = input.rankOf(); const uint numOfSpatialDims = blockShape.sizeAt(0); //*** construct reshaping std::vector for first reshape of output array ***// std::vector<Nd4jLong> temp(numOfSpatialDims + rank); int i; for(i = 0; i < numOfSpatialDims; ++i) temp[i] = blockShape.e<Nd4jLong>(i); temp[i++] = input.sizeAt(0); for(int j = 1; j < rank; ++i, ++j) temp[i] = output.sizeAt(j); NDArray outputRearranged0 = output.reshape(output.ordering(), temp); //*** construct permuting std::vector for permutation of output array ***// temp[0] = numOfSpatialDims; for(i = 1; i <= 
numOfSpatialDims; ++i) { temp[2*i - 1] = numOfSpatialDims + i; temp[2*i] = i - 1; } for(i = 2 * numOfSpatialDims + 1; i < temp.size(); ++i) temp[i] = i; outputRearranged0.permutei(temp); // ****** // if(input.lengthOf() == output.lengthOf()) { outputRearranged0.assign(input); } else { //*** construct reshaping std::vector for second reshape of output array ***// temp.resize(rank); temp[0] = input.sizeAt(0); for(i = 1; i < rank; ++i) temp[i] = (i <= numOfSpatialDims) ? output.sizeAt(i) * blockShape.e<Nd4jLong>(i - 1) : output.sizeAt(i); NDArray outputRearranged1 = outputRearranged0.reshape(output.ordering(), temp); const int threadsPerBlock = MAX_NUM_THREADS / 4; const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = threadsPerBlock * sizeof(Nd4jLong) * output.rankOf() + 128; PointersManager manager(context, "spaceToBatchND"); NDArray::prepareSpecialUse({&outputRearranged1}, {&input, &padding}); BUILD_DOUBLE_SELECTOR(input.dataType(), padding.dataType(), spaceToBatchNDCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), padding.getSpecialBuffer(), padding.getSpecialShapeInfo(), outputRearranged1.specialBuffer(), outputRearranged1.specialShapeInfo(), numOfSpatialDims), LIBND4J_TYPES, INTEGER_TYPES); NDArray::registerSpecialUse({&outputRearranged1}, {&input, &padding}); manager.synchronize(); if(output.getSpecialBuffer() != outputRearranged1.getSpecialBuffer()) outputRearranged0.assign(outputRearranged1); } } /* template <int N, bool B2S> struct SpaceToBatchHelper { template <typename T> static void run(T *ptrSpace, const Nd4jLong *space_shape, const Nd4jLong *space_strides, const Nd4jLong *block_shape, const Nd4jLong *pad_start, const Nd4jLong *block_offsets, T *ptrBatch, const Nd4jLong *batch_shape, const Nd4jLong *batch_strides) { for (int batch_pos = 0; batch_pos < batch_shape[0]; ++batch_pos) { const int space_pos = batch_pos * block_shape[0] + block_offsets[0] - pad_start[0]; if (space_pos >= 0 && space_pos < space_shape[0]) { SpaceToBatchHelper<N - 1, B2S>::run(ptrSpace + space_pos * space_strides[0], space_shape + 1, space_strides + 1, block_shape + 1, pad_start + 1, block_offsets + 1, ptrBatch, batch_shape + 1, batch_strides + 1); } else { if (!B2S) for (int i = 0; i < batch_strides[0]; i++) ptrBatch[i] = (T) 0.f; } ptrBatch += batch_strides[0]; } } }; template <bool B2S> struct SpaceToBatchHelper<0, B2S> { template <typename T> static void run(T *ptrSpace, const Nd4jLong *space_shape, const Nd4jLong *space_strides, const Nd4jLong *block_shape, const Nd4jLong *pad_start, const Nd4jLong *block_offsets, T *ptrBatch, const Nd4jLong *batch_shape, const Nd4jLong *batch_strides) { int str = batch_strides[-1]; for (int i = 0; i < str; i++) if (B2S) ptrSpace[i] = ptrBatch[i]; else ptrBatch[i] = ptrSpace[i]; } }; template <typename T, int NUM_BLOCK_DIMS, bool B2S> void _execute(nd4j::LaunchContext * context, void *vptrSpace, const Nd4jLong *space_shape, const Nd4jLong *space_strides, const Nd4jLong *block_shape, const Nd4jLong *pad_start, const Nd4jLong *block_offsets, void *vptrBatch, const Nd4jLong *batch_shape, const Nd4jLong *batch_strides) { auto ptrSpace = reinterpret_cast<T *>(vptrSpace); auto ptrBatch = reinterpret_cast<T *>(vptrBatch); SpaceToBatchHelper<NUM_BLOCK_DIMS, B2S>::run(ptrSpace, space_shape, space_strides, block_shape, pad_start, block_offsets, ptrBatch, batch_shape, batch_strides); }; Nd4jStatus _batchToSpace(nd4j::LaunchContext * 
context, int internal_block_dims, NDArray *input, NDArray *output, std::vector<Nd4jLong> &internal_input_shape, std::vector<Nd4jLong> &internal_output_shape, Nd4jLong *block_shape, Nd4jLong *crops) { return Status::OK(); } #define STB_DIM (0, 1),\ (1, 2),\ (2, 3),\ (3, 4) #define STB_BOOL (0, false),\ (1, true) BUILD_TRIPLE_TEMPLATE(template void _execute, (nd4j::LaunchContext * context, void *ptrSpace, const Nd4jLong *space_shape, const Nd4jLong *space_strides, const Nd4jLong *block_shape, const Nd4jLong *pad_start, const Nd4jLong *block_offsets, void *ptrBatch, const Nd4jLong *batch_shape, const Nd4jLong *batch_strides), LIBND4J_TYPES, STB_DIM, STB_BOOL); #undef STB_BOOL #undef STB_DIM */ } } }
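Beyond the usual cudaStream_t -> hipStream_t and launch-syntax rewrites, both versions of these batchToSpace/spaceToBatch kernels share a dynamic-shared-memory pattern worth noting: the launcher reserves threadsPerBlock * sizeof(Nd4jLong) * rank bytes (plus 128 bytes of slack), and each thread takes its own rank-element coordinate scratch at sharedMem + threadIdx.x * rank. A stripped-down sketch of that pattern outside libnd4j (the kernel name and the int64_t coordinate type below are illustrative, not library API):

#include <hip/hip_runtime.h>
#include <cstdint>

// Each thread owns a rank-element scratch array carved out of dynamic shared memory.
__global__ void coordScratchDemo(int rank, int64_t *out)
{
    extern __shared__ unsigned char shmem[];
    int64_t *coords = reinterpret_cast<int64_t *>(shmem) + threadIdx.x * rank;
    for (int d = 0; d < rank; ++d)
        coords[d] = threadIdx.x + d;              // stand-in for shape::index2coords
    out[blockIdx.x * blockDim.x + threadIdx.x] = coords[rank - 1];
}

// Launched with the same sizing rule the launchers above use:
//   size_t sharedMem = threadsPerBlock * sizeof(int64_t) * rank + 128;
//   hipLaunchKernelGGL(coordScratchDemo, dim3(blocks), dim3(threads), sharedMem, stream, rank, out);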
737292eab9613e9d6d076883314f6fa24712787b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /****************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ******************************************************************************/ #include <stdio.h> __global__ void VecAdd(int n, const float *A, const float *B, float* C) { /******************************************************************** * * Compute C = A + B * where A is a (1 * n) vector * where B is a (1 * n) vector * where C is a (1 * n) vector * ********************************************************************/ // INSERT KERNEL CODE HERE int i = blockIdx.x * blockDim.x + threadIdx.x; if (i<n) { C[i] = A[i] + B[i]; } } void basicVecAdd( float *A, float *B, float *C, int n) { // Initialize thread block and kernel grid dimensions --------------------- const unsigned int BLOCK_SIZE = 256; dim3 DimGrid(ceil(n/256.0),1,1); dim3 DimBlock(256.0, 1, 1); hipLaunchKernelGGL(( VecAdd), dim3(DimGrid),dim3(DimBlock), 0, 0, n, A, B, C); //INSERT CODE HERE }
737292eab9613e9d6d076883314f6fa24712787b.cu
/****************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ******************************************************************************/ #include <stdio.h> __global__ void VecAdd(int n, const float *A, const float *B, float* C) { /******************************************************************** * * Compute C = A + B * where A is a (1 * n) vector * where B is a (1 * n) vector * where C is a (1 * n) vector * ********************************************************************/ // INSERT KERNEL CODE HERE int i = blockIdx.x * blockDim.x + threadIdx.x; if (i<n) { C[i] = A[i] + B[i]; } } void basicVecAdd( float *A, float *B, float *C, int n) { // Initialize thread block and kernel grid dimensions --------------------- const unsigned int BLOCK_SIZE = 256; dim3 DimGrid(ceil(n/256.0),1,1); dim3 DimBlock(256.0, 1, 1); VecAdd<<<DimGrid,DimBlock>>>(n, A, B, C); //INSERT CODE HERE }
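basicVecAdd assumes A, B and C already point to device memory; the surrounding allocation, copies and verification belong to the "INSERT CODE HERE" portions that live elsewhere. A minimal self-contained driver under that assumption is sketched below; the buffer names and problem size are illustrative, not from the original:

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

void basicVecAdd(float *A, float *B, float *C, int n);   // defined in the file above

int main() {
    const int n = 1 << 20;
    const size_t bytes = n * sizeof(float);

    // Host buffers with simple test data.
    float *h_A = (float *)malloc(bytes), *h_B = (float *)malloc(bytes), *h_C = (float *)malloc(bytes);
    for (int i = 0; i < n; ++i) { h_A[i] = 1.0f; h_B[i] = 2.0f; }

    // Device buffers and host-to-device copies.
    float *d_A, *d_B, *d_C;
    cudaMalloc((void **)&d_A, bytes);
    cudaMalloc((void **)&d_B, bytes);
    cudaMalloc((void **)&d_C, bytes);
    cudaMemcpy(d_A, h_A, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, bytes, cudaMemcpyHostToDevice);

    basicVecAdd(d_A, d_B, d_C, n);   // launches the VecAdd kernel defined above
    cudaDeviceSynchronize();

    cudaMemcpy(h_C, d_C, bytes, cudaMemcpyDeviceToHost);
    printf("C[0] = %f (expected 3.0)\n", h_C[0]);

    cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
    free(h_A); free(h_B); free(h_C);
    return 0;
}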
56561fef939094802d54182450ce6fa2d4cf39f4.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "kShuffleColumns.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *source = NULL; hipMalloc(&source, XSIZE*YSIZE); float *target = NULL; hipMalloc(&target, XSIZE*YSIZE); float *indices = NULL; hipMalloc(&indices, XSIZE*YSIZE); int width = XSIZE; int height = YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( kShuffleColumns), dim3(gridBlock),dim3(threadBlock), 0, 0, source,target,indices,width,height); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( kShuffleColumns), dim3(gridBlock),dim3(threadBlock), 0, 0, source,target,indices,width,height); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( kShuffleColumns), dim3(gridBlock),dim3(threadBlock), 0, 0, source,target,indices,width,height); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
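The two while-loops above pad iXSIZE and iYSIZE up to the next multiple of the block dimensions so the grid fully covers the matrix. The same padding can be written in closed form; a small equivalent sketch, not part of the original harness:

// Round `size` up to the next multiple of `block`; equivalent to the while-loop padding above.
static int roundUpToMultiple(int size, int block) {
    return ((size + block - 1) / block) * block;
}
// e.g. iXSIZE = roundUpToMultiple(XSIZE, BLOCKX); iYSIZE = roundUpToMultiple(YSIZE, BLOCKY);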
56561fef939094802d54182450ce6fa2d4cf39f4.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "kShuffleColumns.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *source = NULL; cudaMalloc(&source, XSIZE*YSIZE); float *target = NULL; cudaMalloc(&target, XSIZE*YSIZE); float *indices = NULL; cudaMalloc(&indices, XSIZE*YSIZE); int width = XSIZE; int height = YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); kShuffleColumns<<<gridBlock,threadBlock>>>(source,target,indices,width,height); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { kShuffleColumns<<<gridBlock,threadBlock>>>(source,target,indices,width,height); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { kShuffleColumns<<<gridBlock,threadBlock>>>(source,target,indices,width,height); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
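The kernel being benchmarked, kShuffleColumns, comes from a separate kShuffleColumns.cu that is not included here, so everything beyond its (source, target, indices, width, height) argument list is an assumption. Purely as an illustration of what a column-shuffle kernel with that signature typically looks like, a hypothetical sketch (not the real kernel):

// Hypothetical sketch only; the real kShuffleColumns may differ.
// Copies column indices[col] of `source` into column `col` of `target` for a row-major width x height matrix.
__global__ void kShuffleColumnsSketch(const float *source, float *target, const float *indices, int width, int height) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col < width && row < height) {
        int srcCol = (int)indices[col];   // the harness above stores the indices in a float buffer
        target[row * width + col] = source[row * width + srcCol];
    }
}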
a1c0122fabef28c2d2e81ab3740cc41d44bc48aa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // #include <stdio.h> // #include <hiprand/hiprand.h> // #include <hiprand/hiprand_kernel.h> // #include <hipfft.h> // #include <iostream> // using namespace std; // // #define ELECTRON_MASS 9.10938356e-31 // #define PROTON_MASS 1.6726219e-27 // #define ELECTRON_CHARGE 1 // // NOTE: setting electron charge to the default SI 1.6e-19 value breaks interpolation // #define EPSILON_ZERO 8.854e-12 // // //TODO: THIS HERE TIMESTEP I AM NOT COMPLETELY CERTAIN ABOUT // #define NT 1000 // #define N_grid 16 // #define N_particles_1_axis 64 // #define N_particles (N_particles_1_axis*N_particles_1_axis*N_particles_1_axis) // #define L 1e-4 // #define dt 1e-25 // #define N_grid_all (N_grid *N_grid * N_grid) // #define dx (L/float(N_grid)) // #define dy dx // #define dz dx // // // dim3 particleThreads(512); // dim3 particleBlocks((N_particles+particleThreads.x - 1)/particleThreads.x); // dim3 gridThreads(8,8,8); dim3 gridBlocks((N_grid+gridThreads.x-1)/gridThreads.x, (N_grid + gridThreads.y - 1)/gridThreads.y, (N_grid+gridThreads.z-1)/gridThreads.z); static void CUDA_ERROR( hipError_t err){ if (err != hipSuccess) { printf("CUDA ERROR: %s, exiting\n", hipGetErrorString(err)); exit(-1); } } struct Grid{ float *rho; float *Ex; float *Ey; float *Ez; float *d_rho; float *d_Ex; float *d_Ey; float *d_Ez; //fourier transformed versions of grid quantities, for fields solver hipfftComplex *d_fourier_rho; hipfftComplex *d_fourier_Ex; hipfftComplex *d_fourier_Ey; hipfftComplex *d_fourier_Ez; //instructions for cuFFT hipfftHandle plan_forward; hipfftHandle plan_backward; //the wave vector, for the field solver float *kv; float *d_kv; }; struct Particle{ //keeps information about the position of one particle in (6D) phase space (positions, velocities) float x; float y; float z; float vx; float vy; float vz; }; struct Species{ //keeps information about one distinct group of particles float m; //mass float q; //charge //number of particles in group: not fully used yet long int N; Particle *particles; Particle *d_particles; }; __global__ void solve_poisson(float *d_kv, hipfftComplex *d_fourier_rho, hipfftComplex *d_fourier_Ex, hipfftComplex *d_fourier_Ey, hipfftComplex *d_fourier_Ez){ /*solve poisson equation d_kv: wave vector d_fourier_rho: complex array of fourier transformed charge densities d_fourier_E(i): */ int i = blockIdx.x*blockDim.x + threadIdx.x; int j = blockIdx.y*blockDim.y + threadIdx.y; int k = blockIdx.z*blockDim.z + threadIdx.z; int index = k*N_grid*N_grid + j*N_grid + i; if(i<N_grid && j<N_grid && k<N_grid){ //wave vector magnitude squared float k2 = d_kv[i]*d_kv[i] + d_kv[j]*d_kv[j] + d_kv[k]*d_kv[k]; if (i==0 && j==0 && k ==0) { k2 = 1.0f; //dodge a bullet with a division by zero } //see: Birdsall Langdon, Plasma Physics via Computer Simulation, page 19 d_fourier_Ex[index].x = -d_kv[i]*d_fourier_rho[index].x/k2/EPSILON_ZERO; d_fourier_Ex[index].y = -d_kv[i]*d_fourier_rho[index].y/k2/EPSILON_ZERO; d_fourier_Ey[index].x = -d_kv[j]*d_fourier_rho[index].x/k2/EPSILON_ZERO; d_fourier_Ey[index].y = -d_kv[j]*d_fourier_rho[index].y/k2/EPSILON_ZERO; d_fourier_Ez[index].x = -d_kv[k]*d_fourier_rho[index].x/k2/EPSILON_ZERO; d_fourier_Ez[index].y = -d_kv[k]*d_fourier_rho[index].y/k2/EPSILON_ZERO; } } __global__ void real2complex(float *input, hipfftComplex *output){ //converts array of floats to array of real complex numbers int i = blockIdx.x*blockDim.x + threadIdx.x; int j = blockIdx.y*blockDim.y + threadIdx.y; int k = 
blockIdx.z*blockDim.z + threadIdx.z; int index = k*N_grid*N_grid + j*N_grid + i; if(i<N_grid && j<N_grid && k<N_grid) { output[index].x = input[index]; output[index].y = 0.0f; } } __global__ void complex2real(hipfftComplex *input, float *output){ //converts array of complex inputs to floats (discards) int i = blockIdx.x*blockDim.x + threadIdx.x; int j = blockIdx.y*blockDim.y + threadIdx.y; int k = blockIdx.z*blockDim.z + threadIdx.z; int index = k*N_grid*N_grid + j*N_grid + i; if(i<N_grid && j<N_grid && k<N_grid){ output[index] = input[index].x/float(N_grid_all); } } __global__ void scale_down_after_fft(float *d_Ex, float *d_Ey, float *d_Ez){ int i = blockIdx.x*blockDim.x + threadIdx.x; int j = blockIdx.y*blockDim.y + threadIdx.y; int k = blockIdx.z*blockDim.z + threadIdx.z; int index = k*N_grid*N_grid + j*N_grid + i; if(i<N_grid && j<N_grid && k<N_grid){ d_Ex[index] /= float(N_grid_all); d_Ey[index] /= float(N_grid_all); d_Ez[index] /= float(N_grid_all); } } __global__ void set_grid_array_to_value(float *arr, float value){ int i = blockIdx.x*blockDim.x + threadIdx.x; int j = blockIdx.y*blockDim.y + threadIdx.y; int k = blockIdx.z*blockDim.z + threadIdx.z; int index = k*N_grid*N_grid + j*N_grid + i; if((i<N_grid) && (j<N_grid) && (k<N_grid)){ arr[index] = value; } } void init_grid(Grid *g){ g->rho = new float[N_grid_all]; g->Ex = new float[N_grid_all]; g->Ey = new float[N_grid_all]; g->Ez = new float[N_grid_all]; g->kv = new float[N_grid]; for (int i =0; i<=N_grid/2; i++) { g->kv[i] = i*2*M_PI; } for (int i = N_grid/2 + 1; i < N_grid; i++) { g->kv[i] = (i-N_grid)*2*M_PI; } CUDA_ERROR(hipMalloc((void**)&(g->d_kv), sizeof(float)*N_grid)); CUDA_ERROR(hipMemcpy(g->d_kv, g->kv, sizeof(float)*N_grid, hipMemcpyHostToDevice)); CUDA_ERROR(hipMalloc((void**)&(g->d_fourier_rho), sizeof(hipfftComplex)*N_grid_all)); CUDA_ERROR(hipMalloc((void**)&(g->d_fourier_Ex), sizeof(hipfftComplex)*N_grid_all)); CUDA_ERROR(hipMalloc((void**)&(g->d_fourier_Ey), sizeof(hipfftComplex)*N_grid_all)); CUDA_ERROR(hipMalloc((void**)&(g->d_fourier_Ez), sizeof(hipfftComplex)*N_grid_all)); CUDA_ERROR(hipMalloc((void**)&(g->d_rho), sizeof(float)*N_grid_all)); CUDA_ERROR(hipMemcpy(g->d_rho, g->rho, sizeof(float)*N_grid_all, hipMemcpyHostToDevice)); CUDA_ERROR(hipMalloc((void**)&(g->d_Ex), sizeof(float)*N_grid_all)); CUDA_ERROR(hipMemcpy(g->d_Ex, g->Ex, sizeof(float)*N_grid_all, hipMemcpyHostToDevice)); CUDA_ERROR(hipMalloc((void**)&(g->d_Ey), sizeof(float)*N_grid_all)); CUDA_ERROR(hipMemcpy(g->d_Ey, g->Ey, sizeof(float)*N_grid_all, hipMemcpyHostToDevice)); CUDA_ERROR(hipMalloc((void**)&(g->d_Ez), sizeof(float)*N_grid_all)); CUDA_ERROR(hipMemcpy(g->d_Ez, g->Ez, sizeof(float)*N_grid_all, hipMemcpyHostToDevice)); hipfftPlan3d(&(g->plan_forward), N_grid, N_grid, N_grid, HIPFFT_R2C); hipfftPlan3d(&(g->plan_backward), N_grid, N_grid, N_grid, HIPFFT_C2R); } void debug_field_solver_uniform(Grid *g){ float* linear_field_x = new float[N_grid_all]; float* linear_field_y = new float[N_grid_all]; float* linear_field_z = new float[N_grid_all]; for(int i = 0; i<N_grid; i++){ for(int j = 0; j<N_grid; j++){ for(int k = 0; k<N_grid; k++){ int index = i*N_grid*N_grid + j*N_grid + k; linear_field_x[index] = 1000; linear_field_y[index] = 0; linear_field_z[index] = 0; // printf("%d %f %f %f\n", index, linear_field_x[index], linear_field_y[index],linear_field_z[index]); } } } // cout << "if happy and know it clap your hands" << endl; hipMemcpy(g->d_Ex, linear_field_x, sizeof(float)*N_grid_all, hipMemcpyHostToDevice); hipMemcpy(g->d_Ey, 
linear_field_y, sizeof(float)*N_grid_all, hipMemcpyHostToDevice); hipMemcpy(g->d_Ez, linear_field_z, sizeof(float)*N_grid_all, hipMemcpyHostToDevice); } void debug_field_solver_sine(Grid *g) { float* linear_field_x = new float[N_grid_all]; float* linear_field_y = new float[N_grid_all]; float* linear_field_z = new float[N_grid_all]; for(int i = 0; i<N_grid; i++){ for(int j = 0; j<N_grid; j++){ for(int k = 0; k<N_grid; k++){ int index = i*N_grid*N_grid + j*N_grid + k; linear_field_x[index] = 1000*sin(2*M_PI*((float)k/(float)N_grid)); linear_field_y[index] = 1000*sin(2*M_PI*((float)j/(float)N_grid)); linear_field_z[index] = 1000*sin(2*M_PI*((float)i/(float)N_grid)); } } } hipMemcpy(g->d_Ex, linear_field_x, sizeof(float)*N_grid_all, hipMemcpyHostToDevice); hipMemcpy(g->d_Ey, linear_field_y, sizeof(float)*N_grid_all, hipMemcpyHostToDevice); hipMemcpy(g->d_Ez, linear_field_z, sizeof(float)*N_grid_all, hipMemcpyHostToDevice); } // void debug_field_solver_quadratic(Grid *g) // { // float* linear_field_x = new float[N_grid_all]; // float* linear_field_y = new float[N_grid_all]; // float* linear_field_z = new float[N_grid_all]; // for(int i = 0; i<N_grid; i++){ // for(int j = 0; j<N_grid; j++){ // for(int k = 0; k<N_grid; k++){ // int index = i*N_grid*N_grid + j*N_grid + k; // linear_field_x[index] = (dx*i)*(dx*i); // linear_field_y[index] = (dx*j)*(dx*j); // linear_field_z[index] = (dx*k)*(dx*k); // } // } // } // hipMemcpy(g->d_Ex, linear_field_x, sizeof(float)*N_grid_all, hipMemcpyHostToDevice); // hipMemcpy(g->d_Ey, linear_field_y, sizeof(float)*N_grid_all, hipMemcpyHostToDevice); // hipMemcpy(g->d_Ez, linear_field_z, sizeof(float)*N_grid_all, hipMemcpyHostToDevice); // } void field_solver(Grid *g){ hipfftExecR2C(g->plan_forward, g->d_rho, g->d_fourier_rho); CUDA_ERROR(hipDeviceSynchronize()); hipLaunchKernelGGL(( solve_poisson), dim3(gridBlocks), dim3(gridThreads), 0, 0, g->d_kv, g->d_fourier_rho, g->d_fourier_Ex, g->d_fourier_Ey, g->d_fourier_Ez); CUDA_ERROR(hipDeviceSynchronize()); hipfftExecC2R(g->plan_backward, g->d_fourier_Ex, g->d_Ex); hipfftExecC2R(g->plan_backward, g->d_fourier_Ey, g->d_Ey); hipfftExecC2R(g->plan_backward, g->d_fourier_Ez, g->d_Ez); hipLaunchKernelGGL(( scale_down_after_fft), dim3(gridBlocks), dim3(gridThreads), 0, 0, g->d_Ex, g->d_Ey, g->d_Ez); CUDA_ERROR(hipDeviceSynchronize()); } __device__ int position_to_grid_index(float X){ return int(X/dx); } __device__ float position_in_cell(float x){ int grid_index = position_to_grid_index(x); return x - grid_index*dx; } __global__ void scatter_charge(Particle *d_P, float q, float* d_rho){ int n = blockIdx.x*blockDim.x + threadIdx.x; float x = d_P[n].x; float y = d_P[n].y; float z = d_P[n].z; int i = position_to_grid_index(x); int j = position_to_grid_index(y); int k = position_to_grid_index(z); float Xr = position_in_cell(x)/dx; float Xl = 1 - Xr; float Yr = position_in_cell(y)/dy; float Yl = 1 - Yr; float Zr = position_in_cell(z)/dz; float Zl = 1 - Zr; //this part is literally hitler - not just unreadable but slow af //TODO: redo this using a reduce atomicAdd(&(d_rho[N_grid * N_grid * ((k)%N_grid) + N_grid * ((j)%N_grid) + ((i)%N_grid)]), q*Xl*Yl*Zl); atomicAdd(&(d_rho[N_grid * N_grid * ((k)%N_grid) + N_grid * ((j)%N_grid) + ((i+1)%N_grid)]), q*Xr*Yl*Zl); atomicAdd(&(d_rho[N_grid * N_grid * ((k)%N_grid) + N_grid * ((j+1)%N_grid) + ((i)%N_grid)]), q*Xl*Yr*Zl); atomicAdd(&(d_rho[N_grid * N_grid * ((k+1)%N_grid) + N_grid * ((j)%N_grid) + ((i)%N_grid)]), q*Xl*Yl*Zr); atomicAdd(&(d_rho[N_grid * N_grid * ((k)%N_grid) + N_grid * 
((j+1)%N_grid) + ((i+1)%N_grid)]), q*Xr*Yr*Zl); atomicAdd(&(d_rho[N_grid * N_grid * ((k+1)%N_grid) + N_grid * ((j)%N_grid) + ((i+1)%N_grid)]), q*Xr*Yl*Zr); atomicAdd(&(d_rho[N_grid * N_grid * ((k+1)%N_grid) + N_grid * ((j+1)%N_grid) + ((i)%N_grid)]), q*Xl*Yr*Zr); atomicAdd(&(d_rho[N_grid * N_grid * ((k+1)%N_grid) + N_grid * ((j+1)%N_grid) + ((i+1)%N_grid)]), q*Xr*Yr*Zr); } __device__ float gather_grid_to_particle(Particle *p, float *grid){ float x = p->x; float y = p->y; float z = p->z; int i = position_to_grid_index(x); int j = position_to_grid_index(y); int k = position_to_grid_index(z); float Xr = position_in_cell(x)/dx; float Xl = 1 - Xr; float Yr = position_in_cell(y)/dy; float Yl = 1 - Yr; float Zr = position_in_cell(z)/dz; float Zl = 1 - Zr; float interpolated_scalar = 0.0f; //this part is also hitler but not as much //TODO: zafunkcjowa ten kawaek interpolated_scalar += grid[N_grid * N_grid * ((k)%N_grid) + N_grid * ((j)%N_grid) + ((i)%N_grid)]*Xl*Yl*Zl; interpolated_scalar += grid[N_grid * N_grid * ((k)%N_grid) + N_grid * ((j)%N_grid) + ((i+1)%N_grid)]*Xr*Yl*Zl; interpolated_scalar += grid[N_grid * N_grid * ((k)%N_grid) + N_grid * ((j+1)%N_grid) + ((i)%N_grid)]*Xl*Yr*Zl; interpolated_scalar += grid[N_grid * N_grid * ((k+1)%N_grid) + N_grid * ((j)%N_grid) + ((i)%N_grid)]*Xl*Yl*Zr; interpolated_scalar += grid[N_grid * N_grid * ((k)%N_grid) + N_grid * ((j+1)%N_grid) + ((i+1)%N_grid)]*Xr*Yr*Zl; interpolated_scalar += grid[N_grid * N_grid * ((k+1)%N_grid) + N_grid * ((j)%N_grid) + ((i+1)%N_grid)]*Xr*Yl*Zr; interpolated_scalar += grid[N_grid * N_grid * ((k+1)%N_grid) + N_grid * ((j+1)%N_grid) + ((i)%N_grid)]*Xl*Yr*Zr; interpolated_scalar += grid[N_grid * N_grid * ((k+1)%N_grid) + N_grid * ((j+1)%N_grid) + ((i+1)%N_grid)]*Xr*Yr*Zr; return interpolated_scalar; } __global__ void InitParticleArrays(Particle *d_p, float shiftx, float shifty, float shiftz){ int n = blockDim.x * blockIdx.x + threadIdx.x; if (n<N_particles){ Particle *p = &(d_p[n]); int i = n / (int)(N_particles_1_axis*N_particles_1_axis); int j = (int) (n/N_particles_1_axis) % N_particles_1_axis; int k = n % N_particles_1_axis; p->x = L/float(N_particles_1_axis) * i + shiftx; p->x = p->x - floor(p->x/L)*L; p->y = L/float(N_particles_1_axis) * j + shifty; p->y = p->y - floor(p->y/L)*L; p->z = L/float(N_particles_1_axis) * k + shiftz; p->z = p->z - floor(p->z/L)*L; p->vx = 0.0f; p->vy = 0.0f; p->vz = 0.0f; } } __global__ void InitialVelocityStep(Particle *d_p, float q, float m, float *d_Ex, float *d_Ey, float *d_Ez){ int n = blockDim.x * blockIdx.x + threadIdx.x; { Particle *p = &(d_p[n]); //gather electric field float Ex = gather_grid_to_particle(p, d_Ex); float Ey = gather_grid_to_particle(p, d_Ey); float Ez = gather_grid_to_particle(p, d_Ez); //use electric field to accelerate particles p->vx -= 0.5f*dt*q/m*Ex; p->vy -= 0.5f*dt*q/m*Ey; p->vz -= 0.5f*dt*q/m*Ez; } } __global__ void ParticleKernel(Particle *d_p, float q, float m, float *d_Ex, float *d_Ey, float *d_Ez){ int n = blockDim.x * blockIdx.x + threadIdx.x; if(n<N_particles) { Particle *p = &(d_p[n]); //push positions, enforce periodic boundary conditions p->x = p->x + p->vx*dt; p->x = p->x - floor(p->x/L)*L; p->y = p->y + p->vy*dt; p->y = p->y - floor(p->y/L)*L; p->z = p->z + p->vz*dt; p->z = p->z - floor(p->z/L)*L; //gather electric field float Ex = gather_grid_to_particle(p, d_Ex); float Ey = gather_grid_to_particle(p, d_Ey); float Ez = gather_grid_to_particle(p, d_Ez); //use electric field to accelerate particles p->vx += dt*q/m*Ex; p->vy += dt*q/m*Ey; p->vz += 
dt*q/m*Ez; } } void init_species(Species *s, float shiftx, float shifty, float shiftz){ s->particles = new Particle[N_particles]; CUDA_ERROR(hipMalloc((void**)&(s->d_particles), sizeof(Particle)*N_particles)); cout << "initializing particles" << endl; hipLaunchKernelGGL(( InitParticleArrays), dim3(particleBlocks), dim3(particleThreads), 0, 0, s->d_particles, shiftx, shifty, shiftz); } void dump_density_data(Grid *g, char* name){ cout << "dumping" << endl; CUDA_ERROR(hipMemcpy(g->rho, g->d_rho, sizeof(float)*N_grid_all, hipMemcpyDeviceToHost)); CUDA_ERROR(hipMemcpy(g->Ex, g->d_Ex, sizeof(float)*N_grid_all, hipMemcpyDeviceToHost)); CUDA_ERROR(hipMemcpy(g->Ey, g->d_Ey, sizeof(float)*N_grid_all, hipMemcpyDeviceToHost)); CUDA_ERROR(hipMemcpy(g->Ez, g->d_Ez, sizeof(float)*N_grid_all, hipMemcpyDeviceToHost)); FILE *density_data = fopen(name, "w"); float rho_total = 0.0f; for (int n = 0; n < N_grid_all; n++) { fprintf(density_data, "%f %.0f %.0f %.0f\n", g->rho[n], g->Ex[n], g->Ey[n], g->Ez[n]); // printf("%d %f %f %f %f\n", n, g->rho[n], g->Ex[n], g->Ey[n], g->Ez[n]); rho_total += g->rho[n]; } printf("rho total: %f\n", rho_total); } void dump_running_density_data(Grid *g, char* name){ CUDA_ERROR(hipMemcpy(g->rho, g->d_rho, sizeof(float)*N_grid_all, hipMemcpyDeviceToHost)); CUDA_ERROR(hipMemcpy(g->Ex, g->d_Ex, sizeof(float)*N_grid_all, hipMemcpyDeviceToHost)); CUDA_ERROR(hipMemcpy(g->Ey, g->d_Ey, sizeof(float)*N_grid_all, hipMemcpyDeviceToHost)); CUDA_ERROR(hipMemcpy(g->Ez, g->d_Ez, sizeof(float)*N_grid_all, hipMemcpyDeviceToHost)); FILE *density_data = fopen(name, "w"); for (int n = 0; n < N_grid_all; n++) { fprintf(density_data, "\n%f %f %f %f", g->rho[n], g->Ex[n], g->Ey[n], g->Ez[n]); } fclose(density_data); } void dump_position_data(Species *s, char* name){ cout << "Copying particles from GPU to device"<< endl; CUDA_ERROR(hipMemcpy(s->particles, s->d_particles, sizeof(Particle)*N_particles, hipMemcpyDeviceToHost)); cout << "Copied particles from GPU to device"<< endl; FILE *initial_position_data = fopen(name, "w"); for (int i =0; i<N_particles; i++) { Particle *p = &(s->particles[i]); fprintf(initial_position_data, "%f %f %f %f %f %f\n", p->x, p->y, p->z, p->vx, p->vy, p->vz); } // free(s->particles); fclose(initial_position_data); } void init_timestep(Grid *g, Species *electrons, Species *ions){ hipLaunchKernelGGL(( set_grid_array_to_value), dim3(gridBlocks), dim3(gridThreads), 0, 0, g->d_rho, 0); CUDA_ERROR(hipDeviceSynchronize()); hipLaunchKernelGGL(( scatter_charge), dim3(particleBlocks), dim3(particleThreads), 0, 0, electrons->d_particles, electrons->q, g->d_rho); CUDA_ERROR(hipDeviceSynchronize()); hipLaunchKernelGGL(( scatter_charge), dim3(particleBlocks), dim3(particleThreads), 0, 0, ions->d_particles, ions->q, g->d_rho); CUDA_ERROR(hipDeviceSynchronize()); // debug_field_solver_sine(g); field_solver(g); CUDA_ERROR(hipDeviceSynchronize()); hipLaunchKernelGGL(( InitialVelocityStep), dim3(particleBlocks), dim3(particleThreads), 0, 0, electrons->d_particles, electrons->q, electrons->m, g->d_Ex, g->d_Ey, g->d_Ez); hipLaunchKernelGGL(( InitialVelocityStep), dim3(particleBlocks), dim3(particleThreads), 0, 0, ions->d_particles, ions->q, ions->m, g->d_Ex, g->d_Ey, g->d_Ez); CUDA_ERROR(hipDeviceSynchronize()); } void timestep(Grid *g, Species *electrons, Species *ions){ //1. 
move particles, gather electric fields at their locations, accelerate particles hipLaunchKernelGGL(( ParticleKernel), dim3(particleBlocks), dim3(particleThreads), 0, 0, electrons->d_particles, electrons->q, electrons->m, g->d_Ex, g->d_Ey, g->d_Ez); hipLaunchKernelGGL(( ParticleKernel), dim3(particleBlocks), dim3(particleThreads), 0, 0, ions->d_particles, ions->q, ions->m, g->d_Ex, g->d_Ey, g->d_Ez); //potential TODO: sort particles????? //2. clear charge density for scattering fields to particles charge hipLaunchKernelGGL(( set_grid_array_to_value), dim3(gridBlocks), dim3(gridThreads), 0, 0, g->d_rho, 0); CUDA_ERROR(hipDeviceSynchronize()); //3. gather charge from new particle position to grid //TODO: note that I may need to cudaSyncThreads between these steps hipLaunchKernelGGL(( scatter_charge), dim3(particleBlocks), dim3(particleThreads), 0, 0, electrons->d_particles, electrons->q, g->d_rho); CUDA_ERROR(hipDeviceSynchronize()); hipLaunchKernelGGL(( scatter_charge), dim3(particleBlocks), dim3(particleThreads), 0, 0, ions->d_particles, ions->q, g->d_rho); CUDA_ERROR(hipDeviceSynchronize()); //4. use charge density to calculate field field_solver(g); CUDA_ERROR(hipDeviceSynchronize()); } int main(void){ printf("N_grid Threads per block Blocks\n"); printf("%7d %17d %6d\n", N_grid_all, gridThreads.x, gridBlocks.x); hipEvent_t startLoop, endLoop; hipEventCreate(&startLoop); hipEventCreate(&endLoop); Grid g; init_grid(&g); Species electrons; electrons.q = -ELECTRON_CHARGE; electrons.m = ELECTRON_MASS; electrons.N = N_particles; init_species(&electrons, L/100.0f, 0, 0); Species ions; ions.q = +ELECTRON_CHARGE; ions.m = PROTON_MASS; ions.N = N_particles; init_species(&ions, 0, 0, 0); //TODO: initialize for two stream instability init_timestep(&g, &electrons, &ions); CUDA_ERROR(hipGetLastError()); hipEventSynchronize(startLoop); hipEventRecord(startLoop); for(int i =0; i<NT; i++){ char* filename = new char[100]; timestep(&g, &electrons, &ions); } hipDeviceSynchronize(); hipEventSynchronize(endLoop); hipEventRecord(endLoop); cout << endl << "finished time loop" << endl; float loopRuntimeMS = 0; hipEventElapsedTime(&loopRuntimeMS, startLoop, endLoop); printf("N_grid Threads per block Blocks\tRuntime\n"); printf("%7d %17d %6d %f\n", N_grid_all, gridThreads.x, gridBlocks.x, loopRuntimeMS); if (loopRuntimeMS > 0.0001) { char* filename = new char[100]; sprintf(filename, "benchmark/pb_%d_%d_%d.bdat", N_grid, gridThreads.x, gridBlocks.x); FILE *benchmark = fopen(filename, "w"); fprintf(benchmark, "N_grid Threads per block Blocks\tRuntime\n"); fprintf(benchmark, "%7d %17d %6d %f\n", N_grid_all, gridThreads.x, gridBlocks.x, loopRuntimeMS); fclose(benchmark); } else { printf("Not saved!\n"); } CUDA_ERROR(hipFree(electrons.d_particles)); CUDA_ERROR(hipFree(g.d_rho)); CUDA_ERROR(hipFree(g.d_Ex)); CUDA_ERROR(hipFree(g.d_Ey)); CUDA_ERROR(hipFree(g.d_Ez)); CUDA_ERROR(hipFree(g.d_fourier_Ex)); CUDA_ERROR(hipFree(g.d_fourier_Ey)); CUDA_ERROR(hipFree(g.d_fourier_Ez)); CUDA_ERROR(hipFree(g.d_fourier_rho)); }
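scatter_charge and gather_grid_to_particle above use cloud-in-cell (trilinear) weighting: a particle deposits onto, or gathers from, the eight grid nodes surrounding it, with weights formed from its fractional position inside the cell (the Xl/Xr, Yl/Yr, Zl/Zr factors). A minimal 1-D illustration of how those weights arise, written as a standalone sketch rather than code taken from the file:

// 1-D cloud-in-cell weights, mirroring position_to_grid_index()/position_in_cell() above.
// A particle between nodes i and i+1 contributes (1 - f) to node i and f to node i+1,
// where f is its fractional offset inside the cell; the 3-D weights used above are
// products of three such 1-D weights (Xl*Yl*Zl, Xr*Yl*Zl, ..., Xr*Yr*Zr).
void cicWeights1D(float x, float dx, int *i, float *wLeft, float *wRight) {
    *i = (int)(x / dx);               // index of the left grid node
    float f = (x - (*i) * dx) / dx;   // fractional position inside the cell, in [0, 1)
    *wLeft  = 1.0f - f;
    *wRight = f;
}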
a1c0122fabef28c2d2e81ab3740cc41d44bc48aa.cu
// #include <stdio.h> // #include <curand.h> // #include <curand_kernel.h> // #include <cufft.h> // #include <iostream> // using namespace std; // // #define ELECTRON_MASS 9.10938356e-31 // #define PROTON_MASS 1.6726219e-27 // #define ELECTRON_CHARGE 1 // // NOTE: setting electron charge to the default SI 1.6e-19 value breaks interpolation // #define EPSILON_ZERO 8.854e-12 // // //TODO: THIS HERE TIMESTEP I AM NOT COMPLETELY CERTAIN ABOUT // #define NT 1000 // #define N_grid 16 // #define N_particles_1_axis 64 // #define N_particles (N_particles_1_axis*N_particles_1_axis*N_particles_1_axis) // #define L 1e-4 // #define dt 1e-25 // #define N_grid_all (N_grid *N_grid * N_grid) // #define dx (L/float(N_grid)) // #define dy dx // #define dz dx // // // dim3 particleThreads(512); // dim3 particleBlocks((N_particles+particleThreads.x - 1)/particleThreads.x); // dim3 gridThreads(8,8,8); dim3 gridBlocks((N_grid+gridThreads.x-1)/gridThreads.x, (N_grid + gridThreads.y - 1)/gridThreads.y, (N_grid+gridThreads.z-1)/gridThreads.z); static void CUDA_ERROR( cudaError_t err){ if (err != cudaSuccess) { printf("CUDA ERROR: %s, exiting\n", cudaGetErrorString(err)); exit(-1); } } struct Grid{ float *rho; float *Ex; float *Ey; float *Ez; float *d_rho; float *d_Ex; float *d_Ey; float *d_Ez; //fourier transformed versions of grid quantities, for fields solver cufftComplex *d_fourier_rho; cufftComplex *d_fourier_Ex; cufftComplex *d_fourier_Ey; cufftComplex *d_fourier_Ez; //instructions for cuFFT cufftHandle plan_forward; cufftHandle plan_backward; //the wave vector, for the field solver float *kv; float *d_kv; }; struct Particle{ //keeps information about the position of one particle in (6D) phase space (positions, velocities) float x; float y; float z; float vx; float vy; float vz; }; struct Species{ //keeps information about one distinct group of particles float m; //mass float q; //charge //number of particles in group: not fully used yet long int N; Particle *particles; Particle *d_particles; }; __global__ void solve_poisson(float *d_kv, cufftComplex *d_fourier_rho, cufftComplex *d_fourier_Ex, cufftComplex *d_fourier_Ey, cufftComplex *d_fourier_Ez){ /*solve poisson equation d_kv: wave vector d_fourier_rho: complex array of fourier transformed charge densities d_fourier_E(i): */ int i = blockIdx.x*blockDim.x + threadIdx.x; int j = blockIdx.y*blockDim.y + threadIdx.y; int k = blockIdx.z*blockDim.z + threadIdx.z; int index = k*N_grid*N_grid + j*N_grid + i; if(i<N_grid && j<N_grid && k<N_grid){ //wave vector magnitude squared float k2 = d_kv[i]*d_kv[i] + d_kv[j]*d_kv[j] + d_kv[k]*d_kv[k]; if (i==0 && j==0 && k ==0) { k2 = 1.0f; //dodge a bullet with a division by zero } //see: Birdsall Langdon, Plasma Physics via Computer Simulation, page 19 d_fourier_Ex[index].x = -d_kv[i]*d_fourier_rho[index].x/k2/EPSILON_ZERO; d_fourier_Ex[index].y = -d_kv[i]*d_fourier_rho[index].y/k2/EPSILON_ZERO; d_fourier_Ey[index].x = -d_kv[j]*d_fourier_rho[index].x/k2/EPSILON_ZERO; d_fourier_Ey[index].y = -d_kv[j]*d_fourier_rho[index].y/k2/EPSILON_ZERO; d_fourier_Ez[index].x = -d_kv[k]*d_fourier_rho[index].x/k2/EPSILON_ZERO; d_fourier_Ez[index].y = -d_kv[k]*d_fourier_rho[index].y/k2/EPSILON_ZERO; } } __global__ void real2complex(float *input, cufftComplex *output){ //converts array of floats to array of real complex numbers int i = blockIdx.x*blockDim.x + threadIdx.x; int j = blockIdx.y*blockDim.y + threadIdx.y; int k = blockIdx.z*blockDim.z + threadIdx.z; int index = k*N_grid*N_grid + j*N_grid + i; if(i<N_grid && j<N_grid && k<N_grid) { 
output[index].x = input[index]; output[index].y = 0.0f; } } __global__ void complex2real(cufftComplex *input, float *output){ //converts array of complex inputs to floats (discards) int i = blockIdx.x*blockDim.x + threadIdx.x; int j = blockIdx.y*blockDim.y + threadIdx.y; int k = blockIdx.z*blockDim.z + threadIdx.z; int index = k*N_grid*N_grid + j*N_grid + i; if(i<N_grid && j<N_grid && k<N_grid){ output[index] = input[index].x/float(N_grid_all); } } __global__ void scale_down_after_fft(float *d_Ex, float *d_Ey, float *d_Ez){ int i = blockIdx.x*blockDim.x + threadIdx.x; int j = blockIdx.y*blockDim.y + threadIdx.y; int k = blockIdx.z*blockDim.z + threadIdx.z; int index = k*N_grid*N_grid + j*N_grid + i; if(i<N_grid && j<N_grid && k<N_grid){ d_Ex[index] /= float(N_grid_all); d_Ey[index] /= float(N_grid_all); d_Ez[index] /= float(N_grid_all); } } __global__ void set_grid_array_to_value(float *arr, float value){ int i = blockIdx.x*blockDim.x + threadIdx.x; int j = blockIdx.y*blockDim.y + threadIdx.y; int k = blockIdx.z*blockDim.z + threadIdx.z; int index = k*N_grid*N_grid + j*N_grid + i; if((i<N_grid) && (j<N_grid) && (k<N_grid)){ arr[index] = value; } } void init_grid(Grid *g){ g->rho = new float[N_grid_all]; g->Ex = new float[N_grid_all]; g->Ey = new float[N_grid_all]; g->Ez = new float[N_grid_all]; g->kv = new float[N_grid]; for (int i =0; i<=N_grid/2; i++) { g->kv[i] = i*2*M_PI; } for (int i = N_grid/2 + 1; i < N_grid; i++) { g->kv[i] = (i-N_grid)*2*M_PI; } CUDA_ERROR(cudaMalloc((void**)&(g->d_kv), sizeof(float)*N_grid)); CUDA_ERROR(cudaMemcpy(g->d_kv, g->kv, sizeof(float)*N_grid, cudaMemcpyHostToDevice)); CUDA_ERROR(cudaMalloc((void**)&(g->d_fourier_rho), sizeof(cufftComplex)*N_grid_all)); CUDA_ERROR(cudaMalloc((void**)&(g->d_fourier_Ex), sizeof(cufftComplex)*N_grid_all)); CUDA_ERROR(cudaMalloc((void**)&(g->d_fourier_Ey), sizeof(cufftComplex)*N_grid_all)); CUDA_ERROR(cudaMalloc((void**)&(g->d_fourier_Ez), sizeof(cufftComplex)*N_grid_all)); CUDA_ERROR(cudaMalloc((void**)&(g->d_rho), sizeof(float)*N_grid_all)); CUDA_ERROR(cudaMemcpy(g->d_rho, g->rho, sizeof(float)*N_grid_all, cudaMemcpyHostToDevice)); CUDA_ERROR(cudaMalloc((void**)&(g->d_Ex), sizeof(float)*N_grid_all)); CUDA_ERROR(cudaMemcpy(g->d_Ex, g->Ex, sizeof(float)*N_grid_all, cudaMemcpyHostToDevice)); CUDA_ERROR(cudaMalloc((void**)&(g->d_Ey), sizeof(float)*N_grid_all)); CUDA_ERROR(cudaMemcpy(g->d_Ey, g->Ey, sizeof(float)*N_grid_all, cudaMemcpyHostToDevice)); CUDA_ERROR(cudaMalloc((void**)&(g->d_Ez), sizeof(float)*N_grid_all)); CUDA_ERROR(cudaMemcpy(g->d_Ez, g->Ez, sizeof(float)*N_grid_all, cudaMemcpyHostToDevice)); cufftPlan3d(&(g->plan_forward), N_grid, N_grid, N_grid, CUFFT_R2C); cufftPlan3d(&(g->plan_backward), N_grid, N_grid, N_grid, CUFFT_C2R); } void debug_field_solver_uniform(Grid *g){ float* linear_field_x = new float[N_grid_all]; float* linear_field_y = new float[N_grid_all]; float* linear_field_z = new float[N_grid_all]; for(int i = 0; i<N_grid; i++){ for(int j = 0; j<N_grid; j++){ for(int k = 0; k<N_grid; k++){ int index = i*N_grid*N_grid + j*N_grid + k; linear_field_x[index] = 1000; linear_field_y[index] = 0; linear_field_z[index] = 0; // printf("%d %f %f %f\n", index, linear_field_x[index], linear_field_y[index],linear_field_z[index]); } } } // cout << "if happy and know it clap your hands" << endl; cudaMemcpy(g->d_Ex, linear_field_x, sizeof(float)*N_grid_all, cudaMemcpyHostToDevice); cudaMemcpy(g->d_Ey, linear_field_y, sizeof(float)*N_grid_all, cudaMemcpyHostToDevice); cudaMemcpy(g->d_Ez, linear_field_z, 
sizeof(float)*N_grid_all, cudaMemcpyHostToDevice); } void debug_field_solver_sine(Grid *g) { float* linear_field_x = new float[N_grid_all]; float* linear_field_y = new float[N_grid_all]; float* linear_field_z = new float[N_grid_all]; for(int i = 0; i<N_grid; i++){ for(int j = 0; j<N_grid; j++){ for(int k = 0; k<N_grid; k++){ int index = i*N_grid*N_grid + j*N_grid + k; linear_field_x[index] = 1000*sin(2*M_PI*((float)k/(float)N_grid)); linear_field_y[index] = 1000*sin(2*M_PI*((float)j/(float)N_grid)); linear_field_z[index] = 1000*sin(2*M_PI*((float)i/(float)N_grid)); } } } cudaMemcpy(g->d_Ex, linear_field_x, sizeof(float)*N_grid_all, cudaMemcpyHostToDevice); cudaMemcpy(g->d_Ey, linear_field_y, sizeof(float)*N_grid_all, cudaMemcpyHostToDevice); cudaMemcpy(g->d_Ez, linear_field_z, sizeof(float)*N_grid_all, cudaMemcpyHostToDevice); } // void debug_field_solver_quadratic(Grid *g) // { // float* linear_field_x = new float[N_grid_all]; // float* linear_field_y = new float[N_grid_all]; // float* linear_field_z = new float[N_grid_all]; // for(int i = 0; i<N_grid; i++){ // for(int j = 0; j<N_grid; j++){ // for(int k = 0; k<N_grid; k++){ // int index = i*N_grid*N_grid + j*N_grid + k; // linear_field_x[index] = (dx*i)*(dx*i); // linear_field_y[index] = (dx*j)*(dx*j); // linear_field_z[index] = (dx*k)*(dx*k); // } // } // } // cudaMemcpy(g->d_Ex, linear_field_x, sizeof(float)*N_grid_all, cudaMemcpyHostToDevice); // cudaMemcpy(g->d_Ey, linear_field_y, sizeof(float)*N_grid_all, cudaMemcpyHostToDevice); // cudaMemcpy(g->d_Ez, linear_field_z, sizeof(float)*N_grid_all, cudaMemcpyHostToDevice); // } void field_solver(Grid *g){ cufftExecR2C(g->plan_forward, g->d_rho, g->d_fourier_rho); CUDA_ERROR(cudaDeviceSynchronize()); solve_poisson<<<gridBlocks, gridThreads>>>(g->d_kv, g->d_fourier_rho, g->d_fourier_Ex, g->d_fourier_Ey, g->d_fourier_Ez); CUDA_ERROR(cudaDeviceSynchronize()); cufftExecC2R(g->plan_backward, g->d_fourier_Ex, g->d_Ex); cufftExecC2R(g->plan_backward, g->d_fourier_Ey, g->d_Ey); cufftExecC2R(g->plan_backward, g->d_fourier_Ez, g->d_Ez); scale_down_after_fft<<<gridBlocks, gridThreads>>>(g->d_Ex, g->d_Ey, g->d_Ez); CUDA_ERROR(cudaDeviceSynchronize()); } __device__ int position_to_grid_index(float X){ return int(X/dx); } __device__ float position_in_cell(float x){ int grid_index = position_to_grid_index(x); return x - grid_index*dx; } __global__ void scatter_charge(Particle *d_P, float q, float* d_rho){ int n = blockIdx.x*blockDim.x + threadIdx.x; float x = d_P[n].x; float y = d_P[n].y; float z = d_P[n].z; int i = position_to_grid_index(x); int j = position_to_grid_index(y); int k = position_to_grid_index(z); float Xr = position_in_cell(x)/dx; float Xl = 1 - Xr; float Yr = position_in_cell(y)/dy; float Yl = 1 - Yr; float Zr = position_in_cell(z)/dz; float Zl = 1 - Zr; //this part is literally hitler - not just unreadable but slow af //TODO: redo this using a reduce atomicAdd(&(d_rho[N_grid * N_grid * ((k)%N_grid) + N_grid * ((j)%N_grid) + ((i)%N_grid)]), q*Xl*Yl*Zl); atomicAdd(&(d_rho[N_grid * N_grid * ((k)%N_grid) + N_grid * ((j)%N_grid) + ((i+1)%N_grid)]), q*Xr*Yl*Zl); atomicAdd(&(d_rho[N_grid * N_grid * ((k)%N_grid) + N_grid * ((j+1)%N_grid) + ((i)%N_grid)]), q*Xl*Yr*Zl); atomicAdd(&(d_rho[N_grid * N_grid * ((k+1)%N_grid) + N_grid * ((j)%N_grid) + ((i)%N_grid)]), q*Xl*Yl*Zr); atomicAdd(&(d_rho[N_grid * N_grid * ((k)%N_grid) + N_grid * ((j+1)%N_grid) + ((i+1)%N_grid)]), q*Xr*Yr*Zl); atomicAdd(&(d_rho[N_grid * N_grid * ((k+1)%N_grid) + N_grid * ((j)%N_grid) + ((i+1)%N_grid)]), q*Xr*Yl*Zr); 
atomicAdd(&(d_rho[N_grid * N_grid * ((k+1)%N_grid) + N_grid * ((j+1)%N_grid) + ((i)%N_grid)]), q*Xl*Yr*Zr); atomicAdd(&(d_rho[N_grid * N_grid * ((k+1)%N_grid) + N_grid * ((j+1)%N_grid) + ((i+1)%N_grid)]), q*Xr*Yr*Zr); } __device__ float gather_grid_to_particle(Particle *p, float *grid){ float x = p->x; float y = p->y; float z = p->z; int i = position_to_grid_index(x); int j = position_to_grid_index(y); int k = position_to_grid_index(z); float Xr = position_in_cell(x)/dx; float Xl = 1 - Xr; float Yr = position_in_cell(y)/dy; float Yl = 1 - Yr; float Zr = position_in_cell(z)/dz; float Zl = 1 - Zr; float interpolated_scalar = 0.0f; //this part is also hitler but not as much //TODO: zafunkcjować ten kawałek interpolated_scalar += grid[N_grid * N_grid * ((k)%N_grid) + N_grid * ((j)%N_grid) + ((i)%N_grid)]*Xl*Yl*Zl; interpolated_scalar += grid[N_grid * N_grid * ((k)%N_grid) + N_grid * ((j)%N_grid) + ((i+1)%N_grid)]*Xr*Yl*Zl; interpolated_scalar += grid[N_grid * N_grid * ((k)%N_grid) + N_grid * ((j+1)%N_grid) + ((i)%N_grid)]*Xl*Yr*Zl; interpolated_scalar += grid[N_grid * N_grid * ((k+1)%N_grid) + N_grid * ((j)%N_grid) + ((i)%N_grid)]*Xl*Yl*Zr; interpolated_scalar += grid[N_grid * N_grid * ((k)%N_grid) + N_grid * ((j+1)%N_grid) + ((i+1)%N_grid)]*Xr*Yr*Zl; interpolated_scalar += grid[N_grid * N_grid * ((k+1)%N_grid) + N_grid * ((j)%N_grid) + ((i+1)%N_grid)]*Xr*Yl*Zr; interpolated_scalar += grid[N_grid * N_grid * ((k+1)%N_grid) + N_grid * ((j+1)%N_grid) + ((i)%N_grid)]*Xl*Yr*Zr; interpolated_scalar += grid[N_grid * N_grid * ((k+1)%N_grid) + N_grid * ((j+1)%N_grid) + ((i+1)%N_grid)]*Xr*Yr*Zr; return interpolated_scalar; } __global__ void InitParticleArrays(Particle *d_p, float shiftx, float shifty, float shiftz){ int n = blockDim.x * blockIdx.x + threadIdx.x; if (n<N_particles){ Particle *p = &(d_p[n]); int i = n / (int)(N_particles_1_axis*N_particles_1_axis); int j = (int) (n/N_particles_1_axis) % N_particles_1_axis; int k = n % N_particles_1_axis; p->x = L/float(N_particles_1_axis) * i + shiftx; p->x = p->x - floor(p->x/L)*L; p->y = L/float(N_particles_1_axis) * j + shifty; p->y = p->y - floor(p->y/L)*L; p->z = L/float(N_particles_1_axis) * k + shiftz; p->z = p->z - floor(p->z/L)*L; p->vx = 0.0f; p->vy = 0.0f; p->vz = 0.0f; } } __global__ void InitialVelocityStep(Particle *d_p, float q, float m, float *d_Ex, float *d_Ey, float *d_Ez){ int n = blockDim.x * blockIdx.x + threadIdx.x; { Particle *p = &(d_p[n]); //gather electric field float Ex = gather_grid_to_particle(p, d_Ex); float Ey = gather_grid_to_particle(p, d_Ey); float Ez = gather_grid_to_particle(p, d_Ez); //use electric field to accelerate particles p->vx -= 0.5f*dt*q/m*Ex; p->vy -= 0.5f*dt*q/m*Ey; p->vz -= 0.5f*dt*q/m*Ez; } } __global__ void ParticleKernel(Particle *d_p, float q, float m, float *d_Ex, float *d_Ey, float *d_Ez){ int n = blockDim.x * blockIdx.x + threadIdx.x; if(n<N_particles) { Particle *p = &(d_p[n]); //push positions, enforce periodic boundary conditions p->x = p->x + p->vx*dt; p->x = p->x - floor(p->x/L)*L; p->y = p->y + p->vy*dt; p->y = p->y - floor(p->y/L)*L; p->z = p->z + p->vz*dt; p->z = p->z - floor(p->z/L)*L; //gather electric field float Ex = gather_grid_to_particle(p, d_Ex); float Ey = gather_grid_to_particle(p, d_Ey); float Ez = gather_grid_to_particle(p, d_Ez); //use electric field to accelerate particles p->vx += dt*q/m*Ex; p->vy += dt*q/m*Ey; p->vz += dt*q/m*Ez; } } void init_species(Species *s, float shiftx, float shifty, float shiftz){ s->particles = new Particle[N_particles]; 
CUDA_ERROR(cudaMalloc((void**)&(s->d_particles), sizeof(Particle)*N_particles)); cout << "initializing particles" << endl; InitParticleArrays<<<particleBlocks, particleThreads>>>(s->d_particles, shiftx, shifty, shiftz); } void dump_density_data(Grid *g, char* name){ cout << "dumping" << endl; CUDA_ERROR(cudaMemcpy(g->rho, g->d_rho, sizeof(float)*N_grid_all, cudaMemcpyDeviceToHost)); CUDA_ERROR(cudaMemcpy(g->Ex, g->d_Ex, sizeof(float)*N_grid_all, cudaMemcpyDeviceToHost)); CUDA_ERROR(cudaMemcpy(g->Ey, g->d_Ey, sizeof(float)*N_grid_all, cudaMemcpyDeviceToHost)); CUDA_ERROR(cudaMemcpy(g->Ez, g->d_Ez, sizeof(float)*N_grid_all, cudaMemcpyDeviceToHost)); FILE *density_data = fopen(name, "w"); float rho_total = 0.0f; for (int n = 0; n < N_grid_all; n++) { fprintf(density_data, "%f %.0f %.0f %.0f\n", g->rho[n], g->Ex[n], g->Ey[n], g->Ez[n]); // printf("%d %f %f %f %f\n", n, g->rho[n], g->Ex[n], g->Ey[n], g->Ez[n]); rho_total += g->rho[n]; } printf("rho total: %f\n", rho_total); } void dump_running_density_data(Grid *g, char* name){ CUDA_ERROR(cudaMemcpy(g->rho, g->d_rho, sizeof(float)*N_grid_all, cudaMemcpyDeviceToHost)); CUDA_ERROR(cudaMemcpy(g->Ex, g->d_Ex, sizeof(float)*N_grid_all, cudaMemcpyDeviceToHost)); CUDA_ERROR(cudaMemcpy(g->Ey, g->d_Ey, sizeof(float)*N_grid_all, cudaMemcpyDeviceToHost)); CUDA_ERROR(cudaMemcpy(g->Ez, g->d_Ez, sizeof(float)*N_grid_all, cudaMemcpyDeviceToHost)); FILE *density_data = fopen(name, "w"); for (int n = 0; n < N_grid_all; n++) { fprintf(density_data, "\n%f %f %f %f", g->rho[n], g->Ex[n], g->Ey[n], g->Ez[n]); } fclose(density_data); } void dump_position_data(Species *s, char* name){ cout << "Copying particles from GPU to device"<< endl; CUDA_ERROR(cudaMemcpy(s->particles, s->d_particles, sizeof(Particle)*N_particles, cudaMemcpyDeviceToHost)); cout << "Copied particles from GPU to device"<< endl; FILE *initial_position_data = fopen(name, "w"); for (int i =0; i<N_particles; i++) { Particle *p = &(s->particles[i]); fprintf(initial_position_data, "%f %f %f %f %f %f\n", p->x, p->y, p->z, p->vx, p->vy, p->vz); } // free(s->particles); fclose(initial_position_data); } void init_timestep(Grid *g, Species *electrons, Species *ions){ set_grid_array_to_value<<<gridBlocks, gridThreads>>>(g->d_rho, 0); CUDA_ERROR(cudaDeviceSynchronize()); scatter_charge<<<particleBlocks, particleThreads>>>(electrons->d_particles, electrons->q, g->d_rho); CUDA_ERROR(cudaDeviceSynchronize()); scatter_charge<<<particleBlocks, particleThreads>>>(ions->d_particles, ions->q, g->d_rho); CUDA_ERROR(cudaDeviceSynchronize()); // debug_field_solver_sine(g); field_solver(g); CUDA_ERROR(cudaDeviceSynchronize()); InitialVelocityStep<<<particleBlocks, particleThreads>>>(electrons->d_particles, electrons->q, electrons->m, g->d_Ex, g->d_Ey, g->d_Ez); InitialVelocityStep<<<particleBlocks, particleThreads>>>(ions->d_particles, ions->q, ions->m, g->d_Ex, g->d_Ey, g->d_Ez); CUDA_ERROR(cudaDeviceSynchronize()); } void timestep(Grid *g, Species *electrons, Species *ions){ //1. move particles, gather electric fields at their locations, accelerate particles ParticleKernel<<<particleBlocks, particleThreads>>>(electrons->d_particles, electrons->q, electrons->m, g->d_Ex, g->d_Ey, g->d_Ez); ParticleKernel<<<particleBlocks, particleThreads>>>(ions->d_particles, ions->q, ions->m, g->d_Ex, g->d_Ey, g->d_Ez); //potential TODO: sort particles????? //2. clear charge density for scattering fields to particles charge set_grid_array_to_value<<<gridBlocks, gridThreads>>>(g->d_rho, 0); CUDA_ERROR(cudaDeviceSynchronize()); //3. 
gather charge from new particle position to grid //TODO: note that I may need to cudaSyncThreads between these steps scatter_charge<<<particleBlocks, particleThreads>>>(electrons->d_particles, electrons->q, g->d_rho); CUDA_ERROR(cudaDeviceSynchronize()); scatter_charge<<<particleBlocks, particleThreads>>>(ions->d_particles, ions->q, g->d_rho); CUDA_ERROR(cudaDeviceSynchronize()); //4. use charge density to calculate field field_solver(g); CUDA_ERROR(cudaDeviceSynchronize()); } int main(void){ printf("N_grid Threads per block Blocks\n"); printf("%7d %17d %6d\n", N_grid_all, gridThreads.x, gridBlocks.x); cudaEvent_t startLoop, endLoop; cudaEventCreate(&startLoop); cudaEventCreate(&endLoop); Grid g; init_grid(&g); Species electrons; electrons.q = -ELECTRON_CHARGE; electrons.m = ELECTRON_MASS; electrons.N = N_particles; init_species(&electrons, L/100.0f, 0, 0); Species ions; ions.q = +ELECTRON_CHARGE; ions.m = PROTON_MASS; ions.N = N_particles; init_species(&ions, 0, 0, 0); //TODO: initialize for two stream instability init_timestep(&g, &electrons, &ions); CUDA_ERROR(cudaGetLastError()); cudaEventSynchronize(startLoop); cudaEventRecord(startLoop); for(int i =0; i<NT; i++){ char* filename = new char[100]; timestep(&g, &electrons, &ions); } cudaDeviceSynchronize(); cudaEventSynchronize(endLoop); cudaEventRecord(endLoop); cout << endl << "finished time loop" << endl; float loopRuntimeMS = 0; cudaEventElapsedTime(&loopRuntimeMS, startLoop, endLoop); printf("N_grid Threads per block Blocks\tRuntime\n"); printf("%7d %17d %6d %f\n", N_grid_all, gridThreads.x, gridBlocks.x, loopRuntimeMS); if (loopRuntimeMS > 0.0001) { char* filename = new char[100]; sprintf(filename, "benchmark/pb_%d_%d_%d.bdat", N_grid, gridThreads.x, gridBlocks.x); FILE *benchmark = fopen(filename, "w"); fprintf(benchmark, "N_grid Threads per block Blocks\tRuntime\n"); fprintf(benchmark, "%7d %17d %6d %f\n", N_grid_all, gridThreads.x, gridBlocks.x, loopRuntimeMS); fclose(benchmark); } else { printf("Not saved!\n"); } CUDA_ERROR(cudaFree(electrons.d_particles)); CUDA_ERROR(cudaFree(g.d_rho)); CUDA_ERROR(cudaFree(g.d_Ex)); CUDA_ERROR(cudaFree(g.d_Ey)); CUDA_ERROR(cudaFree(g.d_Ez)); CUDA_ERROR(cudaFree(g.d_fourier_Ex)); CUDA_ERROR(cudaFree(g.d_fourier_Ey)); CUDA_ERROR(cudaFree(g.d_fourier_Ez)); CUDA_ERROR(cudaFree(g.d_fourier_rho)); }
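The solve_poisson kernel above follows the periodic spectral field solve its comment attributes to Birdsall and Langdon: transform the charge density, solve Gauss's law algebraically in Fourier space, and transform the field components back. The textbook relations that scheme is built on are, in LaTeX,

$$\hat{\phi}(\mathbf{k}) = \frac{\hat{\rho}(\mathbf{k})}{\varepsilon_0\,|\mathbf{k}|^{2}}, \qquad \hat{\mathbf{E}}(\mathbf{k}) = -\,i\,\mathbf{k}\,\hat{\phi}(\mathbf{k}) = -\,\frac{i\,\mathbf{k}\,\hat{\rho}(\mathbf{k})}{\varepsilon_0\,|\mathbf{k}|^{2}},$$

with the $\mathbf{k}=0$ mode excluded, which is why the kernel special-cases i==j==k==0 to avoid a division by zero. Since cuFFT transforms are unnormalized, the inverse C2R results are divided through by N_grid_all in scale_down_after_fft.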
290b39355a4f1580d173b6db9b3d01559434dae4.hip
// !!! This is a file automatically generated by hipify!!! #include "memBenchmark.h" #include "termcolor.hpp" #include <hip/hip_runtime.h> #include <device_launch_parameters.h> // NVTX Dir: C:\Program Files\NVIDIA GPU Computing Toolkit\nvToolsExt #include <roctracer/roctx.h> #include <cmath> #include <cstdio> #include <iomanip> #include <iostream> #include <numeric> #include <random> #include <string> // Number of element to reduce static const int n_elements = 32 * 1024 * 1024; // Number of threads per block to use for all kernels static const int threads = 256; struct DIMS1D { int dimThreads; int dimBlocks; }; #define CUDA(call) do { \ hipError_t e = (call); \ if (e == hipSuccess) break; \ fprintf(stderr, __FILE__":%d: %s (%d)\n", \ __LINE__, hipGetErrorString(e), e); \ exit(1); \ } while (0) inline unsigned divup(unsigned n, unsigned div) { return (n + div - 1) / div; } void printResults(double timeInMilliseconds, int iterations) { // print out the time required for the kernel to finish the transpose operation double bandwidth = (iterations * 1e-9 * (double)(n_elements * sizeof(float))) / (timeInMilliseconds * 1e-3); std::cout << "Elapsed Time for " << iterations << " runs = " << round(timeInMilliseconds) << "ms" << std::endl; std::cout << termcolor::bold << termcolor::red << termcolor::on_white << "Bandwidth (GB/s) = " << std::setprecision(4) << bandwidth << termcolor::reset << std::endl; std::cout.clear(); } // Check errors bool postprocess(const float *ref, const float *res, int n) { bool passed = true; for(int i = 0; i < n; i++) { if (std::abs(res[i] - ref[i]) / n_elements > 1e-6) { std::cout.precision(6); std::cout << "ID: " << i << " \t Res: " << res[i] << " \t Ref: " << ref[i] << std::endl; std::cout << termcolor::blink << termcolor::white << termcolor::on_red << "*** FAILED ***" << termcolor::reset << std::endl; passed = false; break; } } if(passed) std::cout << termcolor::green << "Post process check passed!!" << termcolor::reset << std::endl; return passed; } static float reduce_cpu(const float *data, int n) { float sum = 0; for (int i = 0; i < n; i++) sum += data[i]; return sum; } //////////////////////////////////////////////////////////////////////////////// // Reduction Stage 0: Interleaved Addressing // d_idata : Device pointer to input // d_odata : Device pointer to output // n : Number of elements to reduce //////////////////////////////////////////////////////////////////////////////// __global__ void reduce_stage0(const float* d_idata, float* d_odata, int n) { // Dynamic allocation of shared memory - See kernel call in host code extern __shared__ float smem[]; // Calculate 1D Index int idx = blockDim.x * blockIdx.x + threadIdx.x; // Copy input data to shared memory // Note: Use block index for shared memory // Also check for bounds if (idx < n) { smem[threadIdx.x] = d_idata[idx]; } // Where do we need to put __syncthreads()? Do we need it at all? __syncthreads(); // Reduce within block // Start from c = 1, upto block size, each time doubling the offset for (int c = 1; c < blockDim.x; c *= 2) { if (threadIdx.x % (2 * c) == 0) { smem[threadIdx.x] += smem[threadIdx.x + c]; } __syncthreads(); } // Copy result of reduction to global memory // Which index of d_odata do we write to? // In which index of smem is the result stored? // Do we need another syncthreads before writing to global memory? 
// Use only one thread to write to global memory if (threadIdx.x == 0) { d_odata[blockIdx.x] = smem[0]; } } //////////////////////////////////////////////////////////////////////////////// // Reduction Stage 1: Non-divergent Addressing // d_idata : Device pointer to input // d_odata : Device pointer to output // n : Number of elements to reduce // // The only difference between stage0 and stage1 is the reduction for loop //////////////////////////////////////////////////////////////////////////////// __global__ void reduce_stage1(const float* d_idata, float* d_odata, int n) { // Allocate dynamic shared memory, Calculate 1D Index and Copy input data into shared memory // Exactly same as reduce_stage0 extern __shared__ float smem[]; int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < n) { smem[threadIdx.x] = d_idata[idx]; } // This is the part that differes from reduce_stage0 // Reduce within block with coalesced indexing pattern // Change the for-loop to use indexing that reduces warp-divergence for (int c = 1; c < blockDim.x; c *= 2) { int index = threadIdx.x * 2 * c; if (index + c < blockDim.x) { smem[index] += smem[index + c]; } __syncthreads(); } // Copy result of reduction to global memory - Same as reduce_stage0 if (threadIdx.x == 0) { d_odata[blockIdx.x] = smem[0]; } } //////////////////////////////////////////////////////////////////////////////// // Reduction Stage 2: Warp Management without Bank Conflicts // d_idata : Device pointer to input // d_odata : Device pointer to output // n : Number of elements to reduce // // The only difference between stage1 and stage2 is the reduction for loop // This time, we reduce start from blockDim.x and divide by 2 in each iteration //////////////////////////////////////////////////////////////////////////////// __global__ void reduce_stage2(const float* d_idata, float* d_odata, int n) { // Allocate dynamic shared memory, Calculate 1D Index and Copy input data into shared memory // Exactly same as reduce_stage1 extern __shared__ float smem[]; int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < n) { smem[threadIdx.x] = d_idata[idx]; } // This is the part that differes from reduce_stage1 // Reduce within block with coalesced indexing pattern and avoid bank conflicts // Change the for-loop to use indexing that reduces warp-divergence // Start from blockDim.x / 2 and divide by 2 until we hit 1 for (int c = blockDim.x / 2 ; c > 0 ; c >>= 1) { if (threadIdx.x < c) { smem[threadIdx.x] += smem[threadIdx.x + c]; } __syncthreads(); } // Copy result of reduction to global memory - Same as reduce_stage1 if (threadIdx.x == 0) { d_odata[blockIdx.x] = smem[0]; } } //////////////////////////////////////////////////////////////////////////////// // Reduction Stage 3: Add During Load, Use tile to reduce number of blocks // d_idata : Device pointer to input // d_odata : Device pointer to output // n : Number of elements to reduce // stage3_TILE : Tiling factor // // In this kernel, we will add on load when copying data into shared memory // The difference between stage3 and stage2 is how we load data into shared memory // Each block does work of stage3_TILE * blockDim.x elements //////////////////////////////////////////////////////////////////////////////// const int stage3_TILE = 2; __global__ void reduce_stage3(const float* d_idata, float* d_odata, int n) { // Allocate dynamic shared memory extern __shared__ float smem[]; // Calculate 1D index similar to stage2, but multiply by stage3_TILE int idx = 0; // Copy input data to shared memory. 
Add on load. // Reduce the block same as reduce_stage2 //Copy result of reduction to global memory - Same as reduce_stage2 } // warpReduce function for reduce_stag4 that reduces 2 warps into one value __device__ void warpReduce(volatile float* smem, int tid) { //Write code for warp reduce here smem[tid] += smem[tid + 32]; smem[tid] += smem[tid + 16]; smem[tid] += smem[tid + 8 ]; smem[tid] += smem[tid + 4 ]; smem[tid] += smem[tid + 2 ]; smem[tid] += smem[tid + 1 ]; } //////////////////////////////////////////////////////////////////////////////// // Reduction Stage 4: Warp Loop Unrolling // d_idata : Device pointer to input // d_odata : Device pointer to output // n : Number of elements to reduce // stage4_TILE : Tiling factor - How does tuning this change performance? // // The reduce_stage4 kernel improves on reduce_stage3 by unrolling the block // reduction by unrolling the loop that operates within a warp. // Each block does work of stage4_TILE * blockDim.x elements // // This kernel also uses the warpReduce device function above //////////////////////////////////////////////////////////////////////////////// const int stage4_TILE = 2; __global__ void reduce_stage4(const float* d_idata, float* d_odata, int n) { // Allocate dynamic shared memory, Calculate 1D Index and // Copy input data and add on load into shared memory // Exactly same as reduce_stage3. Use stage4_TILE instead of stage3_TILE. extern __shared__ float smem[]; int idx = 0; // Reduce within block with coalesced indexing pattern and avoid bank conflicts // Split the block reduction into 2 parts. // Part 1 is the same as reduce stage3, but only for c > 32 // Part 2 then uses the warpReduce function to reduce the 2 warps // The reason we stop the previous loop at c > 32 is because // warpReduce can reduce 2 warps only 1 warp // Copy result of reduction to global memory - Same as reduce_stage3 } //////////////////////////////////////////////////////////////////////////////// // Reduction Stage 5: Completely unrolled blocks using templates // d_idata : Device pointer to input // d_odata : Device pointer to output // n : Number of elements to reduce // stage5_TILE : Tiling factor - How does tuning this change performance? // // The reduce_stage5 kernel is the same as reduce_stage4 except part 1 of block reduction // We simply unroll the entire for loop into individual statements wrapper by if conditions // Why do we need to use templates? How do they improve performance? // Each block does work of stage5_TILE * blockDim.x elements // // This kernel also uses the warpReduce device function above //////////////////////////////////////////////////////////////////////////////// const int stage5_TILE = 2; template<unsigned int blockSize> __global__ void reduce_stage5(const float* d_idata, float* d_odata, int n) { // Allocate dynamic shared memory, Calculate 1D Index and // Copy input data and add on load into shared memory // Exactly same as reduce_stage4. Use stage5_TILE instead of stage4_TILE. 
// Use #pragma unroll around the load loop extern __shared__ float smem[]; int idx = 0; // Store the threadIdx.x in a register int tid = threadIdx.x; // Reduce the block using the same part1 and part2 split that we used in reduce_stage4 // Except, here write explicit statements instead of the for loops // Part 2 is the same as reduce_stage4 // Copy result of reduction to global memory - Same as reduce_stage4 } int main() { // Calculate bytes needed for input const unsigned bytes = n_elements * sizeof(float); // Allocate memory and initialize elements // Let's use pinned memory for host float *h_idata; CUDA(hipHostMalloc((void**)&h_idata, bytes)); // Fill random values into the host array { std::random_device randomDevice; std::mt19937 generator(randomDevice()); std::uniform_real_distribution<float> distribution(-1, 1); for (int i = 0; i < n_elements; i++) { h_idata[i] = distribution(generator); } } // Copy input data into device memory float *d_idata = NULL; CUDA(hipMalloc((void **)&d_idata, bytes)); CUDA(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); // Compute Gold Standard using CPU const float gold_result = reduce_cpu(h_idata, n_elements); // Create CUDA events for timing hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); //////////////////////////////////////////////////////////// std::cout << "******************************************" << std::endl; std::cout << "***CPU Reduce***" << std::endl; { nvtxRangeId_t range = roctxRangeStart("CPU Reduce"); float cpu_result = 0; int iterations = 10; // start the timer Timer hTimer; nvtxRangeId_t rangeBenchmark = roctxRangeStart("CPU Reduce Benchmark"); for(int k = 0; k < iterations; k++) { cpu_result = reduce_cpu(h_idata, n_elements); } roctxRangeStop(rangeBenchmark); // stop the timer double time = hTimer.elapsed() * 1000; //ms if(postprocess(&cpu_result, &gold_result, 1)) printResults(time, iterations); roctxRangeStop(range); } std::cout << "******************************************" << std::endl << std::endl; //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// std::cout << "******************************************" << std::endl; std::cout << "***Reduction Stage 0***" << std::endl; { nvtxRangeId_t range = roctxRangeStart("Reduction Stage 0"); //Calculate Threads per block and total blocks required DIMS1D dims; dims.dimThreads = threads; dims.dimBlocks = divup(n_elements, dims.dimThreads); // Copy input data to device CUDA(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); // Calculate bytes needed for output size_t block_bytes = dims.dimBlocks * sizeof(float); // Allocate memory for output on device float *d_odata = NULL; CUDA(hipMalloc((void**)&d_odata, block_bytes)); CUDA(hipMemset(d_odata, 0, block_bytes)); // Call the kernel. Allocate dynamic shared memory hipLaunchKernelGGL(( reduce_stage0), dim3(dims.dimBlocks), dim3(dims.dimThreads), sizeof(float) * dims.dimThreads, 0, d_idata, d_odata, n_elements); // Copy result of block reduce to CPU and run CPU reduce float *h_blocks = (float *)malloc(dims.dimBlocks * sizeof(float)); CUDA(hipMemcpy(h_blocks, d_odata, dims.dimBlocks * sizeof(float), hipMemcpyDeviceToHost)); // Secondary reduce on CPU float gpu_result = 0; for(int i = 0; i < dims.dimBlocks; i++) gpu_result += h_blocks[i]; // Check the result and then run the benchmark. 
if(postprocess(&gpu_result, &gold_result, 1)) { nvtxRangeId_t rangeBenchmark = roctxRangeStart("Reduction Stage 0 Benchmark"); //Start Benchmark int iterations = 10; CUDA(hipEventRecord(start, 0)); // Run multiple times for a good benchmark for(int i = 0; i < iterations; i++) { hipLaunchKernelGGL(( reduce_stage0), dim3(dims.dimBlocks), dim3(dims.dimThreads), sizeof(float) * dims.dimThreads, 0, d_idata, d_odata, n_elements); hipMemcpy(h_blocks, d_odata, dims.dimBlocks * sizeof(float), hipMemcpyDeviceToHost); for(int i = 0; i < dims.dimBlocks; i++) gpu_result += h_blocks[i]; } CUDA(hipEventRecord(stop, 0)); CUDA(hipEventSynchronize(stop)); roctxRangeStop(rangeBenchmark); float time_ms; CUDA(hipEventElapsedTime(&time_ms, start, stop)); printResults(time_ms, iterations); } // Cleanup free(h_blocks); hipFree(d_odata); roctxRangeStop(range); } std::cout << "******************************************" << std::endl << std::endl; //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// std::cout << "******************************************" << std::endl; std::cout << "***Reduction Stage 1***" << std::endl; { nvtxRangeId_t range = roctxRangeStart("Reduction Stage 1"); //Calculate Threads per block and total blocks required DIMS1D dims; dims.dimThreads = threads; dims.dimBlocks = divup(n_elements, dims.dimThreads); // Copy input data to device CUDA(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); // Calculate bytes needed for output size_t block_bytes = dims.dimBlocks * sizeof(float); // Allocate memory for output on device float *d_odata = NULL; CUDA(hipMalloc((void**)&d_odata, block_bytes)); CUDA(hipMemset(d_odata, 0, block_bytes)); // Call the kernel. Allocate dynamic shared memory hipLaunchKernelGGL(( reduce_stage1), dim3(dims.dimBlocks), dim3(dims.dimThreads), sizeof(float) * dims.dimThreads, 0, d_idata, d_odata, n_elements); // Copy result of block reduce to CPU and run CPU reduce float *h_blocks = (float *)malloc(block_bytes); CUDA(hipMemcpy(h_blocks, d_odata, block_bytes, hipMemcpyDeviceToHost)); // Secondary reduce on CPU float gpu_result = 0; for(int i = 0; i < dims.dimBlocks; i++) gpu_result += h_blocks[i]; // Check the result and then run the benchmark. 
if(postprocess(&gpu_result, &gold_result, 1)) { nvtxRangeId_t rangeBenchmark = roctxRangeStart("Reduction Stage 1 Benchmark"); //Start Benchmark int iterations = 10; CUDA(hipEventRecord(start, 0)); // Run multiple times for a good benchmark for(int i = 0; i < iterations; i++) { hipLaunchKernelGGL(( reduce_stage1), dim3(dims.dimBlocks), dim3(dims.dimThreads), sizeof(float) * dims.dimThreads, 0, d_idata, d_odata, n_elements); hipMemcpy(h_blocks, d_odata, block_bytes, hipMemcpyDeviceToHost); for(int i = 0; i < dims.dimBlocks; i++) gpu_result += h_blocks[i]; } CUDA(hipEventRecord(stop, 0)); CUDA(hipEventSynchronize(stop)); roctxRangeStop(rangeBenchmark); float time_ms; CUDA(hipEventElapsedTime(&time_ms, start, stop)); printResults(time_ms, iterations); } // Cleanup free(h_blocks); hipFree(d_odata); roctxRangeStop(range); } std::cout << "******************************************" << std::endl << std::endl; //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// std::cout << "******************************************" << std::endl; std::cout << "***Reduction Stage 2***" << std::endl; { nvtxRangeId_t range = roctxRangeStart("Reduction Stage 2"); //Calculate Threads per block and total blocks required DIMS1D dims; dims.dimThreads = threads; dims.dimBlocks = divup(n_elements, dims.dimThreads); // Copy input data to device CUDA(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); // Calculate bytes needed for output size_t block_bytes = dims.dimBlocks * sizeof(float); // Allocate memory for output on device float *d_odata = NULL; CUDA(hipMalloc((void**)&d_odata, block_bytes)); CUDA(hipMemset(d_odata, 0, block_bytes)); // Call the kernel. Allocate dynamic shared memory hipLaunchKernelGGL(( reduce_stage2), dim3(dims.dimBlocks), dim3(dims.dimThreads), sizeof(float) * dims.dimThreads, 0, d_idata, d_odata, n_elements); // Copy result of block reduce to CPU and run CPU reduce float *h_blocks = (float *)malloc(block_bytes); CUDA(hipMemcpy(h_blocks, d_odata, block_bytes, hipMemcpyDeviceToHost)); // Secondary reduce on CPU float gpu_result = 0; for(int i = 0; i < dims.dimBlocks; i++) gpu_result += h_blocks[i]; // Check the result and then run the benchmark. 
if(postprocess(&gpu_result, &gold_result, 1)) { nvtxRangeId_t rangeBenchmark = roctxRangeStart("Reduction Stage 2 Benchmark"); //Start Benchmark int iterations = 10; CUDA(hipEventRecord(start, 0)); // Run multiple times for a good benchmark for(int i = 0; i < iterations; i++) { hipLaunchKernelGGL(( reduce_stage2), dim3(dims.dimBlocks), dim3(dims.dimThreads), sizeof(float) * dims.dimThreads, 0, d_idata, d_odata, n_elements); hipMemcpy(h_blocks, d_odata, block_bytes, hipMemcpyDeviceToHost); for(int i = 0; i < dims.dimBlocks; i++) gpu_result += h_blocks[i]; } CUDA(hipEventRecord(stop, 0)); CUDA(hipEventSynchronize(stop)); roctxRangeStop(rangeBenchmark); float time_ms; CUDA(hipEventElapsedTime(&time_ms, start, stop)); printResults(time_ms, iterations); } // Cleanup free(h_blocks); hipFree(d_odata); roctxRangeStop(range); } std::cout << "******************************************" << std::endl << std::endl; //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// std::cout << "******************************************" << std::endl; std::cout << "***Reduction Stage 3***" << std::endl; { nvtxRangeId_t range = roctxRangeStart("Reduction Stage 3"); // Calculate Threads per block and total blocks required // Use stage3_TILE in your grid calculation DIMS1D dims; dims.dimThreads = threads; dims.dimBlocks = divup(n_elements, dims.dimThreads * stage3_TILE); // Copy input data to device CUDA(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); // Calculate bytes needed for output size_t block_bytes = dims.dimBlocks * sizeof(float); // Allocate memory for output on device float *d_odata = NULL; CUDA(hipMalloc((void**)&d_odata, block_bytes)); CUDA(hipMemset(d_odata, 0, block_bytes)); // Call the kernel. Allocate dynamic shared memory hipLaunchKernelGGL(( reduce_stage3), dim3(dims.dimBlocks), dim3(dims.dimThreads), sizeof(float) * dims.dimThreads, 0, d_idata, d_odata, n_elements); // Copy result of block reduce to CPU and run CPU reduce float *h_blocks = (float *)malloc(block_bytes); CUDA(hipMemcpy(h_blocks, d_odata, block_bytes, hipMemcpyDeviceToHost)); // Secondary reduce on CPU float gpu_result = 0; for(int i = 0; i < dims.dimBlocks; i++) gpu_result += h_blocks[i]; // Check the result and then run the benchmark. 
if(postprocess(&gpu_result, &gold_result, 1)) { nvtxRangeId_t rangeBenchmark = roctxRangeStart("Reduction Stage 3 Benchmark"); //Start Benchmark int iterations = 10; CUDA(hipEventRecord(start, 0)); // Run multiple times for a good benchmark for(int i = 0; i < iterations; i++) { hipLaunchKernelGGL(( reduce_stage3), dim3(dims.dimBlocks), dim3(dims.dimThreads), sizeof(float) * dims.dimThreads, 0, d_idata, d_odata, n_elements); hipMemcpy(h_blocks, d_odata, block_bytes, hipMemcpyDeviceToHost); for(int i = 0; i < dims.dimBlocks; i++) gpu_result += h_blocks[i]; } CUDA(hipEventRecord(stop, 0)); CUDA(hipEventSynchronize(stop)); roctxRangeStop(rangeBenchmark); float time_ms; CUDA(hipEventElapsedTime(&time_ms, start, stop)); printResults(time_ms, iterations); } // Cleanup free(h_blocks); hipFree(d_odata); roctxRangeStop(range); } std::cout << "******************************************" << std::endl << std::endl; //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// std::cout << "******************************************" << std::endl; std::cout << "***Reduction Stage 4***" << std::endl; { nvtxRangeId_t range = roctxRangeStart("Reduction Stage 4"); // Calculate Threads per block and total blocks required // Use stage4_TILE in your grid calculation DIMS1D dims; dims.dimThreads = threads; dims.dimBlocks = divup(n_elements, dims.dimThreads * stage4_TILE); // Copy input data to device CUDA(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); // Calculate bytes needed for output size_t block_bytes = dims.dimBlocks * sizeof(float); // Allocate memory for output on device float *d_odata = NULL; CUDA(hipMalloc((void**)&d_odata, block_bytes)); CUDA(hipMemset(d_odata, 0, block_bytes)); // Call the kernel. Allocate dynamic shared memory hipLaunchKernelGGL(( reduce_stage4), dim3(dims.dimBlocks), dim3(dims.dimThreads), sizeof(float) * dims.dimThreads, 0, d_idata, d_odata, n_elements); // Copy result of block reduce to CPU and run CPU reduce float *h_blocks = (float *)malloc(block_bytes); CUDA(hipMemcpy(h_blocks, d_odata, block_bytes, hipMemcpyDeviceToHost)); // Secondary reduce on CPU float gpu_result = 0; for(int i = 0; i < dims.dimBlocks; i++) gpu_result += h_blocks[i]; // Check the result and then run the benchmark. 
if(postprocess(&gpu_result, &gold_result, 1)) { nvtxRangeId_t rangeBenchmark = roctxRangeStart("Reduction Stage 4 Benchmark"); //Start Benchmark int iterations = 10; CUDA(hipEventRecord(start, 0)); // Run multiple times for a good benchmark for(int i = 0; i < iterations; i++) { hipLaunchKernelGGL(( reduce_stage4), dim3(dims.dimBlocks), dim3(dims.dimThreads), sizeof(float) * dims.dimThreads, 0, d_idata, d_odata, n_elements); hipMemcpy(h_blocks, d_odata, block_bytes, hipMemcpyDeviceToHost); for(int i = 0; i < dims.dimBlocks; i++) gpu_result += h_blocks[i]; } CUDA(hipEventRecord(stop, 0)); CUDA(hipEventSynchronize(stop)); roctxRangeStop(rangeBenchmark); float time_ms; CUDA(hipEventElapsedTime(&time_ms, start, stop)); printResults(time_ms, iterations); } // Cleanup free(h_blocks); hipFree(d_odata); roctxRangeStop(range); } std::cout << "******************************************" << std::endl << std::endl; //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// std::cout << "******************************************" << std::endl; std::cout << "***Reduction Stage 5***" << std::endl; { nvtxRangeId_t range = roctxRangeStart("Reduction Stage 5"); // Calculate Threads per block and total blocks required // Use stage5_TILE in your grid calculation DIMS1D dims; dims.dimThreads = threads; dims.dimBlocks = divup(n_elements, dims.dimThreads * stage5_TILE); // Copy input data to device CUDA(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); // Calculate bytes needed for output size_t block_bytes = dims.dimBlocks * sizeof(float); // Allocate memory for output on device float *d_odata = NULL; CUDA(hipMalloc((void**)&d_odata, block_bytes)); CUDA(hipMemset(d_odata, 0, block_bytes)); // Call the kernel. Allocate dynamic shared memory // Don't forget to add the template hipLaunchKernelGGL(( reduce_stage5<threads>), dim3(dims.dimBlocks), dim3(dims.dimThreads), sizeof(float) * dims.dimThreads, 0, d_idata, d_odata, n_elements); // Copy result of block reduce to CPU and run CPU reduce float *h_blocks = (float *)malloc(block_bytes); CUDA(hipMemcpy(h_blocks, d_odata, block_bytes, hipMemcpyDeviceToHost)); // Secondary reduce on CPU float gpu_result = 0; for(int i = 0; i < dims.dimBlocks; i++) gpu_result += h_blocks[i]; // Check the result and then run the benchmark. if(postprocess(&gpu_result, &gold_result, 1)) { nvtxRangeId_t rangeBenchmark = roctxRangeStart("Reduction Stage 5 Benchmark"); //Start Benchmark int iterations = 10; CUDA(hipEventRecord(start, 0)); // Run multiple times for a good benchmark for(int i = 0; i < iterations; i++) { hipLaunchKernelGGL(( reduce_stage5<threads>), dim3(dims.dimBlocks), dim3(dims.dimThreads), sizeof(float) * dims.dimThreads, 0, d_idata, d_odata, n_elements); hipMemcpy(h_blocks, d_odata, block_bytes, hipMemcpyDeviceToHost); for(int i = 0; i < dims.dimBlocks; i++) gpu_result += h_blocks[i]; } CUDA(hipEventRecord(stop, 0)); CUDA(hipEventSynchronize(stop)); roctxRangeStop(rangeBenchmark); float time_ms; CUDA(hipEventElapsedTime(&time_ms, start, stop)); printResults(time_ms, iterations); } // Cleanup free(h_blocks); hipFree(d_odata); roctxRangeStop(range); } std::cout << "******************************************" << std::endl << std::endl; //////////////////////////////////////////////////////////// // Cleanup CUDA(hipEventDestroy(start)); CUDA(hipEventDestroy(stop)); CUDA(hipHostFree(h_idata)); CUDA(hipFree(d_idata)); return 0; }
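// --- Editor's illustrative sketch (appended; not part of the original exercise file) ---
// The reduce_stage3 body above is intentionally left as a TODO. One possible
// completion of the "add on load" step is sketched below, assuming the launch
// configuration used in main() (divup(n, threads * stage3_TILE) blocks and
// sizeof(float) * threads bytes of dynamic shared memory). The kernel name and
// local variables are hypothetical, not the author's reference solution.
__global__ void reduce_stage3_sketch(const float* d_idata, float* d_odata, int n)
{
    extern __shared__ float smem[];

    // Each block covers stage3_TILE * blockDim.x elements of the input.
    int base = blockIdx.x * blockDim.x * stage3_TILE + threadIdx.x;

    // Add on load: accumulate stage3_TILE strided elements into a register,
    // then place the partial sum into shared memory.
    float sum = 0.0f;
    for (int t = 0; t < stage3_TILE; t++) {
        int i = base + t * blockDim.x;
        if (i < n) sum += d_idata[i];
    }
    smem[threadIdx.x] = sum;
    __syncthreads();

    // Block reduction: same sequential-addressing loop as reduce_stage2.
    for (int c = blockDim.x / 2; c > 0; c >>= 1) {
        if (threadIdx.x < c) smem[threadIdx.x] += smem[threadIdx.x + c];
        __syncthreads();
    }

    // One thread per block writes the block's partial sum.
    if (threadIdx.x == 0) d_odata[blockIdx.x] = smem[0];
}
// reduce_stage4 would differ only in stopping the loop at c > 32 and then calling
// warpReduce(smem, threadIdx.x) for threadIdx.x < 32, as its comments describe.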
290b39355a4f1580d173b6db9b3d01559434dae4.cu
#include "memBenchmark.h" #include "termcolor.hpp" #include <cuda_runtime.h> #include <device_launch_parameters.h> // NVTX Dir: C:\Program Files\NVIDIA GPU Computing Toolkit\nvToolsExt #include <nvToolsExt.h> #include <cmath> #include <cstdio> #include <iomanip> #include <iostream> #include <numeric> #include <random> #include <string> // Number of element to reduce static const int n_elements = 32 * 1024 * 1024; // Number of threads per block to use for all kernels static const int threads = 256; struct DIMS1D { int dimThreads; int dimBlocks; }; #define CUDA(call) do { \ cudaError_t e = (call); \ if (e == cudaSuccess) break; \ fprintf(stderr, __FILE__":%d: %s (%d)\n", \ __LINE__, cudaGetErrorString(e), e); \ exit(1); \ } while (0) inline unsigned divup(unsigned n, unsigned div) { return (n + div - 1) / div; } void printResults(double timeInMilliseconds, int iterations) { // print out the time required for the kernel to finish the transpose operation double bandwidth = (iterations * 1e-9 * (double)(n_elements * sizeof(float))) / (timeInMilliseconds * 1e-3); std::cout << "Elapsed Time for " << iterations << " runs = " << round(timeInMilliseconds) << "ms" << std::endl; std::cout << termcolor::bold << termcolor::red << termcolor::on_white << "Bandwidth (GB/s) = " << std::setprecision(4) << bandwidth << termcolor::reset << std::endl; std::cout.clear(); } // Check errors bool postprocess(const float *ref, const float *res, int n) { bool passed = true; for(int i = 0; i < n; i++) { if (std::abs(res[i] - ref[i]) / n_elements > 1e-6) { std::cout.precision(6); std::cout << "ID: " << i << " \t Res: " << res[i] << " \t Ref: " << ref[i] << std::endl; std::cout << termcolor::blink << termcolor::white << termcolor::on_red << "*** FAILED ***" << termcolor::reset << std::endl; passed = false; break; } } if(passed) std::cout << termcolor::green << "Post process check passed!!" << termcolor::reset << std::endl; return passed; } static float reduce_cpu(const float *data, int n) { float sum = 0; for (int i = 0; i < n; i++) sum += data[i]; return sum; } //////////////////////////////////////////////////////////////////////////////// // Reduction Stage 0: Interleaved Addressing // d_idata : Device pointer to input // d_odata : Device pointer to output // n : Number of elements to reduce //////////////////////////////////////////////////////////////////////////////// __global__ void reduce_stage0(const float* d_idata, float* d_odata, int n) { // Dynamic allocation of shared memory - See kernel call in host code extern __shared__ float smem[]; // Calculate 1D Index int idx = blockDim.x * blockIdx.x + threadIdx.x; // Copy input data to shared memory // Note: Use block index for shared memory // Also check for bounds if (idx < n) { smem[threadIdx.x] = d_idata[idx]; } // Where do we need to put __syncthreads()? Do we need it at all? __syncthreads(); // Reduce within block // Start from c = 1, upto block size, each time doubling the offset for (int c = 1; c < blockDim.x; c *= 2) { if (threadIdx.x % (2 * c) == 0) { smem[threadIdx.x] += smem[threadIdx.x + c]; } __syncthreads(); } // Copy result of reduction to global memory // Which index of d_odata do we write to? // In which index of smem is the result stored? // Do we need another syncthreads before writing to global memory? 
// Use only one thread to write to global memory if (threadIdx.x == 0) { d_odata[blockIdx.x] = smem[0]; } } //////////////////////////////////////////////////////////////////////////////// // Reduction Stage 1: Non-divergent Addressing // d_idata : Device pointer to input // d_odata : Device pointer to output // n : Number of elements to reduce // // The only difference between stage0 and stage1 is the reduction for loop //////////////////////////////////////////////////////////////////////////////// __global__ void reduce_stage1(const float* d_idata, float* d_odata, int n) { // Allocate dynamic shared memory, Calculate 1D Index and Copy input data into shared memory // Exactly same as reduce_stage0 extern __shared__ float smem[]; int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < n) { smem[threadIdx.x] = d_idata[idx]; } // This is the part that differes from reduce_stage0 // Reduce within block with coalesced indexing pattern // Change the for-loop to use indexing that reduces warp-divergence for (int c = 1; c < blockDim.x; c *= 2) { int index = threadIdx.x * 2 * c; if (index + c < blockDim.x) { smem[index] += smem[index + c]; } __syncthreads(); } // Copy result of reduction to global memory - Same as reduce_stage0 if (threadIdx.x == 0) { d_odata[blockIdx.x] = smem[0]; } } //////////////////////////////////////////////////////////////////////////////// // Reduction Stage 2: Warp Management without Bank Conflicts // d_idata : Device pointer to input // d_odata : Device pointer to output // n : Number of elements to reduce // // The only difference between stage1 and stage2 is the reduction for loop // This time, we reduce start from blockDim.x and divide by 2 in each iteration //////////////////////////////////////////////////////////////////////////////// __global__ void reduce_stage2(const float* d_idata, float* d_odata, int n) { // Allocate dynamic shared memory, Calculate 1D Index and Copy input data into shared memory // Exactly same as reduce_stage1 extern __shared__ float smem[]; int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < n) { smem[threadIdx.x] = d_idata[idx]; } // This is the part that differes from reduce_stage1 // Reduce within block with coalesced indexing pattern and avoid bank conflicts // Change the for-loop to use indexing that reduces warp-divergence // Start from blockDim.x / 2 and divide by 2 until we hit 1 for (int c = blockDim.x / 2 ; c > 0 ; c >>= 1) { if (threadIdx.x < c) { smem[threadIdx.x] += smem[threadIdx.x + c]; } __syncthreads(); } // Copy result of reduction to global memory - Same as reduce_stage1 if (threadIdx.x == 0) { d_odata[blockIdx.x] = smem[0]; } } //////////////////////////////////////////////////////////////////////////////// // Reduction Stage 3: Add During Load, Use tile to reduce number of blocks // d_idata : Device pointer to input // d_odata : Device pointer to output // n : Number of elements to reduce // stage3_TILE : Tiling factor // // In this kernel, we will add on load when copying data into shared memory // The difference between stage3 and stage2 is how we load data into shared memory // Each block does work of stage3_TILE * blockDim.x elements //////////////////////////////////////////////////////////////////////////////// const int stage3_TILE = 2; __global__ void reduce_stage3(const float* d_idata, float* d_odata, int n) { // Allocate dynamic shared memory extern __shared__ float smem[]; // Calculate 1D index similar to stage2, but multiply by stage3_TILE int idx = 0; // Copy input data to shared memory. 
Add on load. // Reduce the block same as reduce_stage2 //Copy result of reduction to global memory - Same as reduce_stage2 } // warpReduce function for reduce_stag4 that reduces 2 warps into one value __device__ void warpReduce(volatile float* smem, int tid) { //Write code for warp reduce here smem[tid] += smem[tid + 32]; smem[tid] += smem[tid + 16]; smem[tid] += smem[tid + 8 ]; smem[tid] += smem[tid + 4 ]; smem[tid] += smem[tid + 2 ]; smem[tid] += smem[tid + 1 ]; } //////////////////////////////////////////////////////////////////////////////// // Reduction Stage 4: Warp Loop Unrolling // d_idata : Device pointer to input // d_odata : Device pointer to output // n : Number of elements to reduce // stage4_TILE : Tiling factor - How does tuning this change performance? // // The reduce_stage4 kernel improves on reduce_stage3 by unrolling the block // reduction by unrolling the loop that operates within a warp. // Each block does work of stage4_TILE * blockDim.x elements // // This kernel also uses the warpReduce device function above //////////////////////////////////////////////////////////////////////////////// const int stage4_TILE = 2; __global__ void reduce_stage4(const float* d_idata, float* d_odata, int n) { // Allocate dynamic shared memory, Calculate 1D Index and // Copy input data and add on load into shared memory // Exactly same as reduce_stage3. Use stage4_TILE instead of stage3_TILE. extern __shared__ float smem[]; int idx = 0; // Reduce within block with coalesced indexing pattern and avoid bank conflicts // Split the block reduction into 2 parts. // Part 1 is the same as reduce stage3, but only for c > 32 // Part 2 then uses the warpReduce function to reduce the 2 warps // The reason we stop the previous loop at c > 32 is because // warpReduce can reduce 2 warps only 1 warp // Copy result of reduction to global memory - Same as reduce_stage3 } //////////////////////////////////////////////////////////////////////////////// // Reduction Stage 5: Completely unrolled blocks using templates // d_idata : Device pointer to input // d_odata : Device pointer to output // n : Number of elements to reduce // stage5_TILE : Tiling factor - How does tuning this change performance? // // The reduce_stage5 kernel is the same as reduce_stage4 except part 1 of block reduction // We simply unroll the entire for loop into individual statements wrapper by if conditions // Why do we need to use templates? How do they improve performance? // Each block does work of stage5_TILE * blockDim.x elements // // This kernel also uses the warpReduce device function above //////////////////////////////////////////////////////////////////////////////// const int stage5_TILE = 2; template<unsigned int blockSize> __global__ void reduce_stage5(const float* d_idata, float* d_odata, int n) { // Allocate dynamic shared memory, Calculate 1D Index and // Copy input data and add on load into shared memory // Exactly same as reduce_stage4. Use stage5_TILE instead of stage4_TILE. 
// Use #pragma unroll around the load loop extern __shared__ float smem[]; int idx = 0; // Store the threadIdx.x in a register int tid = threadIdx.x; // Reduce the block using the same part1 and part2 split that we used in reduce_stage4 // Except, here write explicit statements instead of the for loops // Part 2 is the same as reduce_stage4 // Copy result of reduction to global memory - Same as reduce_stage4 } int main() { // Calculate bytes needed for input const unsigned bytes = n_elements * sizeof(float); // Allocate memory and initialize elements // Let's use pinned memory for host float *h_idata; CUDA(cudaMallocHost((void**)&h_idata, bytes)); // Fill random values into the host array { std::random_device randomDevice; std::mt19937 generator(randomDevice()); std::uniform_real_distribution<float> distribution(-1, 1); for (int i = 0; i < n_elements; i++) { h_idata[i] = distribution(generator); } } // Copy input data into device memory float *d_idata = NULL; CUDA(cudaMalloc((void **)&d_idata, bytes)); CUDA(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); // Compute Gold Standard using CPU const float gold_result = reduce_cpu(h_idata, n_elements); // Create CUDA events for timing cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); //////////////////////////////////////////////////////////// std::cout << "******************************************" << std::endl; std::cout << "***CPU Reduce***" << std::endl; { nvtxRangeId_t range = nvtxRangeStart("CPU Reduce"); float cpu_result = 0; int iterations = 10; // start the timer Timer hTimer; nvtxRangeId_t rangeBenchmark = nvtxRangeStart("CPU Reduce Benchmark"); for(int k = 0; k < iterations; k++) { cpu_result = reduce_cpu(h_idata, n_elements); } nvtxRangeEnd(rangeBenchmark); // stop the timer double time = hTimer.elapsed() * 1000; //ms if(postprocess(&cpu_result, &gold_result, 1)) printResults(time, iterations); nvtxRangeEnd(range); } std::cout << "******************************************" << std::endl << std::endl; //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// std::cout << "******************************************" << std::endl; std::cout << "***Reduction Stage 0***" << std::endl; { nvtxRangeId_t range = nvtxRangeStart("Reduction Stage 0"); //Calculate Threads per block and total blocks required DIMS1D dims; dims.dimThreads = threads; dims.dimBlocks = divup(n_elements, dims.dimThreads); // Copy input data to device CUDA(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); // Calculate bytes needed for output size_t block_bytes = dims.dimBlocks * sizeof(float); // Allocate memory for output on device float *d_odata = NULL; CUDA(cudaMalloc((void**)&d_odata, block_bytes)); CUDA(cudaMemset(d_odata, 0, block_bytes)); // Call the kernel. Allocate dynamic shared memory reduce_stage0<<<dims.dimBlocks, dims.dimThreads, sizeof(float) * dims.dimThreads>>>(d_idata, d_odata, n_elements); // Copy result of block reduce to CPU and run CPU reduce float *h_blocks = (float *)malloc(dims.dimBlocks * sizeof(float)); CUDA(cudaMemcpy(h_blocks, d_odata, dims.dimBlocks * sizeof(float), cudaMemcpyDeviceToHost)); // Secondary reduce on CPU float gpu_result = 0; for(int i = 0; i < dims.dimBlocks; i++) gpu_result += h_blocks[i]; // Check the result and then run the benchmark. 
if(postprocess(&gpu_result, &gold_result, 1)) { nvtxRangeId_t rangeBenchmark = nvtxRangeStart("Reduction Stage 0 Benchmark"); //Start Benchmark int iterations = 10; CUDA(cudaEventRecord(start, 0)); // Run multiple times for a good benchmark for(int i = 0; i < iterations; i++) { reduce_stage0<<<dims.dimBlocks, dims.dimThreads, sizeof(float) * dims.dimThreads>>>(d_idata, d_odata, n_elements); cudaMemcpy(h_blocks, d_odata, dims.dimBlocks * sizeof(float), cudaMemcpyDeviceToHost); for(int i = 0; i < dims.dimBlocks; i++) gpu_result += h_blocks[i]; } CUDA(cudaEventRecord(stop, 0)); CUDA(cudaEventSynchronize(stop)); nvtxRangeEnd(rangeBenchmark); float time_ms; CUDA(cudaEventElapsedTime(&time_ms, start, stop)); printResults(time_ms, iterations); } // Cleanup free(h_blocks); cudaFree(d_odata); nvtxRangeEnd(range); } std::cout << "******************************************" << std::endl << std::endl; //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// std::cout << "******************************************" << std::endl; std::cout << "***Reduction Stage 1***" << std::endl; { nvtxRangeId_t range = nvtxRangeStart("Reduction Stage 1"); //Calculate Threads per block and total blocks required DIMS1D dims; dims.dimThreads = threads; dims.dimBlocks = divup(n_elements, dims.dimThreads); // Copy input data to device CUDA(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); // Calculate bytes needed for output size_t block_bytes = dims.dimBlocks * sizeof(float); // Allocate memory for output on device float *d_odata = NULL; CUDA(cudaMalloc((void**)&d_odata, block_bytes)); CUDA(cudaMemset(d_odata, 0, block_bytes)); // Call the kernel. Allocate dynamic shared memory reduce_stage1<<<dims.dimBlocks, dims.dimThreads, sizeof(float) * dims.dimThreads>>>(d_idata, d_odata, n_elements); // Copy result of block reduce to CPU and run CPU reduce float *h_blocks = (float *)malloc(block_bytes); CUDA(cudaMemcpy(h_blocks, d_odata, block_bytes, cudaMemcpyDeviceToHost)); // Secondary reduce on CPU float gpu_result = 0; for(int i = 0; i < dims.dimBlocks; i++) gpu_result += h_blocks[i]; // Check the result and then run the benchmark. 
if(postprocess(&gpu_result, &gold_result, 1)) { nvtxRangeId_t rangeBenchmark = nvtxRangeStart("Reduction Stage 1 Benchmark"); //Start Benchmark int iterations = 10; CUDA(cudaEventRecord(start, 0)); // Run multiple times for a good benchmark for(int i = 0; i < iterations; i++) { reduce_stage1<<<dims.dimBlocks, dims.dimThreads, sizeof(float) * dims.dimThreads>>>(d_idata, d_odata, n_elements); cudaMemcpy(h_blocks, d_odata, block_bytes, cudaMemcpyDeviceToHost); for(int i = 0; i < dims.dimBlocks; i++) gpu_result += h_blocks[i]; } CUDA(cudaEventRecord(stop, 0)); CUDA(cudaEventSynchronize(stop)); nvtxRangeEnd(rangeBenchmark); float time_ms; CUDA(cudaEventElapsedTime(&time_ms, start, stop)); printResults(time_ms, iterations); } // Cleanup free(h_blocks); cudaFree(d_odata); nvtxRangeEnd(range); } std::cout << "******************************************" << std::endl << std::endl; //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// std::cout << "******************************************" << std::endl; std::cout << "***Reduction Stage 2***" << std::endl; { nvtxRangeId_t range = nvtxRangeStart("Reduction Stage 2"); //Calculate Threads per block and total blocks required DIMS1D dims; dims.dimThreads = threads; dims.dimBlocks = divup(n_elements, dims.dimThreads); // Copy input data to device CUDA(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); // Calculate bytes needed for output size_t block_bytes = dims.dimBlocks * sizeof(float); // Allocate memory for output on device float *d_odata = NULL; CUDA(cudaMalloc((void**)&d_odata, block_bytes)); CUDA(cudaMemset(d_odata, 0, block_bytes)); // Call the kernel. Allocate dynamic shared memory reduce_stage2<<<dims.dimBlocks, dims.dimThreads, sizeof(float) * dims.dimThreads>>>(d_idata, d_odata, n_elements); // Copy result of block reduce to CPU and run CPU reduce float *h_blocks = (float *)malloc(block_bytes); CUDA(cudaMemcpy(h_blocks, d_odata, block_bytes, cudaMemcpyDeviceToHost)); // Secondary reduce on CPU float gpu_result = 0; for(int i = 0; i < dims.dimBlocks; i++) gpu_result += h_blocks[i]; // Check the result and then run the benchmark. 
if(postprocess(&gpu_result, &gold_result, 1)) { nvtxRangeId_t rangeBenchmark = nvtxRangeStart("Reduction Stage 2 Benchmark"); //Start Benchmark int iterations = 10; CUDA(cudaEventRecord(start, 0)); // Run multiple times for a good benchmark for(int i = 0; i < iterations; i++) { reduce_stage2<<<dims.dimBlocks, dims.dimThreads, sizeof(float) * dims.dimThreads>>>(d_idata, d_odata, n_elements); cudaMemcpy(h_blocks, d_odata, block_bytes, cudaMemcpyDeviceToHost); for(int i = 0; i < dims.dimBlocks; i++) gpu_result += h_blocks[i]; } CUDA(cudaEventRecord(stop, 0)); CUDA(cudaEventSynchronize(stop)); nvtxRangeEnd(rangeBenchmark); float time_ms; CUDA(cudaEventElapsedTime(&time_ms, start, stop)); printResults(time_ms, iterations); } // Cleanup free(h_blocks); cudaFree(d_odata); nvtxRangeEnd(range); } std::cout << "******************************************" << std::endl << std::endl; //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// std::cout << "******************************************" << std::endl; std::cout << "***Reduction Stage 3***" << std::endl; { nvtxRangeId_t range = nvtxRangeStart("Reduction Stage 3"); // Calculate Threads per block and total blocks required // Use stage3_TILE in your grid calculation DIMS1D dims; dims.dimThreads = threads; dims.dimBlocks = divup(n_elements, dims.dimThreads * stage3_TILE); // Copy input data to device CUDA(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); // Calculate bytes needed for output size_t block_bytes = dims.dimBlocks * sizeof(float); // Allocate memory for output on device float *d_odata = NULL; CUDA(cudaMalloc((void**)&d_odata, block_bytes)); CUDA(cudaMemset(d_odata, 0, block_bytes)); // Call the kernel. Allocate dynamic shared memory reduce_stage3<<<dims.dimBlocks, dims.dimThreads, sizeof(float) * dims.dimThreads>>>(d_idata, d_odata, n_elements); // Copy result of block reduce to CPU and run CPU reduce float *h_blocks = (float *)malloc(block_bytes); CUDA(cudaMemcpy(h_blocks, d_odata, block_bytes, cudaMemcpyDeviceToHost)); // Secondary reduce on CPU float gpu_result = 0; for(int i = 0; i < dims.dimBlocks; i++) gpu_result += h_blocks[i]; // Check the result and then run the benchmark. 
if(postprocess(&gpu_result, &gold_result, 1)) { nvtxRangeId_t rangeBenchmark = nvtxRangeStart("Reduction Stage 3 Benchmark"); //Start Benchmark int iterations = 10; CUDA(cudaEventRecord(start, 0)); // Run multiple times for a good benchmark for(int i = 0; i < iterations; i++) { reduce_stage3<<<dims.dimBlocks, dims.dimThreads, sizeof(float) * dims.dimThreads>>>(d_idata, d_odata, n_elements); cudaMemcpy(h_blocks, d_odata, block_bytes, cudaMemcpyDeviceToHost); for(int i = 0; i < dims.dimBlocks; i++) gpu_result += h_blocks[i]; } CUDA(cudaEventRecord(stop, 0)); CUDA(cudaEventSynchronize(stop)); nvtxRangeEnd(rangeBenchmark); float time_ms; CUDA(cudaEventElapsedTime(&time_ms, start, stop)); printResults(time_ms, iterations); } // Cleanup free(h_blocks); cudaFree(d_odata); nvtxRangeEnd(range); } std::cout << "******************************************" << std::endl << std::endl; //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// std::cout << "******************************************" << std::endl; std::cout << "***Reduction Stage 4***" << std::endl; { nvtxRangeId_t range = nvtxRangeStart("Reduction Stage 4"); // Calculate Threads per block and total blocks required // Use stage4_TILE in your grid calculation DIMS1D dims; dims.dimThreads = threads; dims.dimBlocks = divup(n_elements, dims.dimThreads * stage4_TILE); // Copy input data to device CUDA(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); // Calculate bytes needed for output size_t block_bytes = dims.dimBlocks * sizeof(float); // Allocate memory for output on device float *d_odata = NULL; CUDA(cudaMalloc((void**)&d_odata, block_bytes)); CUDA(cudaMemset(d_odata, 0, block_bytes)); // Call the kernel. Allocate dynamic shared memory reduce_stage4<<<dims.dimBlocks, dims.dimThreads, sizeof(float) * dims.dimThreads>>>(d_idata, d_odata, n_elements); // Copy result of block reduce to CPU and run CPU reduce float *h_blocks = (float *)malloc(block_bytes); CUDA(cudaMemcpy(h_blocks, d_odata, block_bytes, cudaMemcpyDeviceToHost)); // Secondary reduce on CPU float gpu_result = 0; for(int i = 0; i < dims.dimBlocks; i++) gpu_result += h_blocks[i]; // Check the result and then run the benchmark. 
if(postprocess(&gpu_result, &gold_result, 1)) { nvtxRangeId_t rangeBenchmark = nvtxRangeStart("Reduction Stage 4 Benchmark"); //Start Benchmark int iterations = 10; CUDA(cudaEventRecord(start, 0)); // Run multiple times for a good benchmark for(int i = 0; i < iterations; i++) { reduce_stage4<<<dims.dimBlocks, dims.dimThreads, sizeof(float) * dims.dimThreads>>>(d_idata, d_odata, n_elements); cudaMemcpy(h_blocks, d_odata, block_bytes, cudaMemcpyDeviceToHost); for(int i = 0; i < dims.dimBlocks; i++) gpu_result += h_blocks[i]; } CUDA(cudaEventRecord(stop, 0)); CUDA(cudaEventSynchronize(stop)); nvtxRangeEnd(rangeBenchmark); float time_ms; CUDA(cudaEventElapsedTime(&time_ms, start, stop)); printResults(time_ms, iterations); } // Cleanup free(h_blocks); cudaFree(d_odata); nvtxRangeEnd(range); } std::cout << "******************************************" << std::endl << std::endl; //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// std::cout << "******************************************" << std::endl; std::cout << "***Reduction Stage 5***" << std::endl; { nvtxRangeId_t range = nvtxRangeStart("Reduction Stage 5"); // Calculate Threads per block and total blocks required // Use stage5_TILE in your grid calculation DIMS1D dims; dims.dimThreads = threads; dims.dimBlocks = divup(n_elements, dims.dimThreads * stage5_TILE); // Copy input data to device CUDA(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); // Calculate bytes needed for output size_t block_bytes = dims.dimBlocks * sizeof(float); // Allocate memory for output on device float *d_odata = NULL; CUDA(cudaMalloc((void**)&d_odata, block_bytes)); CUDA(cudaMemset(d_odata, 0, block_bytes)); // Call the kernel. Allocate dynamic shared memory // Don't forget to add the template reduce_stage5<threads><<<dims.dimBlocks, dims.dimThreads, sizeof(float) * dims.dimThreads>>>(d_idata, d_odata, n_elements); // Copy result of block reduce to CPU and run CPU reduce float *h_blocks = (float *)malloc(block_bytes); CUDA(cudaMemcpy(h_blocks, d_odata, block_bytes, cudaMemcpyDeviceToHost)); // Secondary reduce on CPU float gpu_result = 0; for(int i = 0; i < dims.dimBlocks; i++) gpu_result += h_blocks[i]; // Check the result and then run the benchmark. if(postprocess(&gpu_result, &gold_result, 1)) { nvtxRangeId_t rangeBenchmark = nvtxRangeStart("Reduction Stage 5 Benchmark"); //Start Benchmark int iterations = 10; CUDA(cudaEventRecord(start, 0)); // Run multiple times for a good benchmark for(int i = 0; i < iterations; i++) { reduce_stage5<threads><<<dims.dimBlocks, dims.dimThreads, sizeof(float) * dims.dimThreads>>>(d_idata, d_odata, n_elements); cudaMemcpy(h_blocks, d_odata, block_bytes, cudaMemcpyDeviceToHost); for(int i = 0; i < dims.dimBlocks; i++) gpu_result += h_blocks[i]; } CUDA(cudaEventRecord(stop, 0)); CUDA(cudaEventSynchronize(stop)); nvtxRangeEnd(rangeBenchmark); float time_ms; CUDA(cudaEventElapsedTime(&time_ms, start, stop)); printResults(time_ms, iterations); } // Cleanup free(h_blocks); cudaFree(d_odata); nvtxRangeEnd(range); } std::cout << "******************************************" << std::endl << std::endl; //////////////////////////////////////////////////////////// // Cleanup CUDA(cudaEventDestroy(start)); CUDA(cudaEventDestroy(stop)); CUDA(cudaFreeHost(h_idata)); CUDA(cudaFree(d_idata)); return 0; }
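// --- Editor's illustrative sketch (appended; not part of the original exercise file) ---
// reduce_stage5 asks for the block reduction to be completely unrolled with the block
// size supplied as a template parameter. One possible shape for that "part 1", assuming
// blockDim.x == blockSize is a power of two no larger than 1024, is shown below; the
// helper name is hypothetical. Because blockSize is known at compile time, every
// 'if (blockSize >= X)' test is resolved by the compiler and dead branches generate no
// code, which is the performance benefit the template buys.
template <unsigned int blockSize>
__device__ void blockReduceUnrolled_sketch(volatile float* smem, int tid)
{
    if (blockSize >= 1024) { if (tid < 512) smem[tid] += smem[tid + 512]; __syncthreads(); }
    if (blockSize >=  512) { if (tid < 256) smem[tid] += smem[tid + 256]; __syncthreads(); }
    if (blockSize >=  256) { if (tid < 128) smem[tid] += smem[tid + 128]; __syncthreads(); }
    if (blockSize >=  128) { if (tid <  64) smem[tid] += smem[tid +  64]; __syncthreads(); }

    // Part 2: the last 64 values are folded by the same warpReduce used in reduce_stage4.
    if (tid < 32) warpReduce(smem, tid);
}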
3c517226af1a84a9bacd37e50be16d3af369d70a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include "paddle/fluid/operators/math/blas.h" #include "paddle/fluid/operators/math/fc.h" namespace paddle { namespace operators { namespace math { template <typename T, bool DoRelu> __global__ void InplaceAddReluKernel(const T* bias, T* data, int M, int N) { for (int i = blockIdx.x; i < M; i += gridDim.x) { int index = i * N + threadIdx.x; for (int j = threadIdx.x; j < N; j += blockDim.x) { T tmp = data[index] + bias[j]; if (DoRelu) { data[index] = (tmp > 0) ? tmp : 0; } else { data[index] = tmp; } index += blockDim.x; } } } template <typename T> class FCFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const int M, const int N, const int K, const T* X, const T* W, T* Y, const T* B = nullptr, bool relu = false) { auto blas = math::GetBlas<platform::CUDADeviceContext, T>(context); blas.GEMM(false, false, M, N, K, static_cast<T>(1.0), X, K, W, N, static_cast<T>(0.0), Y, N); if (B == NULL) { return; } const int kThreadsPerBlock = 1024; int max_threads = context.GetMaxPhysicalThreadCount(); int num_threads = ::min(kThreadsPerBlock, (((N + 31) >> 5) << 5)); int num_blocks = ::max(max_threads / num_threads, 1); if (relu) { hipLaunchKernelGGL(( InplaceAddReluKernel< T, true>), dim3(num_blocks), dim3(num_threads), 0, context.stream(), B, Y, M, N); } else { hipLaunchKernelGGL(( InplaceAddReluKernel< T, false>), dim3(num_blocks), dim3(num_threads), 0, context.stream(), B, Y, M, N); } } }; template class FCFunctor<platform::CUDADeviceContext, float>; template class FCFunctor<platform::CUDADeviceContext, double>; } // namespace math } // namespace operators } // namespace paddle
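// --- Editor's illustrative sketch (appended; not part of the Paddle source) ---
// FCFunctor above computes Y(MxN) = X(MxK) * W(KxN) via GEMM, then InplaceAddReluKernel
// adds the bias B(N) to every row and optionally applies ReLU in place. A plain host
// reference of the same math, useful only for checking small cases (the function name
// is hypothetical):
template <typename T>
void fc_reference(int M, int N, int K, const T* X, const T* W, T* Y,
                  const T* B = nullptr, bool relu = false) {
  for (int i = 0; i < M; ++i) {
    for (int j = 0; j < N; ++j) {
      T acc = static_cast<T>(0);
      for (int k = 0; k < K; ++k) acc += X[i * K + k] * W[k * N + j];   // row-major GEMM
      if (B != nullptr) acc += B[j];                                    // bias broadcast over rows
      Y[i * N + j] = (relu && acc < static_cast<T>(0)) ? static_cast<T>(0) : acc;
    }
  }
}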
3c517226af1a84a9bacd37e50be16d3af369d70a.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include "paddle/fluid/operators/math/blas.h" #include "paddle/fluid/operators/math/fc.h" namespace paddle { namespace operators { namespace math { template <typename T, bool DoRelu> __global__ void InplaceAddReluKernel(const T* bias, T* data, int M, int N) { for (int i = blockIdx.x; i < M; i += gridDim.x) { int index = i * N + threadIdx.x; for (int j = threadIdx.x; j < N; j += blockDim.x) { T tmp = data[index] + bias[j]; if (DoRelu) { data[index] = (tmp > 0) ? tmp : 0; } else { data[index] = tmp; } index += blockDim.x; } } } template <typename T> class FCFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const int M, const int N, const int K, const T* X, const T* W, T* Y, const T* B = nullptr, bool relu = false) { auto blas = math::GetBlas<platform::CUDADeviceContext, T>(context); blas.GEMM(false, false, M, N, K, static_cast<T>(1.0), X, K, W, N, static_cast<T>(0.0), Y, N); if (B == NULL) { return; } const int kThreadsPerBlock = 1024; int max_threads = context.GetMaxPhysicalThreadCount(); int num_threads = std::min(kThreadsPerBlock, (((N + 31) >> 5) << 5)); int num_blocks = std::max(max_threads / num_threads, 1); if (relu) { InplaceAddReluKernel< T, true><<<num_blocks, num_threads, 0, context.stream()>>>(B, Y, M, N); } else { InplaceAddReluKernel< T, false><<<num_blocks, num_threads, 0, context.stream()>>>(B, Y, M, N); } } }; template class FCFunctor<platform::CUDADeviceContext, float>; template class FCFunctor<platform::CUDADeviceContext, double>; } // namespace math } // namespace operators } // namespace paddle
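// --- Editor's note (appended; illustration only) ---
// The launch configuration above rounds N up to the next warp multiple and caps the
// block at kThreadsPerBlock = 1024; num_blocks is then chosen to fill the device
// (max_threads / num_threads) and the kernel grid-strides over the M rows.
// The bit trick (((N + 31) >> 5) << 5) equals 32 * ceil(N / 32):
//   N = 1    ->   32 threads
//   N = 33   ->   64 threads
//   N = 2000 -> min(1024, 2016) = 1024 threads
// A plain-integer equivalent (helper name hypothetical):
static inline int fc_num_threads(int N) {
  int rounded = ((N + 31) / 32) * 32;          // round N up to a multiple of 32
  return rounded < 1024 ? rounded : 1024;      // cap at kThreadsPerBlock
}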
2a6a99a5b8e10b8582bba4c683e4af922a115f7b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorMath.cu" #else void THCTensor_(fill)(THCState* state, THCTensor *self_, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); if (!THC_pointwiseApply1<scalar_t>( state, self_, TensorFillOp<scalar_t>(value))) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } THCudaCheck(hipGetLastError()); } void THCTensor_(zero)(THCState *state, THCTensor *self_) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); if (THCTensor_(isContiguous)(state, self_)) { THCudaCheck(hipMemsetAsync(THCTensor_(data)(state, self_), 0, sizeof(scalar_t) * THCTensor_(nElement)(state, self_), THCState_getCurrentStream(state))); } else { if (!THC_pointwiseApply1<scalar_t>( state, self_, TensorFillOp<scalar_t>(ScalarConvert<int, scalar_t>::to(0)))) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); } void THCTensor_(zerosLike)(THCState *state, THCTensor *r_, THCTensor *input) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, r_, input)); THCTensor_(resizeAs)(state, r_, input); THCTensor_(zero)(state, r_); } void THCTensor_(onesLike)(THCState *state, THCTensor *r_, THCTensor *input) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, r_, input)); THCTensor_(resizeAs)(state, r_, input); THCTensor_(fill)(state, r_, ScalarConvert<int, scalar_t>::to(1)); } ptrdiff_t THCTensor_(numel)(THCState *state, THCTensor *t) { return THCTensor_(nElement)(state, t); } void THCTensor_(cat)(THCState *state, THCTensor *result, THCTensor *ta, THCTensor *tb, int dimension) { THCTensor* inputs[2]; inputs[0] = ta; inputs[1] = tb; THCTensor_(catArray)(state, result, inputs, 2, dimension); } void THCTensor_(check_shape_except_dim)(THCState *state, THCTensor *first, THCTensor *second, int dimension); inline void THCTensor_(check_shape_except_dim)(THCState *state, THCTensor *first, THCTensor *second, int dimension) { int first_dims = first->dim(); int second_dims = second->dim(); THArgCheck(first_dims == second_dims, 0, "Tensors must have same number of dimensions: got %d and %d", first_dims, second_dims); for (int dim = 0; dim < first_dims; dim++) { if (dim == dimension) { continue; } int64_t first_dim_size = THCTensor_(size)(state, first, dim); int64_t second_dim_size = THCTensor_(size)(state, second, dim); THArgCheck(first_dim_size == second_dim_size, 0, "Sizes of tensors must match except in dimension %d. Got %lld and %lld in dimension %d", dimension, (long long)first_dim_size, (long long)second_dim_size, dim); } } void THCTensor_(catArray)(THCState *state, THCTensor *result, THCTensor **inputs, int numInputs, int dimension) { // previously, size [0] tensors were the only possible empty tensors; thus, it wasn't possible // to cat empty tensors unless all the other tensors were 1-dimensional, so we allowed these tensors // to be "skipped". We maintain this behavior for backwards compatibility, but only for this specific // size (i.e. other empty sizes are not skipped). 
// FIXME: warn if this is the case int i, j, cohortMax; int64_t offset; bool hasSkippedInput = false; THCTensor *notSkippedTensor = NULL; // non-owning reference auto should_skip = [](THCTensor *t) { return t->is_empty() && t->dim() == 1; }; int nDims = 0; for (i = 0; i < numInputs; i++) { if (should_skip(inputs[i])) { hasSkippedInput = true; continue; } nDims = inputs[i]->dim(); notSkippedTensor = inputs[i]; } // If all inputs are empty tensors, return an empty tensor if (notSkippedTensor == NULL) { return; } THArgCheck(numInputs > 0, 3, "invalid number of inputs %d", numInputs); THArgCheck(dimension >= 0, 4, "invalid dimension %d", dimension); std::vector<int64_t> size(nDims); // Compute size of the result in the cat dimension int64_t cat_dim_size = 0; for (int i = 0; i < numInputs; i++) { THCTensor *tensor = inputs[i]; if (should_skip(tensor)) { continue; } THCTensor_(check_shape_except_dim)(state, notSkippedTensor, tensor, dimension); cat_dim_size += THCTensor_(size)(state, tensor, dimension); } // Compute the size of the result for (int dim = 0; dim < nDims; dim++) { int64_t result_dim_size = THCTensor_(size)(state, notSkippedTensor, dim); if (dim == dimension) { result_dim_size = cat_dim_size; } size[dim] = result_dim_size; } THCTensor_(resize)(state, result, size, {}); // We parallelize the copy if all 6 conditions pass: // // 1. There is more than one input tensor // 2. No empty inputs // 3. The result tensor is 32-bit indexable // 4. The number of dimensions is <= 4 // 5. All input tensors are contiguous (output tensor may be non-contig) // 6. All input tensors can use 32-bit indexing // 7. All input tensors are on the same device if (numInputs > 1 && !hasSkippedInput && result->dim() <= CAT_ARRAY_MAX_INPUT_DIMS && THCTensor_canUse32BitIndexMath(state, result) && THCTensor_allContiguous(state, inputs, numInputs) && THCTensor_all32BitIndexable(state, inputs, numInputs) && THCTensor_allSameDevice(state, inputs, numInputs)) { // First, let's set up our kernel parameters. We start with a raw pointer to the storage // for the output Tensor. scalar_t *data = THCTensor_(data)(state, result); // Kernel Parameter size_t tensorMetadataSize = sizeof(CatArrInputTensor<scalar_t, unsigned int>) * CAT_ARRAY_BATCH_SIZE; auto d_inputs = static_cast<CatArrInputTensor<scalar_t, unsigned int> *>(THCudaMalloc(state, tensorMetadataSize)); OutputTensorSizeStride<unsigned int, CAT_ARRAY_MAX_INPUT_DIMS> param; // Next, let's initialize the size, stride arrays for the output Tensor. 
for (i = 0; i < nDims; ++i) { param.outputSize[i] = THCTensor_(size)(state, result, i); param.outputStride[i] = THCTensor_(stride)(state, result, i); } at::hip::HIPStreamMasqueradingAsCUDA stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); // Template Declarations for dim = 1, 2, 3, 4 #define HANDLE_CASE(DIMS) \ hipLaunchKernelGGL(( CatArrayBatchedCopy<scalar_t, unsigned int, DIMS>), dim3(catGrid), dim3(applyBlock), 0, stream.stream(), data, d_inputs, param, dimension, param.outputStride[dimension]); // Now we loop offset = 0; for (i = 0; i < numInputs; i += CAT_ARRAY_BATCH_SIZE) { // Re-allocate stackInputs every iteration to avoid read-after-write hazard { auto stackInputs_owner = THCudaHostAlloc(state, tensorMetadataSize); CatArrInputTensor<scalar_t, unsigned int>* stackInputs = static_cast<CatArrInputTensor<scalar_t, unsigned int>*>(stackInputs_owner.get()); cohortMax = 0; for (j = 0; j < CAT_ARRAY_BATCH_SIZE && (i+j) < numInputs; ++j) { int64_t dimSize = THCTensor_(size)(state, inputs[i+j], dimension); stackInputs[j].input = THCTensor_(data)(state, inputs[i+j]); stackInputs[j].offset = offset; stackInputs[j].dimSize = dimSize; stackInputs[j].nElements = THCTensor_(nElement)(state, inputs[i+j]); cohortMax = cohortMax > (int) stackInputs[j].nElements ? cohortMax : (int) stackInputs[j].nElements; // update offset offset += dimSize; } THCudaCheck(hipMemcpyAsync( d_inputs, stackInputs, j * sizeof(CatArrInputTensor<scalar_t, unsigned int>), hipMemcpyHostToDevice, stream.stream())); THCudaHostRecord(state, stackInputs); } // Next, let's consider how we set our kernel launch parameters. // We borrow from THCApply, which the kernel's internal indexing // is based on. dim3 applyBlock = getApplyBlock(); //Get grid where x dim fills half gpu and y dim is number of tensors. //This will have cating two tensors fill the entire grid, but prevent //many threads from needlessly load meta data if their sizes is small. 
dim3 catGrid; getCatGrid(state, j, catGrid); switch (nDims) { case 1: HANDLE_CASE(1); break; case 2: HANDLE_CASE(2); break; case 3: HANDLE_CASE(3); break; case 4: HANDLE_CASE(4); break; } THCudaCheck(hipGetLastError()); } THCudaFree(state, d_inputs); #undef HANDLE_CASE } else { offset = 0; for (j = 0; j < numInputs; j++) { if (should_skip(inputs[j])) continue; int64_t dimSize = THCTensor_(size)(state, inputs[j], dimension); THCTensor *nt = THCTensor_(newWithTensor)(state, result); THCTensor_(narrow)(state, nt, NULL, dimension, offset, dimSize); THCTensor_(copy)(state, nt, inputs[j]); THCTensor_(free)(state, nt); offset += dimSize; } } } void THCTensor_(nonzero)(THCState* state, THCudaLongTensor *tensor, THCTensor *self) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self )); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, tensor)); using namespace thrust::placeholders; THCThrustAllocator thrustAlloc(state); self = THCTensor_(newContiguous)(state, self); thrust::device_ptr<scalar_t> self_data(THCTensor_(data)(state, self)); int num_dim = THCTensor_(nDimensionLegacyNoScalars)(state, self); int64_t N = THCTensor_(nElement)(state, self); THCudaLongTensor_resize2d(state, tensor, N, num_dim); tensor = THCudaLongTensor_newContiguous(state, tensor); thrust::device_ptr<int64_t> tensor_data(THCudaLongTensor_data(state, tensor)); thrust::counting_iterator<int64_t> idxfirst(0); thrust::counting_iterator<int64_t> idxlast = idxfirst + N; typedef thrust::device_ptr<int64_t> Iter; strided_range<Iter> strided_tensor(tensor_data, tensor_data+N*num_dim, num_dim); #if TORCH_HIP_VERSION >= 7000 hipStream_t stream = THCState_getCurrentStream(state); #endif strided_range<Iter>::iterator dend = thrust::copy_if( #if TORCH_HIP_VERSION >= 7000 thrust::hip::par(thrustAlloc).on(stream), #endif idxfirst, idxlast, self_data, strided_tensor.begin(), NonZeroOp<scalar_t>() ); int64_t num_nonzeros = thrust::distance(strided_tensor.begin(), dend); int64_t div = 1; for (int dim = num_dim-1; dim >= 0; dim--) { strided_range<Iter> stride_dim(tensor_data+dim, tensor_data+N*num_dim, num_dim); thrust::transform( #if TORCH_HIP_VERSION >= 7000 thrust::hip::par(thrustAlloc).on(stream), #endif strided_tensor.begin(), strided_tensor.end(), stride_dim.begin(), idx_functor(div, THTensor_sizeLegacyNoScalars(self, dim)) ); div *= THTensor_sizeLegacyNoScalars(self, dim); } THCudaLongTensor_resize2d(state, tensor, num_nonzeros, num_dim); THCTensor_(free)(state, self); THCudaLongTensor_free(state, tensor); THCudaCheck(hipGetLastError()); } void THCTensor_(diag)(THCState *state, THCTensor *self_, THCTensor *src_, int64_t k){ THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_)); int nDimension = THCTensor_(nDimensionLegacyNoScalars)(state, src_); THArgCheck((nDimension == 2) || (nDimension == 1), 1, "expected a matrix or a vector"); if (nDimension == 2) { int64_t stride0 = THCTensor_(stride)(state, src_, 0); int64_t stride1 = THCTensor_(stride)(state, src_, 1); int64_t size0 = THCTensor_(size)(state, src_, 0); int64_t size1 = THCTensor_(size)(state, src_, 1); int64_t size = (k > 0) ? min((int64_t)size0, (int64_t)size1 - k) : min((int64_t)size0 + k, (int64_t)size1); THCTensor_(resize1d)(state, self_, size); if (size > 0) { int64_t strideSelf = THCTensor_(stride)(state, self_, 0); const dim3 threads(min((int64_t)THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock, (int64_t)size)); dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (int64_t)threads.x))); int64_t start = (k >= 0 ? 
k * stride1 : -k * stride0); hipLaunchKernelGGL(( THCTensor_copyFromDiagonal<scalar_t>), dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state), THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, size, stride0 + stride1, strideSelf); } } else { ptrdiff_t totalElements = THCTensor_(nElement)(state, src_); ptrdiff_t size = (k > 0) ? totalElements + k : totalElements - k; int64_t strideSrc = THTensor_strideLegacyNoScalars(src_, 0); THCTensor_(resize2d)(state, self_, size, size); THCTensor_(zero)(state, self_); if (size > 0) { int64_t stride0 = THCTensor_(stride)(state, self_, 0); int64_t stride1 = THCTensor_(stride)(state, self_, 1); const dim3 threads(min((int64_t)THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock, (int64_t)size)); dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (ptrdiff_t)threads.x))); ptrdiff_t start = (k >= 0 ? k * stride1 : -k * stride0); hipLaunchKernelGGL(( THCTensor_copyToDiagonal<scalar_t>), dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state), THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, totalElements, stride0 + stride1, strideSrc); } } THCudaCheck(hipGetLastError()); } void THCTensor_(eye)(THCState *state, THCTensor *self_, int64_t n, int64_t m) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); THArgCheck(n > 0, 1, "invalid argument"); if(m <= 0) m = n; THCTensor_(resize2d)(state, self_, n, m); THCTensor_(zero)(state, self_); int64_t sz = THMin(n, m); int64_t stride = THCTensor_(stride)(state, self_, 0) + THCTensor_(stride)(state, self_, 1); THCTensor *diag = THCTensor_(newWithStorage1d)(state, THTensor_getStoragePtr(self_), self_->storage_offset(), sz, stride); THCTensor_(fill)(state, diag, ScalarConvert<int, scalar_t>::to(1)); THCTensor_(free)(state, diag); } accreal THCTensor_(trace)(THCState *state, THCTensor *src_) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, src_)); THArgCheck((THTensor_nDimensionLegacyAll(src_) == 2), 1, "expected a matrix"); THCTensor *diag = THCTensor_(new)(state); THCTensor_(diag)(state, diag, src_, 0); accreal trace = THCTensor_(sumall)(state, diag); THCTensor_(free)(state, diag); return trace; } #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) void THCTensor_(linspace)(THCState *state, THCTensor *r_, scalar_t a, scalar_t b, int64_t n) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_)); // NumPy allows you to pass different points even if n <= 1 -- should we? THArgCheck(n > 1 || ((n == 0 || n == 1) && (a == b)), 3, "invalid number of points"); if (THCTensor_(nElement)(state, r_) != n) THCTensor_(resize1d)(state, r_, n); if (n == 0) { // skip } else if (n == 1) THCTensor_(fill)(state, r_, a); else { THCTensor *r = THCTensor_(isContiguous)(state, r_) ? r_ // if r_ is contiguous we can direct work on it : THCTensor_(newContiguous)(state, r_); scalar_t step = THCNumerics<scalar_t>::div(THCNumerics<scalar_t>::sub(b, a), ScalarConvert<int64_t,scalar_t>::to(n - 1)); LinspaceOp<scalar_t> linspace_method(a, step); thrust::device_ptr<scalar_t> data_(THCTensor_(data)(state, r)); thrust::tabulate(data_, data_ + n, linspace_method); if (!THCTensor_(isContiguous)(state, r_)) { // We need to move data back to r_ THCTensor_(freeCopyTo)(state, r, r_); } } THCudaCheck(hipGetLastError()); } void THCTensor_(logspace)(THCState *state, THCTensor *r_, scalar_t a, scalar_t b, int64_t n) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_)); // NumPy allows you to pass different points even if n <= 1 -- should we? 
THArgCheck(n > 1 || ((n == 0 || n == 1) && (a == b)), 3, "invalid number of points"); if (THCTensor_(nElement)(state, r_) != n) THCTensor_(resize1d)(state, r_, n); if (n == 0) { // skip } else if (n == 1) THCTensor_(fill)(state, r_, THCNumerics<scalar_t>::exp10(a)); else { THCTensor *r = THCTensor_(isContiguous)(state, r_) ? r_ : THCTensor_(newContiguous)(state, r_); scalar_t step = THCNumerics<scalar_t>::div(THCNumerics<scalar_t>::sub(b, a), ScalarConvert<int64_t,scalar_t>::to(n - 1)); LogspaceOp<scalar_t> logspace_method(a, step); thrust::device_ptr<scalar_t> data_(THCTensor_(data)(state, r)); thrust::tabulate(data_, data_ + n, logspace_method); if (!THCTensor_(isContiguous)(state, r_)) { THCTensor_(freeCopyTo)(state, r, r_); } } THCudaCheck(hipGetLastError()); } #endif void THCTensor_(range)(THCState *state, THCTensor *r_, accreal xmin, accreal xmax, accreal step) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_)); THArgCheck(step > 0 || step < 0, 3, "step must be nonzero"); THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin)) , 2, "upper bound and larger bound inconsistent with step sign"); ptrdiff_t size = (ptrdiff_t) (((xmax - xmin) / step) + 1); if (THCTensor_(nElement)(state, r_) != size) THCTensor_(resize1d)(state, r_, size); THCTensor *r = THCTensor_(newContiguous)(state, r_); LinspaceOp<scalar_t,accreal> linspace_method(xmin, step); thrust::device_ptr<scalar_t> data_(THCTensor_(data)(state, r)); thrust::tabulate(data_, data_ + size, linspace_method); THCTensor_(freeCopyTo)(state, r, r_); THCudaCheck(hipGetLastError()); } void THCTensor_(arange)(THCState* state, THCTensor *r_, accreal xmin, accreal xmax, accreal step) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_)); THArgCheck(step > 0 || step < 0, 3, "step must be nonzero"); THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin)) , 2, "upper bound and larger bound inconsistent with step sign"); ptrdiff_t size = (ptrdiff_t) ceil(ScalarConvert<accreal, double>::to(xmax - xmin) / step); if (THCTensor_(nElement)(state, r_) != size) THCTensor_(resize1d)(state, r_, size); THCTensor *r = THCTensor_(newContiguous)(state, r_); LinspaceOp<scalar_t,accreal> linspace_method(xmin, step); thrust::device_ptr<scalar_t> data_(THCTensor_(data)(state, r)); thrust::tabulate(data_, data_ + size, linspace_method); THCTensor_(freeCopyTo)(state, r, r_); THCudaCheck(hipGetLastError()); } #endif
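/* [Editor's illustrative sketch -- not part of the THC source above.]
   The linspace/logspace/range/arange paths in this file all follow the same pattern:
   make the result tensor contiguous, then let thrust::tabulate evaluate a
   "start + i*step" functor over indices 0..n-1 directly on the device.
   The standalone example below reproduces that pattern with plain Thrust types
   (no THCState); the functor name LinspaceFill is made up for this sketch. */
#include <thrust/device_vector.h>
#include <thrust/tabulate.h>
#include <cstdio>

struct LinspaceFill {
  float start, step;
  __host__ __device__ LinspaceFill(float a, float s) : start(a), step(s) {}
  __host__ __device__ float operator()(long i) const { return start + step * (float)i; }
};

int main() {
  const long n = 5;
  const float a = 0.f, b = 1.f;
  const float step = (b - a) / (float)(n - 1);   /* same step rule THCTensor_(linspace) uses for n > 1 */
  thrust::device_vector<float> r(n);
  thrust::tabulate(r.begin(), r.end(), LinspaceFill(a, step));  /* r[i] = a + i*step, computed on the GPU */
  for (long i = 0; i < n; i++) printf("%g\n", (float)r[i]);     /* 0 0.25 0.5 0.75 1 */
  return 0;
}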
2a6a99a5b8e10b8582bba4c683e4af922a115f7b.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorMath.cu" #else void THCTensor_(fill)(THCState* state, THCTensor *self_, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); if (!THC_pointwiseApply1<scalar_t>( state, self_, TensorFillOp<scalar_t>(value))) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } THCudaCheck(cudaGetLastError()); } void THCTensor_(zero)(THCState *state, THCTensor *self_) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); if (THCTensor_(isContiguous)(state, self_)) { THCudaCheck(cudaMemsetAsync(THCTensor_(data)(state, self_), 0, sizeof(scalar_t) * THCTensor_(nElement)(state, self_), THCState_getCurrentStream(state))); } else { if (!THC_pointwiseApply1<scalar_t>( state, self_, TensorFillOp<scalar_t>(ScalarConvert<int, scalar_t>::to(0)))) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); } void THCTensor_(zerosLike)(THCState *state, THCTensor *r_, THCTensor *input) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, r_, input)); THCTensor_(resizeAs)(state, r_, input); THCTensor_(zero)(state, r_); } void THCTensor_(onesLike)(THCState *state, THCTensor *r_, THCTensor *input) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, r_, input)); THCTensor_(resizeAs)(state, r_, input); THCTensor_(fill)(state, r_, ScalarConvert<int, scalar_t>::to(1)); } ptrdiff_t THCTensor_(numel)(THCState *state, THCTensor *t) { return THCTensor_(nElement)(state, t); } void THCTensor_(cat)(THCState *state, THCTensor *result, THCTensor *ta, THCTensor *tb, int dimension) { THCTensor* inputs[2]; inputs[0] = ta; inputs[1] = tb; THCTensor_(catArray)(state, result, inputs, 2, dimension); } void THCTensor_(check_shape_except_dim)(THCState *state, THCTensor *first, THCTensor *second, int dimension); inline void THCTensor_(check_shape_except_dim)(THCState *state, THCTensor *first, THCTensor *second, int dimension) { int first_dims = first->dim(); int second_dims = second->dim(); THArgCheck(first_dims == second_dims, 0, "Tensors must have same number of dimensions: got %d and %d", first_dims, second_dims); for (int dim = 0; dim < first_dims; dim++) { if (dim == dimension) { continue; } int64_t first_dim_size = THCTensor_(size)(state, first, dim); int64_t second_dim_size = THCTensor_(size)(state, second, dim); THArgCheck(first_dim_size == second_dim_size, 0, "Sizes of tensors must match except in dimension %d. Got %lld and %lld in dimension %d", dimension, (long long)first_dim_size, (long long)second_dim_size, dim); } } void THCTensor_(catArray)(THCState *state, THCTensor *result, THCTensor **inputs, int numInputs, int dimension) { // previously, size [0] tensors were the only possible empty tensors; thus, it wasn't possible // to cat empty tensors unless all the other tensors were 1-dimensional, so we allowed these tensors // to be "skipped". We maintain this behavior for backwards compatibility, but only for this specific // size (i.e. other empty sizes are not skipped). 
// FIXME: warn if this is the case int i, j, cohortMax; int64_t offset; bool hasSkippedInput = false; THCTensor *notSkippedTensor = NULL; // non-owning reference auto should_skip = [](THCTensor *t) { return t->is_empty() && t->dim() == 1; }; int nDims = 0; for (i = 0; i < numInputs; i++) { if (should_skip(inputs[i])) { hasSkippedInput = true; continue; } nDims = inputs[i]->dim(); notSkippedTensor = inputs[i]; } // If all inputs are empty tensors, return an empty tensor if (notSkippedTensor == NULL) { return; } THArgCheck(numInputs > 0, 3, "invalid number of inputs %d", numInputs); THArgCheck(dimension >= 0, 4, "invalid dimension %d", dimension); std::vector<int64_t> size(nDims); // Compute size of the result in the cat dimension int64_t cat_dim_size = 0; for (int i = 0; i < numInputs; i++) { THCTensor *tensor = inputs[i]; if (should_skip(tensor)) { continue; } THCTensor_(check_shape_except_dim)(state, notSkippedTensor, tensor, dimension); cat_dim_size += THCTensor_(size)(state, tensor, dimension); } // Compute the size of the result for (int dim = 0; dim < nDims; dim++) { int64_t result_dim_size = THCTensor_(size)(state, notSkippedTensor, dim); if (dim == dimension) { result_dim_size = cat_dim_size; } size[dim] = result_dim_size; } THCTensor_(resize)(state, result, size, {}); // We parallelize the copy if all 6 conditions pass: // // 1. There is more than one input tensor // 2. No empty inputs // 3. The result tensor is 32-bit indexable // 4. The number of dimensions is <= 4 // 5. All input tensors are contiguous (output tensor may be non-contig) // 6. All input tensors can use 32-bit indexing // 7. All input tensors are on the same device if (numInputs > 1 && !hasSkippedInput && result->dim() <= CAT_ARRAY_MAX_INPUT_DIMS && THCTensor_canUse32BitIndexMath(state, result) && THCTensor_allContiguous(state, inputs, numInputs) && THCTensor_all32BitIndexable(state, inputs, numInputs) && THCTensor_allSameDevice(state, inputs, numInputs)) { // First, let's set up our kernel parameters. We start with a raw pointer to the storage // for the output Tensor. scalar_t *data = THCTensor_(data)(state, result); // Kernel Parameter size_t tensorMetadataSize = sizeof(CatArrInputTensor<scalar_t, unsigned int>) * CAT_ARRAY_BATCH_SIZE; auto d_inputs = static_cast<CatArrInputTensor<scalar_t, unsigned int> *>(THCudaMalloc(state, tensorMetadataSize)); OutputTensorSizeStride<unsigned int, CAT_ARRAY_MAX_INPUT_DIMS> param; // Next, let's initialize the size, stride arrays for the output Tensor. 
for (i = 0; i < nDims; ++i) { param.outputSize[i] = THCTensor_(size)(state, result, i); param.outputStride[i] = THCTensor_(stride)(state, result, i); } at::cuda::CUDAStream stream = at::cuda::getCurrentCUDAStream(); // Template Declarations for dim = 1, 2, 3, 4 #define HANDLE_CASE(DIMS) \ CatArrayBatchedCopy<scalar_t, unsigned int, DIMS><<<catGrid, applyBlock, 0, stream.stream()>>>(data, d_inputs, param, dimension, param.outputStride[dimension]); // Now we loop offset = 0; for (i = 0; i < numInputs; i += CAT_ARRAY_BATCH_SIZE) { // Re-allocate stackInputs every iteration to avoid read-after-write hazard { auto stackInputs_owner = THCudaHostAlloc(state, tensorMetadataSize); CatArrInputTensor<scalar_t, unsigned int>* stackInputs = static_cast<CatArrInputTensor<scalar_t, unsigned int>*>(stackInputs_owner.get()); cohortMax = 0; for (j = 0; j < CAT_ARRAY_BATCH_SIZE && (i+j) < numInputs; ++j) { int64_t dimSize = THCTensor_(size)(state, inputs[i+j], dimension); stackInputs[j].input = THCTensor_(data)(state, inputs[i+j]); stackInputs[j].offset = offset; stackInputs[j].dimSize = dimSize; stackInputs[j].nElements = THCTensor_(nElement)(state, inputs[i+j]); cohortMax = cohortMax > (int) stackInputs[j].nElements ? cohortMax : (int) stackInputs[j].nElements; // update offset offset += dimSize; } THCudaCheck(cudaMemcpyAsync( d_inputs, stackInputs, j * sizeof(CatArrInputTensor<scalar_t, unsigned int>), cudaMemcpyHostToDevice, stream.stream())); THCudaHostRecord(state, stackInputs); } // Next, let's consider how we set our kernel launch parameters. // We borrow from THCApply, which the kernel's internal indexing // is based on. dim3 applyBlock = getApplyBlock(); //Get grid where x dim fills half gpu and y dim is number of tensors. //This will have cating two tensors fill the entire grid, but prevent //many threads from needlessly load meta data if their sizes is small. 
dim3 catGrid; getCatGrid(state, j, catGrid); switch (nDims) { case 1: HANDLE_CASE(1); break; case 2: HANDLE_CASE(2); break; case 3: HANDLE_CASE(3); break; case 4: HANDLE_CASE(4); break; } THCudaCheck(cudaGetLastError()); } THCudaFree(state, d_inputs); #undef HANDLE_CASE } else { offset = 0; for (j = 0; j < numInputs; j++) { if (should_skip(inputs[j])) continue; int64_t dimSize = THCTensor_(size)(state, inputs[j], dimension); THCTensor *nt = THCTensor_(newWithTensor)(state, result); THCTensor_(narrow)(state, nt, NULL, dimension, offset, dimSize); THCTensor_(copy)(state, nt, inputs[j]); THCTensor_(free)(state, nt); offset += dimSize; } } } void THCTensor_(nonzero)(THCState* state, THCudaLongTensor *tensor, THCTensor *self) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self )); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, tensor)); using namespace thrust::placeholders; THCThrustAllocator thrustAlloc(state); self = THCTensor_(newContiguous)(state, self); thrust::device_ptr<scalar_t> self_data(THCTensor_(data)(state, self)); int num_dim = THCTensor_(nDimensionLegacyNoScalars)(state, self); int64_t N = THCTensor_(nElement)(state, self); THCudaLongTensor_resize2d(state, tensor, N, num_dim); tensor = THCudaLongTensor_newContiguous(state, tensor); thrust::device_ptr<int64_t> tensor_data(THCudaLongTensor_data(state, tensor)); thrust::counting_iterator<int64_t> idxfirst(0); thrust::counting_iterator<int64_t> idxlast = idxfirst + N; typedef thrust::device_ptr<int64_t> Iter; strided_range<Iter> strided_tensor(tensor_data, tensor_data+N*num_dim, num_dim); #if CUDA_VERSION >= 7000 cudaStream_t stream = THCState_getCurrentStream(state); #endif strided_range<Iter>::iterator dend = thrust::copy_if( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(stream), #endif idxfirst, idxlast, self_data, strided_tensor.begin(), NonZeroOp<scalar_t>() ); int64_t num_nonzeros = thrust::distance(strided_tensor.begin(), dend); int64_t div = 1; for (int dim = num_dim-1; dim >= 0; dim--) { strided_range<Iter> stride_dim(tensor_data+dim, tensor_data+N*num_dim, num_dim); thrust::transform( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(stream), #endif strided_tensor.begin(), strided_tensor.end(), stride_dim.begin(), idx_functor(div, THTensor_sizeLegacyNoScalars(self, dim)) ); div *= THTensor_sizeLegacyNoScalars(self, dim); } THCudaLongTensor_resize2d(state, tensor, num_nonzeros, num_dim); THCTensor_(free)(state, self); THCudaLongTensor_free(state, tensor); THCudaCheck(cudaGetLastError()); } void THCTensor_(diag)(THCState *state, THCTensor *self_, THCTensor *src_, int64_t k){ THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_)); int nDimension = THCTensor_(nDimensionLegacyNoScalars)(state, src_); THArgCheck((nDimension == 2) || (nDimension == 1), 1, "expected a matrix or a vector"); if (nDimension == 2) { int64_t stride0 = THCTensor_(stride)(state, src_, 0); int64_t stride1 = THCTensor_(stride)(state, src_, 1); int64_t size0 = THCTensor_(size)(state, src_, 0); int64_t size1 = THCTensor_(size)(state, src_, 1); int64_t size = (k > 0) ? min((int64_t)size0, (int64_t)size1 - k) : min((int64_t)size0 + k, (int64_t)size1); THCTensor_(resize1d)(state, self_, size); if (size > 0) { int64_t strideSelf = THCTensor_(stride)(state, self_, 0); const dim3 threads(min((int64_t)THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock, (int64_t)size)); dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (int64_t)threads.x))); int64_t start = (k >= 0 ? 
k * stride1 : -k * stride0); THCTensor_copyFromDiagonal<scalar_t><<<grid, threads, 0, THCState_getCurrentStream(state)>>> (THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, size, stride0 + stride1, strideSelf); } } else { ptrdiff_t totalElements = THCTensor_(nElement)(state, src_); ptrdiff_t size = (k > 0) ? totalElements + k : totalElements - k; int64_t strideSrc = THTensor_strideLegacyNoScalars(src_, 0); THCTensor_(resize2d)(state, self_, size, size); THCTensor_(zero)(state, self_); if (size > 0) { int64_t stride0 = THCTensor_(stride)(state, self_, 0); int64_t stride1 = THCTensor_(stride)(state, self_, 1); const dim3 threads(min((int64_t)THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock, (int64_t)size)); dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (ptrdiff_t)threads.x))); ptrdiff_t start = (k >= 0 ? k * stride1 : -k * stride0); THCTensor_copyToDiagonal<scalar_t><<<grid, threads, 0, THCState_getCurrentStream(state)>>> (THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, totalElements, stride0 + stride1, strideSrc); } } THCudaCheck(cudaGetLastError()); } void THCTensor_(eye)(THCState *state, THCTensor *self_, int64_t n, int64_t m) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); THArgCheck(n > 0, 1, "invalid argument"); if(m <= 0) m = n; THCTensor_(resize2d)(state, self_, n, m); THCTensor_(zero)(state, self_); int64_t sz = THMin(n, m); int64_t stride = THCTensor_(stride)(state, self_, 0) + THCTensor_(stride)(state, self_, 1); THCTensor *diag = THCTensor_(newWithStorage1d)(state, THTensor_getStoragePtr(self_), self_->storage_offset(), sz, stride); THCTensor_(fill)(state, diag, ScalarConvert<int, scalar_t>::to(1)); THCTensor_(free)(state, diag); } accreal THCTensor_(trace)(THCState *state, THCTensor *src_) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, src_)); THArgCheck((THTensor_nDimensionLegacyAll(src_) == 2), 1, "expected a matrix"); THCTensor *diag = THCTensor_(new)(state); THCTensor_(diag)(state, diag, src_, 0); accreal trace = THCTensor_(sumall)(state, diag); THCTensor_(free)(state, diag); return trace; } #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) void THCTensor_(linspace)(THCState *state, THCTensor *r_, scalar_t a, scalar_t b, int64_t n) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_)); // NumPy allows you to pass different points even if n <= 1 -- should we? THArgCheck(n > 1 || ((n == 0 || n == 1) && (a == b)), 3, "invalid number of points"); if (THCTensor_(nElement)(state, r_) != n) THCTensor_(resize1d)(state, r_, n); if (n == 0) { // skip } else if (n == 1) THCTensor_(fill)(state, r_, a); else { THCTensor *r = THCTensor_(isContiguous)(state, r_) ? r_ // if r_ is contiguous we can direct work on it : THCTensor_(newContiguous)(state, r_); scalar_t step = THCNumerics<scalar_t>::div(THCNumerics<scalar_t>::sub(b, a), ScalarConvert<int64_t,scalar_t>::to(n - 1)); LinspaceOp<scalar_t> linspace_method(a, step); thrust::device_ptr<scalar_t> data_(THCTensor_(data)(state, r)); thrust::tabulate(data_, data_ + n, linspace_method); if (!THCTensor_(isContiguous)(state, r_)) { // We need to move data back to r_ THCTensor_(freeCopyTo)(state, r, r_); } } THCudaCheck(cudaGetLastError()); } void THCTensor_(logspace)(THCState *state, THCTensor *r_, scalar_t a, scalar_t b, int64_t n) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_)); // NumPy allows you to pass different points even if n <= 1 -- should we? 
THArgCheck(n > 1 || ((n == 0 || n == 1) && (a == b)), 3, "invalid number of points"); if (THCTensor_(nElement)(state, r_) != n) THCTensor_(resize1d)(state, r_, n); if (n == 0) { // skip } else if (n == 1) THCTensor_(fill)(state, r_, THCNumerics<scalar_t>::exp10(a)); else { THCTensor *r = THCTensor_(isContiguous)(state, r_) ? r_ : THCTensor_(newContiguous)(state, r_); scalar_t step = THCNumerics<scalar_t>::div(THCNumerics<scalar_t>::sub(b, a), ScalarConvert<int64_t,scalar_t>::to(n - 1)); LogspaceOp<scalar_t> logspace_method(a, step); thrust::device_ptr<scalar_t> data_(THCTensor_(data)(state, r)); thrust::tabulate(data_, data_ + n, logspace_method); if (!THCTensor_(isContiguous)(state, r_)) { THCTensor_(freeCopyTo)(state, r, r_); } } THCudaCheck(cudaGetLastError()); } #endif void THCTensor_(range)(THCState *state, THCTensor *r_, accreal xmin, accreal xmax, accreal step) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_)); THArgCheck(step > 0 || step < 0, 3, "step must be nonzero"); THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin)) , 2, "upper bound and larger bound inconsistent with step sign"); ptrdiff_t size = (ptrdiff_t) (((xmax - xmin) / step) + 1); if (THCTensor_(nElement)(state, r_) != size) THCTensor_(resize1d)(state, r_, size); THCTensor *r = THCTensor_(newContiguous)(state, r_); LinspaceOp<scalar_t,accreal> linspace_method(xmin, step); thrust::device_ptr<scalar_t> data_(THCTensor_(data)(state, r)); thrust::tabulate(data_, data_ + size, linspace_method); THCTensor_(freeCopyTo)(state, r, r_); THCudaCheck(cudaGetLastError()); } void THCTensor_(arange)(THCState* state, THCTensor *r_, accreal xmin, accreal xmax, accreal step) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_)); THArgCheck(step > 0 || step < 0, 3, "step must be nonzero"); THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin)) , 2, "upper bound and larger bound inconsistent with step sign"); ptrdiff_t size = (ptrdiff_t) ceil(ScalarConvert<accreal, double>::to(xmax - xmin) / step); if (THCTensor_(nElement)(state, r_) != size) THCTensor_(resize1d)(state, r_, size); THCTensor *r = THCTensor_(newContiguous)(state, r_); LinspaceOp<scalar_t,accreal> linspace_method(xmin, step); thrust::device_ptr<scalar_t> data_(THCTensor_(data)(state, r)); thrust::tabulate(data_, data_ + size, linspace_method); THCTensor_(freeCopyTo)(state, r, r_); THCudaCheck(cudaGetLastError()); } #endif
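/* [Editor's illustrative sketch -- not part of the THCTensorMath.cu source above.]
   THCTensor_(nonzero) gathers the linear indices of nonzero elements by running
   thrust::copy_if over a counting iterator, with the tensor data as the stencil
   and an "is nonzero" predicate, before converting linear indices to per-dimension
   subscripts with div/mod. The standalone example below shows just that
   index-gathering step; the predicate name IsNonZero is made up for this sketch. */
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/iterator/counting_iterator.h>
#include <cstdio>

struct IsNonZero {
  __host__ __device__ bool operator()(float x) const { return x != 0.0f; }
};

int main() {
  const float h[8] = {0, 3, 0, 0, 7, 0, 1, 0};
  thrust::device_vector<float>     data(h, h + 8);
  thrust::device_vector<long long> idx(8);            /* worst case: every element is nonzero */
  thrust::counting_iterator<long long> first(0), last(8);
  /* copy_if(first, last, stencil, result, pred): keep index i whenever pred(data[i]) holds */
  auto end = thrust::copy_if(first, last, data.begin(), idx.begin(), IsNonZero());
  long long found = end - idx.begin();
  for (long long i = 0; i < found; i++) printf("%lld ", (long long)idx[i]);  /* prints: 1 4 6 */
  printf("\n");
  return 0;
}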
3fcd5f9aa31da0042edfc1e4d67adcb8289f03bc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <petsc/private/cudavecimpl.h> #include <../src/vec/is/sf/impls/basic/sfpack.h> #include <mpi.h> #include <nvshmem.h> #include <nvshmemx.h> PetscErrorCode PetscNvshmemInitializeCheck(void) { PetscErrorCode ierr; PetscFunctionBegin; if (!PetscNvshmemInitialized) { /* Note NVSHMEM does not provide a routine to check whether it is initialized */ nvshmemx_init_attr_t attr; attr.mpi_comm = &PETSC_COMM_WORLD; ierr = PetscDeviceInitialize(PETSC_DEVICE_CUDA);CHKERRQ(ierr); ierr = nvshmemx_init_attr(NVSHMEMX_INIT_WITH_MPI_COMM,&attr);CHKERRQ(ierr); PetscNvshmemInitialized = PETSC_TRUE; PetscBeganNvshmem = PETSC_TRUE; } PetscFunctionReturn(0); } PetscErrorCode PetscNvshmemMalloc(size_t size, void** ptr) { PetscErrorCode ierr; PetscFunctionBegin; ierr = PetscNvshmemInitializeCheck();CHKERRQ(ierr); *ptr = nvshmem_malloc(size); if (!*ptr) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"nvshmem_malloc() failed to allocate %zu bytes",size); PetscFunctionReturn(0); } PetscErrorCode PetscNvshmemCalloc(size_t size, void**ptr) { PetscErrorCode ierr; PetscFunctionBegin; ierr = PetscNvshmemInitializeCheck();CHKERRQ(ierr); *ptr = nvshmem_calloc(size,1); if (!*ptr) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"nvshmem_calloc() failed to allocate %zu bytes",size); PetscFunctionReturn(0); } PetscErrorCode PetscNvshmemFree_Private(void* ptr) { PetscFunctionBegin; nvshmem_free(ptr); PetscFunctionReturn(0); } PetscErrorCode PetscNvshmemFinalize(void) { PetscFunctionBegin; nvshmem_finalize(); PetscFunctionReturn(0); } /* Free nvshmem related fields in the SF */ PetscErrorCode PetscSFReset_Basic_NVSHMEM(PetscSF sf) { PetscErrorCode ierr; PetscSF_Basic *bas = (PetscSF_Basic*)sf->data; PetscFunctionBegin; ierr = PetscFree2(bas->leafsigdisp,bas->leafbufdisp);CHKERRQ(ierr); ierr = PetscSFFree(sf,PETSC_MEMTYPE_CUDA,bas->leafbufdisp_d);CHKERRQ(ierr); ierr = PetscSFFree(sf,PETSC_MEMTYPE_CUDA,bas->leafsigdisp_d);CHKERRQ(ierr); ierr = PetscSFFree(sf,PETSC_MEMTYPE_CUDA,bas->iranks_d);CHKERRQ(ierr); ierr = PetscSFFree(sf,PETSC_MEMTYPE_CUDA,bas->ioffset_d);CHKERRQ(ierr); ierr = PetscFree2(sf->rootsigdisp,sf->rootbufdisp);CHKERRQ(ierr); ierr = PetscSFFree(sf,PETSC_MEMTYPE_CUDA,sf->rootbufdisp_d);CHKERRQ(ierr); ierr = PetscSFFree(sf,PETSC_MEMTYPE_CUDA,sf->rootsigdisp_d);CHKERRQ(ierr); ierr = PetscSFFree(sf,PETSC_MEMTYPE_CUDA,sf->ranks_d);CHKERRQ(ierr); ierr = PetscSFFree(sf,PETSC_MEMTYPE_CUDA,sf->roffset_d);CHKERRQ(ierr); PetscFunctionReturn(0); } /* Set up NVSHMEM related fields for an SF of type SFBASIC (only after PetscSFSetup_Basic() already set up dependant fields */ static PetscErrorCode PetscSFSetUp_Basic_NVSHMEM(PetscSF sf) { PetscErrorCode ierr; hipError_t cerr; PetscSF_Basic *bas = (PetscSF_Basic*)sf->data; PetscInt i,nRemoteRootRanks,nRemoteLeafRanks; PetscMPIInt tag; MPI_Comm comm; MPI_Request *rootreqs,*leafreqs; PetscInt tmp,stmp[4],rtmp[4]; /* tmps for send/recv buffers */ PetscFunctionBegin; ierr = PetscObjectGetComm((PetscObject)sf,&comm);CHKERRQ(ierr); ierr = PetscObjectGetNewTag((PetscObject)sf,&tag);CHKERRQ(ierr); nRemoteRootRanks = sf->nranks-sf->ndranks; nRemoteLeafRanks = bas->niranks-bas->ndiranks; sf->nRemoteRootRanks = nRemoteRootRanks; bas->nRemoteLeafRanks = nRemoteLeafRanks; ierr = PetscMalloc2(nRemoteLeafRanks,&rootreqs,nRemoteRootRanks,&leafreqs);CHKERRQ(ierr); stmp[0] = nRemoteRootRanks; stmp[1] = sf->leafbuflen[PETSCSF_REMOTE]; stmp[2] = nRemoteLeafRanks; stmp[3] = bas->rootbuflen[PETSCSF_REMOTE]; ierr = 
MPIU_Allreduce(stmp,rtmp,4,MPIU_INT,MPI_MAX,comm);CHKERRMPI(ierr); sf->nRemoteRootRanksMax = rtmp[0]; sf->leafbuflen_rmax = rtmp[1]; bas->nRemoteLeafRanksMax = rtmp[2]; bas->rootbuflen_rmax = rtmp[3]; /* Total four rounds of MPI communications to set up the nvshmem fields */ /* Root ranks to leaf ranks: send info about rootsigdisp[] and rootbufdisp[] */ ierr = PetscMalloc2(nRemoteRootRanks,&sf->rootsigdisp,nRemoteRootRanks,&sf->rootbufdisp);CHKERRQ(ierr); for (i=0; i<nRemoteRootRanks; i++) {ierr = MPI_Irecv(&sf->rootsigdisp[i],1,MPIU_INT,sf->ranks[i+sf->ndranks],tag,comm,&leafreqs[i]);CHKERRMPI(ierr);} /* Leaves recv */ for (i=0; i<nRemoteLeafRanks; i++) {ierr = MPI_Send(&i,1,MPIU_INT,bas->iranks[i+bas->ndiranks],tag,comm);CHKERRMPI(ierr);} /* Roots send. Note i changes, so we use MPI_Send. */ ierr = MPI_Waitall(nRemoteRootRanks,leafreqs,MPI_STATUSES_IGNORE);CHKERRMPI(ierr); for (i=0; i<nRemoteRootRanks; i++) {ierr = MPI_Irecv(&sf->rootbufdisp[i],1,MPIU_INT,sf->ranks[i+sf->ndranks],tag,comm,&leafreqs[i]);CHKERRMPI(ierr);} /* Leaves recv */ for (i=0; i<nRemoteLeafRanks; i++) { tmp = bas->ioffset[i+bas->ndiranks] - bas->ioffset[bas->ndiranks]; ierr = MPI_Send(&tmp,1,MPIU_INT,bas->iranks[i+bas->ndiranks],tag,comm);CHKERRMPI(ierr); /* Roots send. Note tmp changes, so we use MPI_Send. */ } ierr = MPI_Waitall(nRemoteRootRanks,leafreqs,MPI_STATUSES_IGNORE);CHKERRMPI(ierr); cerr = hipMalloc((void**)&sf->rootbufdisp_d,nRemoteRootRanks*sizeof(PetscInt));CHKERRCUDA(cerr); cerr = hipMalloc((void**)&sf->rootsigdisp_d,nRemoteRootRanks*sizeof(PetscInt));CHKERRCUDA(cerr); cerr = hipMalloc((void**)&sf->ranks_d,nRemoteRootRanks*sizeof(PetscMPIInt));CHKERRCUDA(cerr); cerr = hipMalloc((void**)&sf->roffset_d,(nRemoteRootRanks+1)*sizeof(PetscInt));CHKERRCUDA(cerr); cerr = hipMemcpyAsync(sf->rootbufdisp_d,sf->rootbufdisp,nRemoteRootRanks*sizeof(PetscInt),hipMemcpyHostToDevice,PetscDefaultCudaStream);CHKERRCUDA(cerr); cerr = hipMemcpyAsync(sf->rootsigdisp_d,sf->rootsigdisp,nRemoteRootRanks*sizeof(PetscInt),hipMemcpyHostToDevice,PetscDefaultCudaStream);CHKERRCUDA(cerr); cerr = hipMemcpyAsync(sf->ranks_d,sf->ranks+sf->ndranks,nRemoteRootRanks*sizeof(PetscMPIInt),hipMemcpyHostToDevice,PetscDefaultCudaStream);CHKERRCUDA(cerr); cerr = hipMemcpyAsync(sf->roffset_d,sf->roffset+sf->ndranks,(nRemoteRootRanks+1)*sizeof(PetscInt),hipMemcpyHostToDevice,PetscDefaultCudaStream);CHKERRCUDA(cerr); /* Leaf ranks to root ranks: send info about leafsigdisp[] and leafbufdisp[] */ ierr = PetscMalloc2(nRemoteLeafRanks,&bas->leafsigdisp,nRemoteLeafRanks,&bas->leafbufdisp);CHKERRQ(ierr); for (i=0; i<nRemoteLeafRanks; i++) {ierr = MPI_Irecv(&bas->leafsigdisp[i],1,MPIU_INT,bas->iranks[i+bas->ndiranks],tag,comm,&rootreqs[i]);CHKERRMPI(ierr);} for (i=0; i<nRemoteRootRanks; i++) {ierr = MPI_Send(&i,1,MPIU_INT,sf->ranks[i+sf->ndranks],tag,comm);CHKERRMPI(ierr);} ierr = MPI_Waitall(nRemoteLeafRanks,rootreqs,MPI_STATUSES_IGNORE);CHKERRMPI(ierr); for (i=0; i<nRemoteLeafRanks; i++) {ierr = MPI_Irecv(&bas->leafbufdisp[i],1,MPIU_INT,bas->iranks[i+bas->ndiranks],tag,comm,&rootreqs[i]);CHKERRMPI(ierr);} for (i=0; i<nRemoteRootRanks; i++) { tmp = sf->roffset[i+sf->ndranks] - sf->roffset[sf->ndranks]; ierr = MPI_Send(&tmp,1,MPIU_INT,sf->ranks[i+sf->ndranks],tag,comm);CHKERRMPI(ierr); } ierr = MPI_Waitall(nRemoteLeafRanks,rootreqs,MPI_STATUSES_IGNORE);CHKERRMPI(ierr); cerr = hipMalloc((void**)&bas->leafbufdisp_d,nRemoteLeafRanks*sizeof(PetscInt));CHKERRCUDA(cerr); cerr = 
hipMalloc((void**)&bas->leafsigdisp_d,nRemoteLeafRanks*sizeof(PetscInt));CHKERRCUDA(cerr); cerr = hipMalloc((void**)&bas->iranks_d,nRemoteLeafRanks*sizeof(PetscMPIInt));CHKERRCUDA(cerr); cerr = hipMalloc((void**)&bas->ioffset_d,(nRemoteLeafRanks+1)*sizeof(PetscInt));CHKERRCUDA(cerr); cerr = hipMemcpyAsync(bas->leafbufdisp_d,bas->leafbufdisp,nRemoteLeafRanks*sizeof(PetscInt),hipMemcpyHostToDevice,PetscDefaultCudaStream);CHKERRCUDA(cerr); cerr = hipMemcpyAsync(bas->leafsigdisp_d,bas->leafsigdisp,nRemoteLeafRanks*sizeof(PetscInt),hipMemcpyHostToDevice,PetscDefaultCudaStream);CHKERRCUDA(cerr); cerr = hipMemcpyAsync(bas->iranks_d,bas->iranks+bas->ndiranks,nRemoteLeafRanks*sizeof(PetscMPIInt),hipMemcpyHostToDevice,PetscDefaultCudaStream);CHKERRCUDA(cerr); cerr = hipMemcpyAsync(bas->ioffset_d,bas->ioffset+bas->ndiranks,(nRemoteLeafRanks+1)*sizeof(PetscInt),hipMemcpyHostToDevice,PetscDefaultCudaStream);CHKERRCUDA(cerr); ierr = PetscFree2(rootreqs,leafreqs);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode PetscSFLinkNvshmemCheck(PetscSF sf,PetscMemType rootmtype,const void *rootdata,PetscMemType leafmtype,const void *leafdata,PetscBool *use_nvshmem) { PetscErrorCode ierr; MPI_Comm comm; PetscBool isBasic; PetscMPIInt result = MPI_UNEQUAL; PetscFunctionBegin; ierr = PetscObjectGetComm((PetscObject)sf,&comm);CHKERRQ(ierr); /* Check if the sf is eligible for NVSHMEM, if we have not checked yet. Note the check result <use_nvshmem> must be the same over comm, since an SFLink must be collectively either NVSHMEM or MPI. */ sf->checked_nvshmem_eligibility = PETSC_TRUE; if (sf->use_nvshmem && !sf->checked_nvshmem_eligibility) { /* Only use NVSHMEM for SFBASIC on PETSC_COMM_WORLD */ ierr = PetscObjectTypeCompare((PetscObject)sf,PETSCSFBASIC,&isBasic);CHKERRQ(ierr); if (isBasic) {ierr = MPI_Comm_compare(PETSC_COMM_WORLD,comm,&result);CHKERRMPI(ierr);} if (!isBasic || (result != MPI_IDENT && result != MPI_CONGRUENT)) sf->use_nvshmem = PETSC_FALSE; /* If not eligible, clear the flag so that we don't try again */ /* Do further check: If on a rank, both rootdata and leafdata are NULL, we might think they are PETSC_MEMTYPE_CUDA (or HOST) and then use NVSHMEM. But if root/leafmtypes on other ranks are PETSC_MEMTYPE_HOST (or DEVICE), this would lead to inconsistency on the return value <use_nvshmem>. To be safe, we simply disable nvshmem on these rare SFs. */ if (sf->use_nvshmem) { PetscInt hasNullRank = (!rootdata && !leafdata) ? 1 : 0; ierr = MPI_Allreduce(MPI_IN_PLACE,&hasNullRank,1,MPIU_INT,MPI_LOR,comm);CHKERRMPI(ierr); if (hasNullRank) sf->use_nvshmem = PETSC_FALSE; } sf->checked_nvshmem_eligibility = PETSC_TRUE; /* If eligible, don't do above check again */ } /* Check if rootmtype and leafmtype collectively are PETSC_MEMTYPE_CUDA */ if (sf->use_nvshmem) { PetscInt oneCuda = (!rootdata || PetscMemTypeCUDA(rootmtype)) && (!leafdata || PetscMemTypeCUDA(leafmtype)) ? 1 : 0; /* Do I use cuda for both root&leafmtype? */ PetscInt allCuda = oneCuda; /* Assume the same for all ranks. But if not, in opt mode, return value <use_nvshmem> won't be collective! */ #if defined(PETSC_USE_DEBUG) /* Check in debug mode. Note MPI_Allreduce is expensive, so only in debug mode */ ierr = MPI_Allreduce(&oneCuda,&allCuda,1,MPIU_INT,MPI_LAND,comm);CHKERRMPI(ierr); if (allCuda != oneCuda) SETERRQ(comm,PETSC_ERR_SUP,"root/leaf mtypes are inconsistent among ranks, which may lead to SF nvshmem failure in opt mode. 
Add -use_nvshmem 0 to disable it."); #endif if (allCuda) { ierr = PetscNvshmemInitializeCheck();CHKERRQ(ierr); if (!sf->setup_nvshmem) { /* Set up nvshmem related fields on this SF on-demand */ ierr = PetscSFSetUp_Basic_NVSHMEM(sf);CHKERRQ(ierr); sf->setup_nvshmem = PETSC_TRUE; } *use_nvshmem = PETSC_TRUE; } else { *use_nvshmem = PETSC_FALSE; } } else { *use_nvshmem = PETSC_FALSE; } PetscFunctionReturn(0); } /* Build dependence between <stream> and <remoteCommStream> at the entry of NVSHMEM communication */ static PetscErrorCode PetscSFLinkBuildDependenceBegin(PetscSF sf,PetscSFLink link,PetscSFDirection direction) { hipError_t cerr; PetscSF_Basic *bas = (PetscSF_Basic *)sf->data; PetscInt buflen = (direction == PETSCSF_ROOT2LEAF)? bas->rootbuflen[PETSCSF_REMOTE] : sf->leafbuflen[PETSCSF_REMOTE]; PetscFunctionBegin; if (buflen) { cerr = hipEventRecord(link->dataReady,link->stream);CHKERRCUDA(cerr); cerr = hipStreamWaitEvent(link->remoteCommStream,link->dataReady,0);CHKERRCUDA(cerr); } PetscFunctionReturn(0); } /* Build dependence between <stream> and <remoteCommStream> at the exit of NVSHMEM communication */ static PetscErrorCode PetscSFLinkBuildDependenceEnd(PetscSF sf,PetscSFLink link,PetscSFDirection direction) { hipError_t cerr; PetscSF_Basic *bas = (PetscSF_Basic *)sf->data; PetscInt buflen = (direction == PETSCSF_ROOT2LEAF)? sf->leafbuflen[PETSCSF_REMOTE] : bas->rootbuflen[PETSCSF_REMOTE]; PetscFunctionBegin; /* If unpack to non-null device buffer, build the endRemoteComm dependance */ if (buflen) { cerr = hipEventRecord(link->endRemoteComm,link->remoteCommStream);CHKERRCUDA(cerr); cerr = hipStreamWaitEvent(link->stream,link->endRemoteComm,0);CHKERRCUDA(cerr); } PetscFunctionReturn(0); } /* Send/Put signals to remote ranks Input parameters: + n - Number of remote ranks . sig - Signal address in symmetric heap . sigdisp - To i-th rank, use its signal at offset sigdisp[i] . ranks - remote ranks - newval - Set signals to this value */ __global__ static void NvshmemSendSignals(PetscInt n,uint64_t *sig,PetscInt *sigdisp,PetscMPIInt *ranks,uint64_t newval) { int i = blockIdx.x*blockDim.x + threadIdx.x; /* Each thread puts one remote signal */ if (i < n) nvshmemx_uint64_signal(sig+sigdisp[i],newval,ranks[i]); } /* Wait until local signals equal to the expected value and then set them to a new value Input parameters: + n - Number of signals . sig - Local signal address . expval - expected value - newval - Set signals to this new value */ __global__ static void NvshmemWaitSignals(PetscInt n,uint64_t *sig,uint64_t expval,uint64_t newval) { #if 0 /* Akhil Langer@NVIDIA said using 1 thread and nvshmem_uint64_wait_until_all is better */ int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) { nvshmem_signal_wait_until(sig+i,NVSHMEM_CMP_EQ,expval); sig[i] = newval; } #else nvshmem_uint64_wait_until_all(sig,n,NULL/*no mask*/,NVSHMEM_CMP_EQ,expval); for (int i=0; i<n; i++) sig[i] = newval; #endif } /* =========================================================================================================== A set of routines to support receiver initiated communication using the get method The getting protocol is: Sender has a send buf (sbuf) and a signal variable (ssig); Receiver has a recv buf (rbuf) and a signal variable (rsig); All signal variables have an initial value 0. Sender: | Receiver: 1. Wait ssig be 0, then set it to 1 2. Pack data into stand alone sbuf | 3. Put 1 to receiver's rsig | 1. Wait rsig to be 1, then set it 0 | 2. Get data from remote sbuf to local rbuf | 3. 
Put 1 to sender's ssig | 4. Unpack data from local rbuf ===========================================================================================================*/ /* PrePack operation -- since sender will overwrite the send buffer which the receiver might be getting data from. Sender waits for signals (from receivers) indicating receivers have finished getting data */ PetscErrorCode PetscSFLinkWaitSignalsOfCompletionOfGettingData_NVSHMEM(PetscSF sf,PetscSFLink link,PetscSFDirection direction) { PetscSF_Basic *bas = (PetscSF_Basic*)sf->data; uint64_t *sig; PetscInt n; PetscFunctionBegin; if (direction == PETSCSF_ROOT2LEAF) { /* leaf ranks are getting data */ sig = link->rootSendSig; /* leaf ranks set my rootSendsig */ n = bas->nRemoteLeafRanks; } else { /* LEAF2ROOT */ sig = link->leafSendSig; n = sf->nRemoteRootRanks; } if (n) { hipLaunchKernelGGL(( NvshmemWaitSignals), dim3(1),dim3(1),0,link->remoteCommStream, n,sig,0,1); /* wait the signals to be 0, then set them to 1 */ hipError_t cerr = hipGetLastError();CHKERRCUDA(cerr); } PetscFunctionReturn(0); } /* n thread blocks. Each takes in charge one remote rank */ __global__ static void GetDataFromRemotelyAccessible(PetscInt nsrcranks,PetscMPIInt *srcranks,const char *src,PetscInt *srcdisp,char *dst,PetscInt *dstdisp,PetscInt unitbytes) { int bid = blockIdx.x; PetscMPIInt pe = srcranks[bid]; if (!nvshmem_ptr(src,pe)) { PetscInt nelems = (dstdisp[bid+1]-dstdisp[bid])*unitbytes; nvshmem_getmem_nbi(dst+(dstdisp[bid]-dstdisp[0])*unitbytes,src+srcdisp[bid]*unitbytes,nelems,pe); } } /* Start communication -- Get data in the given direction */ PetscErrorCode PetscSFLinkGetDataBegin_NVSHMEM(PetscSF sf,PetscSFLink link,PetscSFDirection direction) { PetscErrorCode ierr; hipError_t cerr; PetscSF_Basic *bas = (PetscSF_Basic*)sf->data; PetscInt nsrcranks,ndstranks,nLocallyAccessible = 0; char *src,*dst; PetscInt *srcdisp_h,*dstdisp_h; PetscInt *srcdisp_d,*dstdisp_d; PetscMPIInt *srcranks_h; PetscMPIInt *srcranks_d,*dstranks_d; uint64_t *dstsig; PetscInt *dstsigdisp_d; PetscFunctionBegin; ierr = PetscSFLinkBuildDependenceBegin(sf,link,direction);CHKERRQ(ierr); if (direction == PETSCSF_ROOT2LEAF) { /* src is root, dst is leaf; we will move data from src to dst */ nsrcranks = sf->nRemoteRootRanks; src = link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; /* root buf is the send buf; it is in symmetric heap */ srcdisp_h = sf->rootbufdisp; /* for my i-th remote root rank, I will access its buf at offset rootbufdisp[i] */ srcdisp_d = sf->rootbufdisp_d; srcranks_h = sf->ranks+sf->ndranks; /* my (remote) root ranks */ srcranks_d = sf->ranks_d; ndstranks = bas->nRemoteLeafRanks; dst = link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; /* recv buf is the local leaf buf, also in symmetric heap */ dstdisp_h = sf->roffset+sf->ndranks; /* offsets of the local leaf buf. 
Note dstdisp[0] is not necessarily 0 */ dstdisp_d = sf->roffset_d; dstranks_d = bas->iranks_d; /* my (remote) leaf ranks */ dstsig = link->leafRecvSig; dstsigdisp_d = bas->leafsigdisp_d; } else { /* src is leaf, dst is root; we will move data from src to dst */ nsrcranks = bas->nRemoteLeafRanks; src = link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; /* leaf buf is the send buf */ srcdisp_h = bas->leafbufdisp; /* for my i-th remote root rank, I will access its buf at offset rootbufdisp[i] */ srcdisp_d = bas->leafbufdisp_d; srcranks_h = bas->iranks+bas->ndiranks; /* my (remote) root ranks */ srcranks_d = bas->iranks_d; ndstranks = sf->nRemoteRootRanks; dst = link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; /* the local root buf is the recv buf */ dstdisp_h = bas->ioffset+bas->ndiranks; /* offsets of the local root buf. Note dstdisp[0] is not necessarily 0 */ dstdisp_d = bas->ioffset_d; dstranks_d = sf->ranks_d; /* my (remote) root ranks */ dstsig = link->rootRecvSig; dstsigdisp_d = sf->rootsigdisp_d; } /* After Pack operation -- src tells dst ranks that they are allowed to get data */ if (ndstranks) { hipLaunchKernelGGL(( NvshmemSendSignals), dim3((ndstranks+255)/256),dim3(256),0,link->remoteCommStream, ndstranks,dstsig,dstsigdisp_d,dstranks_d,1); /* set signals to 1 */ cerr = hipGetLastError();CHKERRCUDA(cerr); } /* dst waits for signals (permissions) from src ranks to start getting data */ if (nsrcranks) { hipLaunchKernelGGL(( NvshmemWaitSignals), dim3(1),dim3(1),0,link->remoteCommStream, nsrcranks,dstsig,1,0); /* wait the signals to be 1, then set them to 0 */ cerr = hipGetLastError();CHKERRCUDA(cerr); } /* dst gets data from src ranks using non-blocking nvshmem_gets, which are finished in PetscSFLinkGetDataEnd_NVSHMEM() */ /* Count number of locally accessible src ranks, which should be a small number */ for (int i=0; i<nsrcranks; i++) {if (nvshmem_ptr(src,srcranks_h[i])) nLocallyAccessible++;} /* Get data from remotely accessible PEs */ if (nLocallyAccessible < nsrcranks) { hipLaunchKernelGGL(( GetDataFromRemotelyAccessible), dim3(nsrcranks),dim3(1),0,link->remoteCommStream, nsrcranks,srcranks_d,src,srcdisp_d,dst,dstdisp_d,link->unitbytes); cerr = hipGetLastError();CHKERRCUDA(cerr); } /* Get data from locally accessible PEs */ if (nLocallyAccessible) { for (int i=0; i<nsrcranks; i++) { int pe = srcranks_h[i]; if (nvshmem_ptr(src,pe)) { size_t nelems = (dstdisp_h[i+1]-dstdisp_h[i])*link->unitbytes; nvshmemx_getmem_nbi_on_stream(dst+(dstdisp_h[i]-dstdisp_h[0])*link->unitbytes,src+srcdisp_h[i]*link->unitbytes,nelems,pe,link->remoteCommStream); } } } PetscFunctionReturn(0); } /* Finish the communication (can be done before Unpack) Receiver tells its senders that they are allowed to reuse their send buffer (since receiver has got data from their send buffer) */ PetscErrorCode PetscSFLinkGetDataEnd_NVSHMEM(PetscSF sf,PetscSFLink link,PetscSFDirection direction) { PetscErrorCode ierr; hipError_t cerr; PetscSF_Basic *bas = (PetscSF_Basic*)sf->data; uint64_t *srcsig; PetscInt nsrcranks,*srcsigdisp; PetscMPIInt *srcranks; PetscFunctionBegin; if (direction == PETSCSF_ROOT2LEAF) { /* leaf ranks are getting data */ nsrcranks = sf->nRemoteRootRanks; srcsig = link->rootSendSig; /* I want to set their root signal */ srcsigdisp = sf->rootsigdisp_d; /* offset of each root signal */ srcranks = sf->ranks_d; /* ranks of the n root ranks */ } else { /* LEAF2ROOT, root ranks are getting data */ nsrcranks = bas->nRemoteLeafRanks; srcsig = link->leafSendSig; srcsigdisp = bas->leafsigdisp_d; srcranks = 
bas->iranks_d; } if (nsrcranks) { nvshmemx_quiet_on_stream(link->remoteCommStream); /* Finish the nonblocking get, so that we can unpack afterwards */ cerr = hipGetLastError();CHKERRCUDA(cerr); hipLaunchKernelGGL(( NvshmemSendSignals), dim3((nsrcranks+511)/512),dim3(512),0,link->remoteCommStream, nsrcranks,srcsig,srcsigdisp,srcranks,0); /* set signals to 0 */ cerr = hipGetLastError();CHKERRCUDA(cerr); } ierr = PetscSFLinkBuildDependenceEnd(sf,link,direction);CHKERRQ(ierr); PetscFunctionReturn(0); } /* =========================================================================================================== A set of routines to support sender initiated communication using the put-based method (the default) The putting protocol is: Sender has a send buf (sbuf) and a send signal var (ssig); Receiver has a stand-alone recv buf (rbuf) and a recv signal var (rsig); All signal variables have an initial value 0. rbuf is allocated by SF and is in nvshmem space. Sender: | Receiver: | 1. Pack data into sbuf | 2. Wait ssig be 0, then set it to 1 | 3. Put data to remote stand-alone rbuf | 4. Fence // make sure 5 happens after 3 | 5. Put 1 to receiver's rsig | 1. Wait rsig to be 1, then set it 0 | 2. Unpack data from local rbuf | 3. Put 0 to sender's ssig ===========================================================================================================*/ /* n thread blocks. Each takes in charge one remote rank */ __global__ static void WaitAndPutDataToRemotelyAccessible(PetscInt ndstranks,PetscMPIInt *dstranks,char *dst,PetscInt *dstdisp,const char *src,PetscInt *srcdisp,uint64_t *srcsig,PetscInt unitbytes) { int bid = blockIdx.x; PetscMPIInt pe = dstranks[bid]; if (!nvshmem_ptr(dst,pe)) { PetscInt nelems = (srcdisp[bid+1]-srcdisp[bid])*unitbytes; nvshmem_uint64_wait_until(srcsig+bid,NVSHMEM_CMP_EQ,0); /* Wait until the sig = 0 */ srcsig[bid] = 1; nvshmem_putmem_nbi(dst+dstdisp[bid]*unitbytes,src+(srcdisp[bid]-srcdisp[0])*unitbytes,nelems,pe); } } /* one-thread kernel, which takes in charge all locally accesible */ __global__ static void WaitSignalsFromLocallyAccessible(PetscInt ndstranks,PetscMPIInt *dstranks,uint64_t *srcsig,const char *dst) { for (int i=0; i<ndstranks; i++) { int pe = dstranks[i]; if (nvshmem_ptr(dst,pe)) { nvshmem_uint64_wait_until(srcsig+i,NVSHMEM_CMP_EQ,0); /* Wait until the sig = 0 */ srcsig[i] = 1; } } } /* Put data in the given direction */ PetscErrorCode PetscSFLinkPutDataBegin_NVSHMEM(PetscSF sf,PetscSFLink link,PetscSFDirection direction) { PetscErrorCode ierr; hipError_t cerr; PetscSF_Basic *bas = (PetscSF_Basic*)sf->data; PetscInt ndstranks,nLocallyAccessible = 0; char *src,*dst; PetscInt *srcdisp_h,*dstdisp_h; PetscInt *srcdisp_d,*dstdisp_d; PetscMPIInt *dstranks_h; PetscMPIInt *dstranks_d; uint64_t *srcsig; PetscFunctionBegin; ierr = PetscSFLinkBuildDependenceBegin(sf,link,direction);CHKERRQ(ierr); if (direction == PETSCSF_ROOT2LEAF) { /* put data in rootbuf to leafbuf */ ndstranks = bas->nRemoteLeafRanks; /* number of (remote) leaf ranks */ src = link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; /* Both src & dst must be symmetric */ dst = link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; srcdisp_h = bas->ioffset+bas->ndiranks; /* offsets of rootbuf. 
srcdisp[0] is not necessarily zero */ srcdisp_d = bas->ioffset_d; srcsig = link->rootSendSig; dstdisp_h = bas->leafbufdisp; /* for my i-th remote leaf rank, I will access its leaf buf at offset leafbufdisp[i] */ dstdisp_d = bas->leafbufdisp_d; dstranks_h = bas->iranks+bas->ndiranks; /* remote leaf ranks */ dstranks_d = bas->iranks_d; } else { /* put data in leafbuf to rootbuf */ ndstranks = sf->nRemoteRootRanks; src = link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; dst = link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; srcdisp_h = sf->roffset+sf->ndranks; /* offsets of leafbuf */ srcdisp_d = sf->roffset_d; srcsig = link->leafSendSig; dstdisp_h = sf->rootbufdisp; /* for my i-th remote root rank, I will access its root buf at offset rootbufdisp[i] */ dstdisp_d = sf->rootbufdisp_d; dstranks_h = sf->ranks+sf->ndranks; /* remote root ranks */ dstranks_d = sf->ranks_d; } /* Wait for signals and then put data to dst ranks using non-blocking nvshmem_put, which are finished in PetscSFLinkPutDataEnd_NVSHMEM */ /* Count number of locally accessible neighbors, which should be a small number */ for (int i=0; i<ndstranks; i++) {if (nvshmem_ptr(dst,dstranks_h[i])) nLocallyAccessible++;} /* For remotely accessible PEs, send data to them in one kernel call */ if (nLocallyAccessible < ndstranks) { hipLaunchKernelGGL(( WaitAndPutDataToRemotelyAccessible), dim3(ndstranks),dim3(1),0,link->remoteCommStream, ndstranks,dstranks_d,dst,dstdisp_d,src,srcdisp_d,srcsig,link->unitbytes); cerr = hipGetLastError();CHKERRCUDA(cerr); } /* For locally accessible PEs, use host API, which uses CUDA copy-engines and is much faster than device API */ if (nLocallyAccessible) { hipLaunchKernelGGL(( WaitSignalsFromLocallyAccessible), dim3(1),dim3(1),0,link->remoteCommStream, ndstranks,dstranks_d,srcsig,dst); for (int i=0; i<ndstranks; i++) { int pe = dstranks_h[i]; if (nvshmem_ptr(dst,pe)) { /* If return a non-null pointer, then <pe> is locally accessible */ size_t nelems = (srcdisp_h[i+1]-srcdisp_h[i])*link->unitbytes; /* Initiate the nonblocking communication */ nvshmemx_putmem_nbi_on_stream(dst+dstdisp_h[i]*link->unitbytes,src+(srcdisp_h[i]-srcdisp_h[0])*link->unitbytes,nelems,pe,link->remoteCommStream); } } } if (nLocallyAccessible) { nvshmemx_quiet_on_stream(link->remoteCommStream); /* Calling nvshmem_fence/quiet() does not fence the above nvshmemx_putmem_nbi_on_stream! */ } PetscFunctionReturn(0); } /* A one-thread kernel. The thread takes in charge all remote PEs */ __global__ static void PutDataEnd(PetscInt nsrcranks,PetscInt ndstranks,PetscMPIInt *dstranks,uint64_t *dstsig,PetscInt *dstsigdisp) { /* TODO: Shall we finished the non-blocking remote puts? */ /* 1. Send a signal to each dst rank */ /* According to Akhil@NVIDIA, IB is orderred, so no fence is needed for remote PEs. For local PEs, we already called nvshmemx_quiet_on_stream(). Therefore, we are good to send signals to all dst ranks now. */ for (int i=0; i<ndstranks; i++) {nvshmemx_uint64_signal(dstsig+dstsigdisp[i],1,dstranks[i]);} /* set sig to 1 */ /* 2. 
Wait for signals from src ranks (if any) */ if (nsrcranks) { nvshmem_uint64_wait_until_all(dstsig,nsrcranks,NULL/*no mask*/,NVSHMEM_CMP_EQ,1); /* wait sigs to be 1, then set them to 0 */ for (int i=0; i<nsrcranks; i++) dstsig[i] = 0; } } /* Finish the communication -- A receiver waits until it can access its receive buffer */ PetscErrorCode PetscSFLinkPutDataEnd_NVSHMEM(PetscSF sf,PetscSFLink link,PetscSFDirection direction) { PetscErrorCode ierr; hipError_t cerr; PetscSF_Basic *bas = (PetscSF_Basic*)sf->data; PetscMPIInt *dstranks; uint64_t *dstsig; PetscInt nsrcranks,ndstranks,*dstsigdisp; PetscFunctionBegin; if (direction == PETSCSF_ROOT2LEAF) { /* put root data to leaf */ nsrcranks = sf->nRemoteRootRanks; ndstranks = bas->nRemoteLeafRanks; dstranks = bas->iranks_d; /* leaf ranks */ dstsig = link->leafRecvSig; /* I will set my leaf ranks's RecvSig */ dstsigdisp = bas->leafsigdisp_d; /* for my i-th remote leaf rank, I will access its signal at offset leafsigdisp[i] */ } else { /* LEAF2ROOT */ nsrcranks = bas->nRemoteLeafRanks; ndstranks = sf->nRemoteRootRanks; dstranks = sf->ranks_d; dstsig = link->rootRecvSig; dstsigdisp = sf->rootsigdisp_d; } if (nsrcranks || ndstranks) { hipLaunchKernelGGL(( PutDataEnd), dim3(1),dim3(1),0,link->remoteCommStream, nsrcranks,ndstranks,dstranks,dstsig,dstsigdisp); cerr = hipGetLastError();CHKERRCUDA(cerr); } ierr = PetscSFLinkBuildDependenceEnd(sf,link,direction);CHKERRQ(ierr); PetscFunctionReturn(0); } /* PostUnpack operation -- A receiver tells its senders that they are allowed to put data to here (it implies recv buf is free to take new data) */ PetscErrorCode PetscSFLinkSendSignalsToAllowPuttingData_NVSHMEM(PetscSF sf,PetscSFLink link,PetscSFDirection direction) { PetscSF_Basic *bas = (PetscSF_Basic*)sf->data; uint64_t *srcsig; PetscInt nsrcranks,*srcsigdisp_d; PetscMPIInt *srcranks_d; PetscFunctionBegin; if (direction == PETSCSF_ROOT2LEAF) { /* I allow my root ranks to put data to me */ nsrcranks = sf->nRemoteRootRanks; srcsig = link->rootSendSig; /* I want to set their send signals */ srcsigdisp_d = sf->rootsigdisp_d; /* offset of each root signal */ srcranks_d = sf->ranks_d; /* ranks of the n root ranks */ } else { /* LEAF2ROOT */ nsrcranks = bas->nRemoteLeafRanks; srcsig = link->leafSendSig; srcsigdisp_d = bas->leafsigdisp_d; srcranks_d = bas->iranks_d; } if (nsrcranks) { hipLaunchKernelGGL(( NvshmemSendSignals), dim3((nsrcranks+255)/256),dim3(256),0,link->remoteCommStream, nsrcranks,srcsig,srcsigdisp_d,srcranks_d,0); /* Set remote signals to 0 */ hipError_t cerr = hipGetLastError();CHKERRCUDA(cerr); } PetscFunctionReturn(0); } /* Destructor when the link uses nvshmem for communication */ static PetscErrorCode PetscSFLinkDestroy_NVSHMEM(PetscSF sf,PetscSFLink link) { PetscErrorCode ierr; hipError_t cerr; PetscFunctionBegin; cerr = hipEventDestroy(link->dataReady);CHKERRCUDA(cerr); cerr = hipEventDestroy(link->endRemoteComm);CHKERRCUDA(cerr); cerr = hipStreamDestroy(link->remoteCommStream);CHKERRCUDA(cerr); /* nvshmem does not need buffers on host, which should be NULL */ ierr = PetscNvshmemFree(link->leafbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]);CHKERRQ(ierr); ierr = PetscNvshmemFree(link->leafSendSig);CHKERRQ(ierr); ierr = PetscNvshmemFree(link->leafRecvSig);CHKERRQ(ierr); ierr = PetscNvshmemFree(link->rootbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]);CHKERRQ(ierr); ierr = PetscNvshmemFree(link->rootSendSig);CHKERRQ(ierr); ierr = PetscNvshmemFree(link->rootRecvSig);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode 
PetscSFLinkCreate_NVSHMEM(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,const void *rootdata,PetscMemType leafmtype,const void *leafdata,MPI_Op op,PetscSFOperation sfop,PetscSFLink *mylink) { PetscErrorCode ierr; hipError_t cerr; PetscSF_Basic *bas = (PetscSF_Basic*)sf->data; PetscSFLink *p,link; PetscBool match,rootdirect[2],leafdirect[2]; int greatestPriority; PetscFunctionBegin; /* Check to see if we can directly send/recv root/leafdata with the given sf, sfop and op. We only care root/leafdirect[PETSCSF_REMOTE], since we never need intermeidate buffers in local communication with NVSHMEM. */ if (sfop == PETSCSF_BCAST) { /* Move data from rootbuf to leafbuf */ if (sf->use_nvshmem_get) { rootdirect[PETSCSF_REMOTE] = PETSC_FALSE; /* send buffer has to be stand-alone (can't be rootdata) */ leafdirect[PETSCSF_REMOTE] = (PetscMemTypeNVSHMEM(leafmtype) && sf->leafcontig[PETSCSF_REMOTE] && op == MPI_REPLACE) ? PETSC_TRUE : PETSC_FALSE; } else { rootdirect[PETSCSF_REMOTE] = (PetscMemTypeNVSHMEM(rootmtype) && bas->rootcontig[PETSCSF_REMOTE]) ? PETSC_TRUE : PETSC_FALSE; leafdirect[PETSCSF_REMOTE] = PETSC_FALSE; /* Our put-protocol always needs a nvshmem alloc'ed recv buffer */ } } else if (sfop == PETSCSF_REDUCE) { /* Move data from leafbuf to rootbuf */ if (sf->use_nvshmem_get) { rootdirect[PETSCSF_REMOTE] = (PetscMemTypeNVSHMEM(rootmtype) && bas->rootcontig[PETSCSF_REMOTE] && op == MPI_REPLACE) ? PETSC_TRUE : PETSC_FALSE; leafdirect[PETSCSF_REMOTE] = PETSC_FALSE; } else { rootdirect[PETSCSF_REMOTE] = PETSC_FALSE; leafdirect[PETSCSF_REMOTE] = (PetscMemTypeNVSHMEM(leafmtype) && sf->leafcontig[PETSCSF_REMOTE]) ? PETSC_TRUE : PETSC_FALSE; } } else { /* PETSCSF_FETCH */ rootdirect[PETSCSF_REMOTE] = PETSC_FALSE; /* FETCH always need a separate rootbuf */ leafdirect[PETSCSF_REMOTE] = PETSC_FALSE; /* We also force allocating a separate leafbuf so that leafdata and leafupdate can share mpi requests */ } /* Look for free nvshmem links in cache */ for (p=&bas->avail; (link=*p); p=&link->next) { if (link->use_nvshmem) { ierr = MPIPetsc_Type_compare(unit,link->unit,&match);CHKERRQ(ierr); if (match) { *p = link->next; /* Remove from available list */ goto found; } } } ierr = PetscNew(&link);CHKERRQ(ierr); ierr = PetscSFLinkSetUp_Host(sf,link,unit);CHKERRQ(ierr); /* Compute link->unitbytes, dup link->unit etc. 
*/ if (sf->backend == PETSCSF_BACKEND_CUDA) {ierr = PetscSFLinkSetUp_CUDA(sf,link,unit);CHKERRQ(ierr);} /* Setup pack routines, streams etc */ #if defined(PETSC_HAVE_KOKKOS) else if (sf->backend == PETSCSF_BACKEND_KOKKOS) {ierr = PetscSFLinkSetUp_Kokkos(sf,link,unit);CHKERRQ(ierr);} #endif link->rootdirect[PETSCSF_LOCAL] = PETSC_TRUE; /* For the local part we directly use root/leafdata */ link->leafdirect[PETSCSF_LOCAL] = PETSC_TRUE; /* Init signals to zero */ if (!link->rootSendSig) {ierr = PetscNvshmemCalloc(bas->nRemoteLeafRanksMax*sizeof(uint64_t),(void**)&link->rootSendSig);CHKERRQ(ierr);} if (!link->rootRecvSig) {ierr = PetscNvshmemCalloc(bas->nRemoteLeafRanksMax*sizeof(uint64_t),(void**)&link->rootRecvSig);CHKERRQ(ierr);} if (!link->leafSendSig) {ierr = PetscNvshmemCalloc(sf->nRemoteRootRanksMax*sizeof(uint64_t),(void**)&link->leafSendSig);CHKERRQ(ierr);} if (!link->leafRecvSig) {ierr = PetscNvshmemCalloc(sf->nRemoteRootRanksMax*sizeof(uint64_t),(void**)&link->leafRecvSig);CHKERRQ(ierr);} link->use_nvshmem = PETSC_TRUE; link->rootmtype = PETSC_MEMTYPE_DEVICE; /* Only need 0/1-based mtype from now on */ link->leafmtype = PETSC_MEMTYPE_DEVICE; /* Overwrite some function pointers set by PetscSFLinkSetUp_CUDA */ link->Destroy = PetscSFLinkDestroy_NVSHMEM; if (sf->use_nvshmem_get) { /* get-based protocol */ link->PrePack = PetscSFLinkWaitSignalsOfCompletionOfGettingData_NVSHMEM; link->StartCommunication = PetscSFLinkGetDataBegin_NVSHMEM; link->FinishCommunication = PetscSFLinkGetDataEnd_NVSHMEM; } else { /* put-based protocol */ link->StartCommunication = PetscSFLinkPutDataBegin_NVSHMEM; link->FinishCommunication = PetscSFLinkPutDataEnd_NVSHMEM; link->PostUnpack = PetscSFLinkSendSignalsToAllowPuttingData_NVSHMEM; } cerr = hipDeviceGetStreamPriorityRange(NULL,&greatestPriority);CHKERRCUDA(cerr); cerr = hipStreamCreateWithPriority(&link->remoteCommStream,hipStreamNonBlocking,greatestPriority);CHKERRCUDA(cerr); cerr = hipEventCreateWithFlags(&link->dataReady,hipEventDisableTiming);CHKERRCUDA(cerr); cerr = hipEventCreateWithFlags(&link->endRemoteComm,hipEventDisableTiming);CHKERRCUDA(cerr); found: if (rootdirect[PETSCSF_REMOTE]) { link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE] = (char*)rootdata + bas->rootstart[PETSCSF_REMOTE]*link->unitbytes; } else { if (!link->rootbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]) { ierr = PetscNvshmemMalloc(bas->rootbuflen_rmax*link->unitbytes,(void**)&link->rootbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]);CHKERRQ(ierr); } link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE] = link->rootbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; } if (leafdirect[PETSCSF_REMOTE]) { link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE] = (char*)leafdata + sf->leafstart[PETSCSF_REMOTE]*link->unitbytes; } else { if (!link->leafbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]) { ierr = PetscNvshmemMalloc(sf->leafbuflen_rmax*link->unitbytes,(void**)&link->leafbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]);CHKERRQ(ierr); } link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE] = link->leafbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; } link->rootdirect[PETSCSF_REMOTE] = rootdirect[PETSCSF_REMOTE]; link->leafdirect[PETSCSF_REMOTE] = leafdirect[PETSCSF_REMOTE]; link->rootdata = rootdata; /* root/leafdata are keys to look up links in PetscSFXxxEnd */ link->leafdata = leafdata; link->next = bas->inuse; bas->inuse = link; *mylink = link; PetscFunctionReturn(0); } #if defined(PETSC_USE_REAL_SINGLE) PetscErrorCode PetscNvshmemSum(PetscInt count,float *dst,const float *src) 
{ PetscErrorCode ierr; PetscMPIInt num; /* Assume nvshmem's int is MPI's int */ PetscFunctionBegin; ierr = PetscMPIIntCast(count,&num);CHKERRQ(ierr); nvshmemx_float_sum_reduce_on_stream(NVSHMEM_TEAM_WORLD,dst,src,num,PetscDefaultCudaStream); PetscFunctionReturn(0); } PetscErrorCode PetscNvshmemMax(PetscInt count,float *dst,const float *src) { PetscErrorCode ierr; PetscMPIInt num; PetscFunctionBegin; ierr = PetscMPIIntCast(count,&num);CHKERRQ(ierr); nvshmemx_float_max_reduce_on_stream(NVSHMEM_TEAM_WORLD,dst,src,num,PetscDefaultCudaStream); PetscFunctionReturn(0); } #elif defined(PETSC_USE_REAL_DOUBLE) PetscErrorCode PetscNvshmemSum(PetscInt count,double *dst,const double *src) { PetscErrorCode ierr; PetscMPIInt num; PetscFunctionBegin; ierr = PetscMPIIntCast(count,&num);CHKERRQ(ierr); nvshmemx_double_sum_reduce_on_stream(NVSHMEM_TEAM_WORLD,dst,src,num,PetscDefaultCudaStream); PetscFunctionReturn(0); } PetscErrorCode PetscNvshmemMax(PetscInt count,double *dst,const double *src) { PetscErrorCode ierr; PetscMPIInt num; PetscFunctionBegin; ierr = PetscMPIIntCast(count,&num);CHKERRQ(ierr); nvshmemx_double_max_reduce_on_stream(NVSHMEM_TEAM_WORLD,dst,src,num,PetscDefaultCudaStream); PetscFunctionReturn(0); } #endif
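/* [Editor's illustrative sketch -- not part of the PETSc source above.]
   The put-based protocol described in the comments reduces to: the sender puts the
   payload into the receiver's symmetric recv buffer, fences so the data put is
   delivered before the signal put, then sets the receiver's recv signal; the receiver
   waits on that signal before unpacking. The minimal two-PE CUDA program below
   reproduces only that handshake with the same device APIs this file uses
   (nvshmem_putmem_nbi, nvshmem_fence, nvshmemx_uint64_signal, nvshmem_uint64_wait_until).
   It bootstraps with plain nvshmem_init() instead of the MPI-attribute init used by
   PETSc, and kernel/variable names are made up for the example. */
#include <nvshmem.h>
#include <nvshmemx.h>
#include <cstdio>

__global__ void put_then_signal(char *rbuf, const char *sbuf, size_t nbytes, uint64_t *rsig, int peer)
{
  nvshmem_putmem_nbi(rbuf, sbuf, nbytes, peer); /* step 3: put data into the peer's recv buffer */
  nvshmem_fence();                              /* step 4: order the data put before the signal put */
  nvshmemx_uint64_signal(rsig, 1, peer);        /* step 5: tell the peer its rbuf is ready */
}

__global__ void wait_then_clear(uint64_t *rsig)
{
  nvshmem_uint64_wait_until(rsig, NVSHMEM_CMP_EQ, 1); /* receiver step 1: wait for the signal */
  *rsig = 0;                                          /* reset it for the next round */
}

int main(void)
{
  nvshmem_init();
  int mype = nvshmem_my_pe();
  int npes = nvshmem_n_pes();
  const size_t nbytes = 16;
  char     *rbuf = (char*)nvshmem_malloc(nbytes);                   /* symmetric recv buffer */
  uint64_t *rsig = (uint64_t*)nvshmem_calloc(1, sizeof(uint64_t));  /* symmetric recv signal, starts at 0 */

  if (npes >= 2) {
    if (mype == 0) {                 /* sender: PE 0 puts to PE 1 */
      char *sbuf;
      cudaMalloc(&sbuf, nbytes);
      cudaMemset(sbuf, 7, nbytes);   /* "pack" something recognizable */
      put_then_signal<<<1,1>>>(rbuf, sbuf, nbytes, rsig, 1);
      cudaDeviceSynchronize();
      cudaFree(sbuf);
    } else if (mype == 1) {          /* receiver: PE 1 waits, then unpacks */
      wait_then_clear<<<1,1>>>(rsig);
      cudaDeviceSynchronize();
      char h[16];
      cudaMemcpy(h, rbuf, nbytes, cudaMemcpyDeviceToHost);
      printf("PE 1 received %d\n", (int)h[0]);   /* expect 7 */
    }
  }
  nvshmem_barrier_all();
  nvshmem_free(rbuf);
  nvshmem_free(rsig);
  nvshmem_finalize();
  return 0;
}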
3fcd5f9aa31da0042edfc1e4d67adcb8289f03bc.cu
#include <petsc/private/cudavecimpl.h> #include <../src/vec/is/sf/impls/basic/sfpack.h> #include <mpi.h> #include <nvshmem.h> #include <nvshmemx.h> PetscErrorCode PetscNvshmemInitializeCheck(void) { PetscErrorCode ierr; PetscFunctionBegin; if (!PetscNvshmemInitialized) { /* Note NVSHMEM does not provide a routine to check whether it is initialized */ nvshmemx_init_attr_t attr; attr.mpi_comm = &PETSC_COMM_WORLD; ierr = PetscDeviceInitialize(PETSC_DEVICE_CUDA);CHKERRQ(ierr); ierr = nvshmemx_init_attr(NVSHMEMX_INIT_WITH_MPI_COMM,&attr);CHKERRQ(ierr); PetscNvshmemInitialized = PETSC_TRUE; PetscBeganNvshmem = PETSC_TRUE; } PetscFunctionReturn(0); } PetscErrorCode PetscNvshmemMalloc(size_t size, void** ptr) { PetscErrorCode ierr; PetscFunctionBegin; ierr = PetscNvshmemInitializeCheck();CHKERRQ(ierr); *ptr = nvshmem_malloc(size); if (!*ptr) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"nvshmem_malloc() failed to allocate %zu bytes",size); PetscFunctionReturn(0); } PetscErrorCode PetscNvshmemCalloc(size_t size, void**ptr) { PetscErrorCode ierr; PetscFunctionBegin; ierr = PetscNvshmemInitializeCheck();CHKERRQ(ierr); *ptr = nvshmem_calloc(size,1); if (!*ptr) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"nvshmem_calloc() failed to allocate %zu bytes",size); PetscFunctionReturn(0); } PetscErrorCode PetscNvshmemFree_Private(void* ptr) { PetscFunctionBegin; nvshmem_free(ptr); PetscFunctionReturn(0); } PetscErrorCode PetscNvshmemFinalize(void) { PetscFunctionBegin; nvshmem_finalize(); PetscFunctionReturn(0); } /* Free nvshmem related fields in the SF */ PetscErrorCode PetscSFReset_Basic_NVSHMEM(PetscSF sf) { PetscErrorCode ierr; PetscSF_Basic *bas = (PetscSF_Basic*)sf->data; PetscFunctionBegin; ierr = PetscFree2(bas->leafsigdisp,bas->leafbufdisp);CHKERRQ(ierr); ierr = PetscSFFree(sf,PETSC_MEMTYPE_CUDA,bas->leafbufdisp_d);CHKERRQ(ierr); ierr = PetscSFFree(sf,PETSC_MEMTYPE_CUDA,bas->leafsigdisp_d);CHKERRQ(ierr); ierr = PetscSFFree(sf,PETSC_MEMTYPE_CUDA,bas->iranks_d);CHKERRQ(ierr); ierr = PetscSFFree(sf,PETSC_MEMTYPE_CUDA,bas->ioffset_d);CHKERRQ(ierr); ierr = PetscFree2(sf->rootsigdisp,sf->rootbufdisp);CHKERRQ(ierr); ierr = PetscSFFree(sf,PETSC_MEMTYPE_CUDA,sf->rootbufdisp_d);CHKERRQ(ierr); ierr = PetscSFFree(sf,PETSC_MEMTYPE_CUDA,sf->rootsigdisp_d);CHKERRQ(ierr); ierr = PetscSFFree(sf,PETSC_MEMTYPE_CUDA,sf->ranks_d);CHKERRQ(ierr); ierr = PetscSFFree(sf,PETSC_MEMTYPE_CUDA,sf->roffset_d);CHKERRQ(ierr); PetscFunctionReturn(0); } /* Set up NVSHMEM related fields for an SF of type SFBASIC (only after PetscSFSetup_Basic() already set up dependant fields */ static PetscErrorCode PetscSFSetUp_Basic_NVSHMEM(PetscSF sf) { PetscErrorCode ierr; cudaError_t cerr; PetscSF_Basic *bas = (PetscSF_Basic*)sf->data; PetscInt i,nRemoteRootRanks,nRemoteLeafRanks; PetscMPIInt tag; MPI_Comm comm; MPI_Request *rootreqs,*leafreqs; PetscInt tmp,stmp[4],rtmp[4]; /* tmps for send/recv buffers */ PetscFunctionBegin; ierr = PetscObjectGetComm((PetscObject)sf,&comm);CHKERRQ(ierr); ierr = PetscObjectGetNewTag((PetscObject)sf,&tag);CHKERRQ(ierr); nRemoteRootRanks = sf->nranks-sf->ndranks; nRemoteLeafRanks = bas->niranks-bas->ndiranks; sf->nRemoteRootRanks = nRemoteRootRanks; bas->nRemoteLeafRanks = nRemoteLeafRanks; ierr = PetscMalloc2(nRemoteLeafRanks,&rootreqs,nRemoteRootRanks,&leafreqs);CHKERRQ(ierr); stmp[0] = nRemoteRootRanks; stmp[1] = sf->leafbuflen[PETSCSF_REMOTE]; stmp[2] = nRemoteLeafRanks; stmp[3] = bas->rootbuflen[PETSCSF_REMOTE]; ierr = MPIU_Allreduce(stmp,rtmp,4,MPIU_INT,MPI_MAX,comm);CHKERRMPI(ierr); sf->nRemoteRootRanksMax 
= rtmp[0]; sf->leafbuflen_rmax = rtmp[1]; bas->nRemoteLeafRanksMax = rtmp[2]; bas->rootbuflen_rmax = rtmp[3]; /* Total four rounds of MPI communications to set up the nvshmem fields */ /* Root ranks to leaf ranks: send info about rootsigdisp[] and rootbufdisp[] */ ierr = PetscMalloc2(nRemoteRootRanks,&sf->rootsigdisp,nRemoteRootRanks,&sf->rootbufdisp);CHKERRQ(ierr); for (i=0; i<nRemoteRootRanks; i++) {ierr = MPI_Irecv(&sf->rootsigdisp[i],1,MPIU_INT,sf->ranks[i+sf->ndranks],tag,comm,&leafreqs[i]);CHKERRMPI(ierr);} /* Leaves recv */ for (i=0; i<nRemoteLeafRanks; i++) {ierr = MPI_Send(&i,1,MPIU_INT,bas->iranks[i+bas->ndiranks],tag,comm);CHKERRMPI(ierr);} /* Roots send. Note i changes, so we use MPI_Send. */ ierr = MPI_Waitall(nRemoteRootRanks,leafreqs,MPI_STATUSES_IGNORE);CHKERRMPI(ierr); for (i=0; i<nRemoteRootRanks; i++) {ierr = MPI_Irecv(&sf->rootbufdisp[i],1,MPIU_INT,sf->ranks[i+sf->ndranks],tag,comm,&leafreqs[i]);CHKERRMPI(ierr);} /* Leaves recv */ for (i=0; i<nRemoteLeafRanks; i++) { tmp = bas->ioffset[i+bas->ndiranks] - bas->ioffset[bas->ndiranks]; ierr = MPI_Send(&tmp,1,MPIU_INT,bas->iranks[i+bas->ndiranks],tag,comm);CHKERRMPI(ierr); /* Roots send. Note tmp changes, so we use MPI_Send. */ } ierr = MPI_Waitall(nRemoteRootRanks,leafreqs,MPI_STATUSES_IGNORE);CHKERRMPI(ierr); cerr = cudaMalloc((void**)&sf->rootbufdisp_d,nRemoteRootRanks*sizeof(PetscInt));CHKERRCUDA(cerr); cerr = cudaMalloc((void**)&sf->rootsigdisp_d,nRemoteRootRanks*sizeof(PetscInt));CHKERRCUDA(cerr); cerr = cudaMalloc((void**)&sf->ranks_d,nRemoteRootRanks*sizeof(PetscMPIInt));CHKERRCUDA(cerr); cerr = cudaMalloc((void**)&sf->roffset_d,(nRemoteRootRanks+1)*sizeof(PetscInt));CHKERRCUDA(cerr); cerr = cudaMemcpyAsync(sf->rootbufdisp_d,sf->rootbufdisp,nRemoteRootRanks*sizeof(PetscInt),cudaMemcpyHostToDevice,PetscDefaultCudaStream);CHKERRCUDA(cerr); cerr = cudaMemcpyAsync(sf->rootsigdisp_d,sf->rootsigdisp,nRemoteRootRanks*sizeof(PetscInt),cudaMemcpyHostToDevice,PetscDefaultCudaStream);CHKERRCUDA(cerr); cerr = cudaMemcpyAsync(sf->ranks_d,sf->ranks+sf->ndranks,nRemoteRootRanks*sizeof(PetscMPIInt),cudaMemcpyHostToDevice,PetscDefaultCudaStream);CHKERRCUDA(cerr); cerr = cudaMemcpyAsync(sf->roffset_d,sf->roffset+sf->ndranks,(nRemoteRootRanks+1)*sizeof(PetscInt),cudaMemcpyHostToDevice,PetscDefaultCudaStream);CHKERRCUDA(cerr); /* Leaf ranks to root ranks: send info about leafsigdisp[] and leafbufdisp[] */ ierr = PetscMalloc2(nRemoteLeafRanks,&bas->leafsigdisp,nRemoteLeafRanks,&bas->leafbufdisp);CHKERRQ(ierr); for (i=0; i<nRemoteLeafRanks; i++) {ierr = MPI_Irecv(&bas->leafsigdisp[i],1,MPIU_INT,bas->iranks[i+bas->ndiranks],tag,comm,&rootreqs[i]);CHKERRMPI(ierr);} for (i=0; i<nRemoteRootRanks; i++) {ierr = MPI_Send(&i,1,MPIU_INT,sf->ranks[i+sf->ndranks],tag,comm);CHKERRMPI(ierr);} ierr = MPI_Waitall(nRemoteLeafRanks,rootreqs,MPI_STATUSES_IGNORE);CHKERRMPI(ierr); for (i=0; i<nRemoteLeafRanks; i++) {ierr = MPI_Irecv(&bas->leafbufdisp[i],1,MPIU_INT,bas->iranks[i+bas->ndiranks],tag,comm,&rootreqs[i]);CHKERRMPI(ierr);} for (i=0; i<nRemoteRootRanks; i++) { tmp = sf->roffset[i+sf->ndranks] - sf->roffset[sf->ndranks]; ierr = MPI_Send(&tmp,1,MPIU_INT,sf->ranks[i+sf->ndranks],tag,comm);CHKERRMPI(ierr); } ierr = MPI_Waitall(nRemoteLeafRanks,rootreqs,MPI_STATUSES_IGNORE);CHKERRMPI(ierr); cerr = cudaMalloc((void**)&bas->leafbufdisp_d,nRemoteLeafRanks*sizeof(PetscInt));CHKERRCUDA(cerr); cerr = cudaMalloc((void**)&bas->leafsigdisp_d,nRemoteLeafRanks*sizeof(PetscInt));CHKERRCUDA(cerr); cerr = 
cudaMalloc((void**)&bas->iranks_d,nRemoteLeafRanks*sizeof(PetscMPIInt));CHKERRCUDA(cerr); cerr = cudaMalloc((void**)&bas->ioffset_d,(nRemoteLeafRanks+1)*sizeof(PetscInt));CHKERRCUDA(cerr); cerr = cudaMemcpyAsync(bas->leafbufdisp_d,bas->leafbufdisp,nRemoteLeafRanks*sizeof(PetscInt),cudaMemcpyHostToDevice,PetscDefaultCudaStream);CHKERRCUDA(cerr); cerr = cudaMemcpyAsync(bas->leafsigdisp_d,bas->leafsigdisp,nRemoteLeafRanks*sizeof(PetscInt),cudaMemcpyHostToDevice,PetscDefaultCudaStream);CHKERRCUDA(cerr); cerr = cudaMemcpyAsync(bas->iranks_d,bas->iranks+bas->ndiranks,nRemoteLeafRanks*sizeof(PetscMPIInt),cudaMemcpyHostToDevice,PetscDefaultCudaStream);CHKERRCUDA(cerr); cerr = cudaMemcpyAsync(bas->ioffset_d,bas->ioffset+bas->ndiranks,(nRemoteLeafRanks+1)*sizeof(PetscInt),cudaMemcpyHostToDevice,PetscDefaultCudaStream);CHKERRCUDA(cerr); ierr = PetscFree2(rootreqs,leafreqs);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode PetscSFLinkNvshmemCheck(PetscSF sf,PetscMemType rootmtype,const void *rootdata,PetscMemType leafmtype,const void *leafdata,PetscBool *use_nvshmem) { PetscErrorCode ierr; MPI_Comm comm; PetscBool isBasic; PetscMPIInt result = MPI_UNEQUAL; PetscFunctionBegin; ierr = PetscObjectGetComm((PetscObject)sf,&comm);CHKERRQ(ierr); /* Check if the sf is eligible for NVSHMEM, if we have not checked yet. Note the check result <use_nvshmem> must be the same over comm, since an SFLink must be collectively either NVSHMEM or MPI. */ sf->checked_nvshmem_eligibility = PETSC_TRUE; if (sf->use_nvshmem && !sf->checked_nvshmem_eligibility) { /* Only use NVSHMEM for SFBASIC on PETSC_COMM_WORLD */ ierr = PetscObjectTypeCompare((PetscObject)sf,PETSCSFBASIC,&isBasic);CHKERRQ(ierr); if (isBasic) {ierr = MPI_Comm_compare(PETSC_COMM_WORLD,comm,&result);CHKERRMPI(ierr);} if (!isBasic || (result != MPI_IDENT && result != MPI_CONGRUENT)) sf->use_nvshmem = PETSC_FALSE; /* If not eligible, clear the flag so that we don't try again */ /* Do further check: If on a rank, both rootdata and leafdata are NULL, we might think they are PETSC_MEMTYPE_CUDA (or HOST) and then use NVSHMEM. But if root/leafmtypes on other ranks are PETSC_MEMTYPE_HOST (or DEVICE), this would lead to inconsistency on the return value <use_nvshmem>. To be safe, we simply disable nvshmem on these rare SFs. */ if (sf->use_nvshmem) { PetscInt hasNullRank = (!rootdata && !leafdata) ? 1 : 0; ierr = MPI_Allreduce(MPI_IN_PLACE,&hasNullRank,1,MPIU_INT,MPI_LOR,comm);CHKERRMPI(ierr); if (hasNullRank) sf->use_nvshmem = PETSC_FALSE; } sf->checked_nvshmem_eligibility = PETSC_TRUE; /* If eligible, don't do above check again */ } /* Check if rootmtype and leafmtype collectively are PETSC_MEMTYPE_CUDA */ if (sf->use_nvshmem) { PetscInt oneCuda = (!rootdata || PetscMemTypeCUDA(rootmtype)) && (!leafdata || PetscMemTypeCUDA(leafmtype)) ? 1 : 0; /* Do I use cuda for both root&leafmtype? */ PetscInt allCuda = oneCuda; /* Assume the same for all ranks. But if not, in opt mode, return value <use_nvshmem> won't be collective! */ #if defined(PETSC_USE_DEBUG) /* Check in debug mode. Note MPI_Allreduce is expensive, so only in debug mode */ ierr = MPI_Allreduce(&oneCuda,&allCuda,1,MPIU_INT,MPI_LAND,comm);CHKERRMPI(ierr); if (allCuda != oneCuda) SETERRQ(comm,PETSC_ERR_SUP,"root/leaf mtypes are inconsistent among ranks, which may lead to SF nvshmem failure in opt mode. 
Add -use_nvshmem 0 to disable it."); #endif if (allCuda) { ierr = PetscNvshmemInitializeCheck();CHKERRQ(ierr); if (!sf->setup_nvshmem) { /* Set up nvshmem related fields on this SF on-demand */ ierr = PetscSFSetUp_Basic_NVSHMEM(sf);CHKERRQ(ierr); sf->setup_nvshmem = PETSC_TRUE; } *use_nvshmem = PETSC_TRUE; } else { *use_nvshmem = PETSC_FALSE; } } else { *use_nvshmem = PETSC_FALSE; } PetscFunctionReturn(0); } /* Build dependence between <stream> and <remoteCommStream> at the entry of NVSHMEM communication */ static PetscErrorCode PetscSFLinkBuildDependenceBegin(PetscSF sf,PetscSFLink link,PetscSFDirection direction) { cudaError_t cerr; PetscSF_Basic *bas = (PetscSF_Basic *)sf->data; PetscInt buflen = (direction == PETSCSF_ROOT2LEAF)? bas->rootbuflen[PETSCSF_REMOTE] : sf->leafbuflen[PETSCSF_REMOTE]; PetscFunctionBegin; if (buflen) { cerr = cudaEventRecord(link->dataReady,link->stream);CHKERRCUDA(cerr); cerr = cudaStreamWaitEvent(link->remoteCommStream,link->dataReady,0);CHKERRCUDA(cerr); } PetscFunctionReturn(0); } /* Build dependence between <stream> and <remoteCommStream> at the exit of NVSHMEM communication */ static PetscErrorCode PetscSFLinkBuildDependenceEnd(PetscSF sf,PetscSFLink link,PetscSFDirection direction) { cudaError_t cerr; PetscSF_Basic *bas = (PetscSF_Basic *)sf->data; PetscInt buflen = (direction == PETSCSF_ROOT2LEAF)? sf->leafbuflen[PETSCSF_REMOTE] : bas->rootbuflen[PETSCSF_REMOTE]; PetscFunctionBegin; /* If unpack to non-null device buffer, build the endRemoteComm dependance */ if (buflen) { cerr = cudaEventRecord(link->endRemoteComm,link->remoteCommStream);CHKERRCUDA(cerr); cerr = cudaStreamWaitEvent(link->stream,link->endRemoteComm,0);CHKERRCUDA(cerr); } PetscFunctionReturn(0); } /* Send/Put signals to remote ranks Input parameters: + n - Number of remote ranks . sig - Signal address in symmetric heap . sigdisp - To i-th rank, use its signal at offset sigdisp[i] . ranks - remote ranks - newval - Set signals to this value */ __global__ static void NvshmemSendSignals(PetscInt n,uint64_t *sig,PetscInt *sigdisp,PetscMPIInt *ranks,uint64_t newval) { int i = blockIdx.x*blockDim.x + threadIdx.x; /* Each thread puts one remote signal */ if (i < n) nvshmemx_uint64_signal(sig+sigdisp[i],newval,ranks[i]); } /* Wait until local signals equal to the expected value and then set them to a new value Input parameters: + n - Number of signals . sig - Local signal address . expval - expected value - newval - Set signals to this new value */ __global__ static void NvshmemWaitSignals(PetscInt n,uint64_t *sig,uint64_t expval,uint64_t newval) { #if 0 /* Akhil Langer@NVIDIA said using 1 thread and nvshmem_uint64_wait_until_all is better */ int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) { nvshmem_signal_wait_until(sig+i,NVSHMEM_CMP_EQ,expval); sig[i] = newval; } #else nvshmem_uint64_wait_until_all(sig,n,NULL/*no mask*/,NVSHMEM_CMP_EQ,expval); for (int i=0; i<n; i++) sig[i] = newval; #endif } /* =========================================================================================================== A set of routines to support receiver initiated communication using the get method The getting protocol is: Sender has a send buf (sbuf) and a signal variable (ssig); Receiver has a recv buf (rbuf) and a signal variable (rsig); All signal variables have an initial value 0. Sender: | Receiver: 1. Wait ssig be 0, then set it to 1 2. Pack data into stand alone sbuf | 3. Put 1 to receiver's rsig | 1. Wait rsig to be 1, then set it 0 | 2. Get data from remote sbuf to local rbuf | 3. 
Put 1 to sender's ssig | 4. Unpack data from local rbuf ===========================================================================================================*/ /* PrePack operation -- since sender will overwrite the send buffer which the receiver might be getting data from. Sender waits for signals (from receivers) indicating receivers have finished getting data */ PetscErrorCode PetscSFLinkWaitSignalsOfCompletionOfGettingData_NVSHMEM(PetscSF sf,PetscSFLink link,PetscSFDirection direction) { PetscSF_Basic *bas = (PetscSF_Basic*)sf->data; uint64_t *sig; PetscInt n; PetscFunctionBegin; if (direction == PETSCSF_ROOT2LEAF) { /* leaf ranks are getting data */ sig = link->rootSendSig; /* leaf ranks set my rootSendsig */ n = bas->nRemoteLeafRanks; } else { /* LEAF2ROOT */ sig = link->leafSendSig; n = sf->nRemoteRootRanks; } if (n) { NvshmemWaitSignals<<<1,1,0,link->remoteCommStream>>>(n,sig,0,1); /* wait the signals to be 0, then set them to 1 */ cudaError_t cerr = cudaGetLastError();CHKERRCUDA(cerr); } PetscFunctionReturn(0); } /* n thread blocks. Each takes in charge one remote rank */ __global__ static void GetDataFromRemotelyAccessible(PetscInt nsrcranks,PetscMPIInt *srcranks,const char *src,PetscInt *srcdisp,char *dst,PetscInt *dstdisp,PetscInt unitbytes) { int bid = blockIdx.x; PetscMPIInt pe = srcranks[bid]; if (!nvshmem_ptr(src,pe)) { PetscInt nelems = (dstdisp[bid+1]-dstdisp[bid])*unitbytes; nvshmem_getmem_nbi(dst+(dstdisp[bid]-dstdisp[0])*unitbytes,src+srcdisp[bid]*unitbytes,nelems,pe); } } /* Start communication -- Get data in the given direction */ PetscErrorCode PetscSFLinkGetDataBegin_NVSHMEM(PetscSF sf,PetscSFLink link,PetscSFDirection direction) { PetscErrorCode ierr; cudaError_t cerr; PetscSF_Basic *bas = (PetscSF_Basic*)sf->data; PetscInt nsrcranks,ndstranks,nLocallyAccessible = 0; char *src,*dst; PetscInt *srcdisp_h,*dstdisp_h; PetscInt *srcdisp_d,*dstdisp_d; PetscMPIInt *srcranks_h; PetscMPIInt *srcranks_d,*dstranks_d; uint64_t *dstsig; PetscInt *dstsigdisp_d; PetscFunctionBegin; ierr = PetscSFLinkBuildDependenceBegin(sf,link,direction);CHKERRQ(ierr); if (direction == PETSCSF_ROOT2LEAF) { /* src is root, dst is leaf; we will move data from src to dst */ nsrcranks = sf->nRemoteRootRanks; src = link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; /* root buf is the send buf; it is in symmetric heap */ srcdisp_h = sf->rootbufdisp; /* for my i-th remote root rank, I will access its buf at offset rootbufdisp[i] */ srcdisp_d = sf->rootbufdisp_d; srcranks_h = sf->ranks+sf->ndranks; /* my (remote) root ranks */ srcranks_d = sf->ranks_d; ndstranks = bas->nRemoteLeafRanks; dst = link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; /* recv buf is the local leaf buf, also in symmetric heap */ dstdisp_h = sf->roffset+sf->ndranks; /* offsets of the local leaf buf. 
Note dstdisp[0] is not necessarily 0 */ dstdisp_d = sf->roffset_d; dstranks_d = bas->iranks_d; /* my (remote) leaf ranks */ dstsig = link->leafRecvSig; dstsigdisp_d = bas->leafsigdisp_d; } else { /* src is leaf, dst is root; we will move data from src to dst */ nsrcranks = bas->nRemoteLeafRanks; src = link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; /* leaf buf is the send buf */ srcdisp_h = bas->leafbufdisp; /* for my i-th remote root rank, I will access its buf at offset rootbufdisp[i] */ srcdisp_d = bas->leafbufdisp_d; srcranks_h = bas->iranks+bas->ndiranks; /* my (remote) root ranks */ srcranks_d = bas->iranks_d; ndstranks = sf->nRemoteRootRanks; dst = link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; /* the local root buf is the recv buf */ dstdisp_h = bas->ioffset+bas->ndiranks; /* offsets of the local root buf. Note dstdisp[0] is not necessarily 0 */ dstdisp_d = bas->ioffset_d; dstranks_d = sf->ranks_d; /* my (remote) root ranks */ dstsig = link->rootRecvSig; dstsigdisp_d = sf->rootsigdisp_d; } /* After Pack operation -- src tells dst ranks that they are allowed to get data */ if (ndstranks) { NvshmemSendSignals<<<(ndstranks+255)/256,256,0,link->remoteCommStream>>>(ndstranks,dstsig,dstsigdisp_d,dstranks_d,1); /* set signals to 1 */ cerr = cudaGetLastError();CHKERRCUDA(cerr); } /* dst waits for signals (permissions) from src ranks to start getting data */ if (nsrcranks) { NvshmemWaitSignals<<<1,1,0,link->remoteCommStream>>>(nsrcranks,dstsig,1,0); /* wait the signals to be 1, then set them to 0 */ cerr = cudaGetLastError();CHKERRCUDA(cerr); } /* dst gets data from src ranks using non-blocking nvshmem_gets, which are finished in PetscSFLinkGetDataEnd_NVSHMEM() */ /* Count number of locally accessible src ranks, which should be a small number */ for (int i=0; i<nsrcranks; i++) {if (nvshmem_ptr(src,srcranks_h[i])) nLocallyAccessible++;} /* Get data from remotely accessible PEs */ if (nLocallyAccessible < nsrcranks) { GetDataFromRemotelyAccessible<<<nsrcranks,1,0,link->remoteCommStream>>>(nsrcranks,srcranks_d,src,srcdisp_d,dst,dstdisp_d,link->unitbytes); cerr = cudaGetLastError();CHKERRCUDA(cerr); } /* Get data from locally accessible PEs */ if (nLocallyAccessible) { for (int i=0; i<nsrcranks; i++) { int pe = srcranks_h[i]; if (nvshmem_ptr(src,pe)) { size_t nelems = (dstdisp_h[i+1]-dstdisp_h[i])*link->unitbytes; nvshmemx_getmem_nbi_on_stream(dst+(dstdisp_h[i]-dstdisp_h[0])*link->unitbytes,src+srcdisp_h[i]*link->unitbytes,nelems,pe,link->remoteCommStream); } } } PetscFunctionReturn(0); } /* Finish the communication (can be done before Unpack) Receiver tells its senders that they are allowed to reuse their send buffer (since receiver has got data from their send buffer) */ PetscErrorCode PetscSFLinkGetDataEnd_NVSHMEM(PetscSF sf,PetscSFLink link,PetscSFDirection direction) { PetscErrorCode ierr; cudaError_t cerr; PetscSF_Basic *bas = (PetscSF_Basic*)sf->data; uint64_t *srcsig; PetscInt nsrcranks,*srcsigdisp; PetscMPIInt *srcranks; PetscFunctionBegin; if (direction == PETSCSF_ROOT2LEAF) { /* leaf ranks are getting data */ nsrcranks = sf->nRemoteRootRanks; srcsig = link->rootSendSig; /* I want to set their root signal */ srcsigdisp = sf->rootsigdisp_d; /* offset of each root signal */ srcranks = sf->ranks_d; /* ranks of the n root ranks */ } else { /* LEAF2ROOT, root ranks are getting data */ nsrcranks = bas->nRemoteLeafRanks; srcsig = link->leafSendSig; srcsigdisp = bas->leafsigdisp_d; srcranks = bas->iranks_d; } if (nsrcranks) { nvshmemx_quiet_on_stream(link->remoteCommStream); /* 
Finish the nonblocking get, so that we can unpack afterwards */ cerr = cudaGetLastError();CHKERRCUDA(cerr); NvshmemSendSignals<<<(nsrcranks+511)/512,512,0,link->remoteCommStream>>>(nsrcranks,srcsig,srcsigdisp,srcranks,0); /* set signals to 0 */ cerr = cudaGetLastError();CHKERRCUDA(cerr); } ierr = PetscSFLinkBuildDependenceEnd(sf,link,direction);CHKERRQ(ierr); PetscFunctionReturn(0); } /* =========================================================================================================== A set of routines to support sender initiated communication using the put-based method (the default) The putting protocol is: Sender has a send buf (sbuf) and a send signal var (ssig); Receiver has a stand-alone recv buf (rbuf) and a recv signal var (rsig); All signal variables have an initial value 0. rbuf is allocated by SF and is in nvshmem space. Sender: | Receiver: | 1. Pack data into sbuf | 2. Wait ssig be 0, then set it to 1 | 3. Put data to remote stand-alone rbuf | 4. Fence // make sure 5 happens after 3 | 5. Put 1 to receiver's rsig | 1. Wait rsig to be 1, then set it 0 | 2. Unpack data from local rbuf | 3. Put 0 to sender's ssig ===========================================================================================================*/ /* n thread blocks. Each takes in charge one remote rank */ __global__ static void WaitAndPutDataToRemotelyAccessible(PetscInt ndstranks,PetscMPIInt *dstranks,char *dst,PetscInt *dstdisp,const char *src,PetscInt *srcdisp,uint64_t *srcsig,PetscInt unitbytes) { int bid = blockIdx.x; PetscMPIInt pe = dstranks[bid]; if (!nvshmem_ptr(dst,pe)) { PetscInt nelems = (srcdisp[bid+1]-srcdisp[bid])*unitbytes; nvshmem_uint64_wait_until(srcsig+bid,NVSHMEM_CMP_EQ,0); /* Wait until the sig = 0 */ srcsig[bid] = 1; nvshmem_putmem_nbi(dst+dstdisp[bid]*unitbytes,src+(srcdisp[bid]-srcdisp[0])*unitbytes,nelems,pe); } } /* one-thread kernel, which takes in charge all locally accesible */ __global__ static void WaitSignalsFromLocallyAccessible(PetscInt ndstranks,PetscMPIInt *dstranks,uint64_t *srcsig,const char *dst) { for (int i=0; i<ndstranks; i++) { int pe = dstranks[i]; if (nvshmem_ptr(dst,pe)) { nvshmem_uint64_wait_until(srcsig+i,NVSHMEM_CMP_EQ,0); /* Wait until the sig = 0 */ srcsig[i] = 1; } } } /* Put data in the given direction */ PetscErrorCode PetscSFLinkPutDataBegin_NVSHMEM(PetscSF sf,PetscSFLink link,PetscSFDirection direction) { PetscErrorCode ierr; cudaError_t cerr; PetscSF_Basic *bas = (PetscSF_Basic*)sf->data; PetscInt ndstranks,nLocallyAccessible = 0; char *src,*dst; PetscInt *srcdisp_h,*dstdisp_h; PetscInt *srcdisp_d,*dstdisp_d; PetscMPIInt *dstranks_h; PetscMPIInt *dstranks_d; uint64_t *srcsig; PetscFunctionBegin; ierr = PetscSFLinkBuildDependenceBegin(sf,link,direction);CHKERRQ(ierr); if (direction == PETSCSF_ROOT2LEAF) { /* put data in rootbuf to leafbuf */ ndstranks = bas->nRemoteLeafRanks; /* number of (remote) leaf ranks */ src = link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; /* Both src & dst must be symmetric */ dst = link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; srcdisp_h = bas->ioffset+bas->ndiranks; /* offsets of rootbuf. 
srcdisp[0] is not necessarily zero */ srcdisp_d = bas->ioffset_d; srcsig = link->rootSendSig; dstdisp_h = bas->leafbufdisp; /* for my i-th remote leaf rank, I will access its leaf buf at offset leafbufdisp[i] */ dstdisp_d = bas->leafbufdisp_d; dstranks_h = bas->iranks+bas->ndiranks; /* remote leaf ranks */ dstranks_d = bas->iranks_d; } else { /* put data in leafbuf to rootbuf */ ndstranks = sf->nRemoteRootRanks; src = link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; dst = link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; srcdisp_h = sf->roffset+sf->ndranks; /* offsets of leafbuf */ srcdisp_d = sf->roffset_d; srcsig = link->leafSendSig; dstdisp_h = sf->rootbufdisp; /* for my i-th remote root rank, I will access its root buf at offset rootbufdisp[i] */ dstdisp_d = sf->rootbufdisp_d; dstranks_h = sf->ranks+sf->ndranks; /* remote root ranks */ dstranks_d = sf->ranks_d; } /* Wait for signals and then put data to dst ranks using non-blocking nvshmem_put, which are finished in PetscSFLinkPutDataEnd_NVSHMEM */ /* Count number of locally accessible neighbors, which should be a small number */ for (int i=0; i<ndstranks; i++) {if (nvshmem_ptr(dst,dstranks_h[i])) nLocallyAccessible++;} /* For remotely accessible PEs, send data to them in one kernel call */ if (nLocallyAccessible < ndstranks) { WaitAndPutDataToRemotelyAccessible<<<ndstranks,1,0,link->remoteCommStream>>>(ndstranks,dstranks_d,dst,dstdisp_d,src,srcdisp_d,srcsig,link->unitbytes); cerr = cudaGetLastError();CHKERRCUDA(cerr); } /* For locally accessible PEs, use host API, which uses CUDA copy-engines and is much faster than device API */ if (nLocallyAccessible) { WaitSignalsFromLocallyAccessible<<<1,1,0,link->remoteCommStream>>>(ndstranks,dstranks_d,srcsig,dst); for (int i=0; i<ndstranks; i++) { int pe = dstranks_h[i]; if (nvshmem_ptr(dst,pe)) { /* If return a non-null pointer, then <pe> is locally accessible */ size_t nelems = (srcdisp_h[i+1]-srcdisp_h[i])*link->unitbytes; /* Initiate the nonblocking communication */ nvshmemx_putmem_nbi_on_stream(dst+dstdisp_h[i]*link->unitbytes,src+(srcdisp_h[i]-srcdisp_h[0])*link->unitbytes,nelems,pe,link->remoteCommStream); } } } if (nLocallyAccessible) { nvshmemx_quiet_on_stream(link->remoteCommStream); /* Calling nvshmem_fence/quiet() does not fence the above nvshmemx_putmem_nbi_on_stream! */ } PetscFunctionReturn(0); } /* A one-thread kernel. The thread takes in charge all remote PEs */ __global__ static void PutDataEnd(PetscInt nsrcranks,PetscInt ndstranks,PetscMPIInt *dstranks,uint64_t *dstsig,PetscInt *dstsigdisp) { /* TODO: Shall we finished the non-blocking remote puts? */ /* 1. Send a signal to each dst rank */ /* According to Akhil@NVIDIA, IB is orderred, so no fence is needed for remote PEs. For local PEs, we already called nvshmemx_quiet_on_stream(). Therefore, we are good to send signals to all dst ranks now. */ for (int i=0; i<ndstranks; i++) {nvshmemx_uint64_signal(dstsig+dstsigdisp[i],1,dstranks[i]);} /* set sig to 1 */ /* 2. 
Wait for signals from src ranks (if any) */ if (nsrcranks) { nvshmem_uint64_wait_until_all(dstsig,nsrcranks,NULL/*no mask*/,NVSHMEM_CMP_EQ,1); /* wait sigs to be 1, then set them to 0 */ for (int i=0; i<nsrcranks; i++) dstsig[i] = 0; } } /* Finish the communication -- A receiver waits until it can access its receive buffer */ PetscErrorCode PetscSFLinkPutDataEnd_NVSHMEM(PetscSF sf,PetscSFLink link,PetscSFDirection direction) { PetscErrorCode ierr; cudaError_t cerr; PetscSF_Basic *bas = (PetscSF_Basic*)sf->data; PetscMPIInt *dstranks; uint64_t *dstsig; PetscInt nsrcranks,ndstranks,*dstsigdisp; PetscFunctionBegin; if (direction == PETSCSF_ROOT2LEAF) { /* put root data to leaf */ nsrcranks = sf->nRemoteRootRanks; ndstranks = bas->nRemoteLeafRanks; dstranks = bas->iranks_d; /* leaf ranks */ dstsig = link->leafRecvSig; /* I will set my leaf ranks's RecvSig */ dstsigdisp = bas->leafsigdisp_d; /* for my i-th remote leaf rank, I will access its signal at offset leafsigdisp[i] */ } else { /* LEAF2ROOT */ nsrcranks = bas->nRemoteLeafRanks; ndstranks = sf->nRemoteRootRanks; dstranks = sf->ranks_d; dstsig = link->rootRecvSig; dstsigdisp = sf->rootsigdisp_d; } if (nsrcranks || ndstranks) { PutDataEnd<<<1,1,0,link->remoteCommStream>>>(nsrcranks,ndstranks,dstranks,dstsig,dstsigdisp); cerr = cudaGetLastError();CHKERRCUDA(cerr); } ierr = PetscSFLinkBuildDependenceEnd(sf,link,direction);CHKERRQ(ierr); PetscFunctionReturn(0); } /* PostUnpack operation -- A receiver tells its senders that they are allowed to put data to here (it implies recv buf is free to take new data) */ PetscErrorCode PetscSFLinkSendSignalsToAllowPuttingData_NVSHMEM(PetscSF sf,PetscSFLink link,PetscSFDirection direction) { PetscSF_Basic *bas = (PetscSF_Basic*)sf->data; uint64_t *srcsig; PetscInt nsrcranks,*srcsigdisp_d; PetscMPIInt *srcranks_d; PetscFunctionBegin; if (direction == PETSCSF_ROOT2LEAF) { /* I allow my root ranks to put data to me */ nsrcranks = sf->nRemoteRootRanks; srcsig = link->rootSendSig; /* I want to set their send signals */ srcsigdisp_d = sf->rootsigdisp_d; /* offset of each root signal */ srcranks_d = sf->ranks_d; /* ranks of the n root ranks */ } else { /* LEAF2ROOT */ nsrcranks = bas->nRemoteLeafRanks; srcsig = link->leafSendSig; srcsigdisp_d = bas->leafsigdisp_d; srcranks_d = bas->iranks_d; } if (nsrcranks) { NvshmemSendSignals<<<(nsrcranks+255)/256,256,0,link->remoteCommStream>>>(nsrcranks,srcsig,srcsigdisp_d,srcranks_d,0); /* Set remote signals to 0 */ cudaError_t cerr = cudaGetLastError();CHKERRCUDA(cerr); } PetscFunctionReturn(0); } /* Destructor when the link uses nvshmem for communication */ static PetscErrorCode PetscSFLinkDestroy_NVSHMEM(PetscSF sf,PetscSFLink link) { PetscErrorCode ierr; cudaError_t cerr; PetscFunctionBegin; cerr = cudaEventDestroy(link->dataReady);CHKERRCUDA(cerr); cerr = cudaEventDestroy(link->endRemoteComm);CHKERRCUDA(cerr); cerr = cudaStreamDestroy(link->remoteCommStream);CHKERRCUDA(cerr); /* nvshmem does not need buffers on host, which should be NULL */ ierr = PetscNvshmemFree(link->leafbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]);CHKERRQ(ierr); ierr = PetscNvshmemFree(link->leafSendSig);CHKERRQ(ierr); ierr = PetscNvshmemFree(link->leafRecvSig);CHKERRQ(ierr); ierr = PetscNvshmemFree(link->rootbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]);CHKERRQ(ierr); ierr = PetscNvshmemFree(link->rootSendSig);CHKERRQ(ierr); ierr = PetscNvshmemFree(link->rootRecvSig);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode PetscSFLinkCreate_NVSHMEM(PetscSF sf,MPI_Datatype unit,PetscMemType 
rootmtype,const void *rootdata,PetscMemType leafmtype,const void *leafdata,MPI_Op op,PetscSFOperation sfop,PetscSFLink *mylink) { PetscErrorCode ierr; cudaError_t cerr; PetscSF_Basic *bas = (PetscSF_Basic*)sf->data; PetscSFLink *p,link; PetscBool match,rootdirect[2],leafdirect[2]; int greatestPriority; PetscFunctionBegin; /* Check to see if we can directly send/recv root/leafdata with the given sf, sfop and op. We only care root/leafdirect[PETSCSF_REMOTE], since we never need intermeidate buffers in local communication with NVSHMEM. */ if (sfop == PETSCSF_BCAST) { /* Move data from rootbuf to leafbuf */ if (sf->use_nvshmem_get) { rootdirect[PETSCSF_REMOTE] = PETSC_FALSE; /* send buffer has to be stand-alone (can't be rootdata) */ leafdirect[PETSCSF_REMOTE] = (PetscMemTypeNVSHMEM(leafmtype) && sf->leafcontig[PETSCSF_REMOTE] && op == MPI_REPLACE) ? PETSC_TRUE : PETSC_FALSE; } else { rootdirect[PETSCSF_REMOTE] = (PetscMemTypeNVSHMEM(rootmtype) && bas->rootcontig[PETSCSF_REMOTE]) ? PETSC_TRUE : PETSC_FALSE; leafdirect[PETSCSF_REMOTE] = PETSC_FALSE; /* Our put-protocol always needs a nvshmem alloc'ed recv buffer */ } } else if (sfop == PETSCSF_REDUCE) { /* Move data from leafbuf to rootbuf */ if (sf->use_nvshmem_get) { rootdirect[PETSCSF_REMOTE] = (PetscMemTypeNVSHMEM(rootmtype) && bas->rootcontig[PETSCSF_REMOTE] && op == MPI_REPLACE) ? PETSC_TRUE : PETSC_FALSE; leafdirect[PETSCSF_REMOTE] = PETSC_FALSE; } else { rootdirect[PETSCSF_REMOTE] = PETSC_FALSE; leafdirect[PETSCSF_REMOTE] = (PetscMemTypeNVSHMEM(leafmtype) && sf->leafcontig[PETSCSF_REMOTE]) ? PETSC_TRUE : PETSC_FALSE; } } else { /* PETSCSF_FETCH */ rootdirect[PETSCSF_REMOTE] = PETSC_FALSE; /* FETCH always need a separate rootbuf */ leafdirect[PETSCSF_REMOTE] = PETSC_FALSE; /* We also force allocating a separate leafbuf so that leafdata and leafupdate can share mpi requests */ } /* Look for free nvshmem links in cache */ for (p=&bas->avail; (link=*p); p=&link->next) { if (link->use_nvshmem) { ierr = MPIPetsc_Type_compare(unit,link->unit,&match);CHKERRQ(ierr); if (match) { *p = link->next; /* Remove from available list */ goto found; } } } ierr = PetscNew(&link);CHKERRQ(ierr); ierr = PetscSFLinkSetUp_Host(sf,link,unit);CHKERRQ(ierr); /* Compute link->unitbytes, dup link->unit etc. 
*/ if (sf->backend == PETSCSF_BACKEND_CUDA) {ierr = PetscSFLinkSetUp_CUDA(sf,link,unit);CHKERRQ(ierr);} /* Setup pack routines, streams etc */ #if defined(PETSC_HAVE_KOKKOS) else if (sf->backend == PETSCSF_BACKEND_KOKKOS) {ierr = PetscSFLinkSetUp_Kokkos(sf,link,unit);CHKERRQ(ierr);} #endif link->rootdirect[PETSCSF_LOCAL] = PETSC_TRUE; /* For the local part we directly use root/leafdata */ link->leafdirect[PETSCSF_LOCAL] = PETSC_TRUE; /* Init signals to zero */ if (!link->rootSendSig) {ierr = PetscNvshmemCalloc(bas->nRemoteLeafRanksMax*sizeof(uint64_t),(void**)&link->rootSendSig);CHKERRQ(ierr);} if (!link->rootRecvSig) {ierr = PetscNvshmemCalloc(bas->nRemoteLeafRanksMax*sizeof(uint64_t),(void**)&link->rootRecvSig);CHKERRQ(ierr);} if (!link->leafSendSig) {ierr = PetscNvshmemCalloc(sf->nRemoteRootRanksMax*sizeof(uint64_t),(void**)&link->leafSendSig);CHKERRQ(ierr);} if (!link->leafRecvSig) {ierr = PetscNvshmemCalloc(sf->nRemoteRootRanksMax*sizeof(uint64_t),(void**)&link->leafRecvSig);CHKERRQ(ierr);} link->use_nvshmem = PETSC_TRUE; link->rootmtype = PETSC_MEMTYPE_DEVICE; /* Only need 0/1-based mtype from now on */ link->leafmtype = PETSC_MEMTYPE_DEVICE; /* Overwrite some function pointers set by PetscSFLinkSetUp_CUDA */ link->Destroy = PetscSFLinkDestroy_NVSHMEM; if (sf->use_nvshmem_get) { /* get-based protocol */ link->PrePack = PetscSFLinkWaitSignalsOfCompletionOfGettingData_NVSHMEM; link->StartCommunication = PetscSFLinkGetDataBegin_NVSHMEM; link->FinishCommunication = PetscSFLinkGetDataEnd_NVSHMEM; } else { /* put-based protocol */ link->StartCommunication = PetscSFLinkPutDataBegin_NVSHMEM; link->FinishCommunication = PetscSFLinkPutDataEnd_NVSHMEM; link->PostUnpack = PetscSFLinkSendSignalsToAllowPuttingData_NVSHMEM; } cerr = cudaDeviceGetStreamPriorityRange(NULL,&greatestPriority);CHKERRCUDA(cerr); cerr = cudaStreamCreateWithPriority(&link->remoteCommStream,cudaStreamNonBlocking,greatestPriority);CHKERRCUDA(cerr); cerr = cudaEventCreateWithFlags(&link->dataReady,cudaEventDisableTiming);CHKERRCUDA(cerr); cerr = cudaEventCreateWithFlags(&link->endRemoteComm,cudaEventDisableTiming);CHKERRCUDA(cerr); found: if (rootdirect[PETSCSF_REMOTE]) { link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE] = (char*)rootdata + bas->rootstart[PETSCSF_REMOTE]*link->unitbytes; } else { if (!link->rootbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]) { ierr = PetscNvshmemMalloc(bas->rootbuflen_rmax*link->unitbytes,(void**)&link->rootbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]);CHKERRQ(ierr); } link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE] = link->rootbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; } if (leafdirect[PETSCSF_REMOTE]) { link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE] = (char*)leafdata + sf->leafstart[PETSCSF_REMOTE]*link->unitbytes; } else { if (!link->leafbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]) { ierr = PetscNvshmemMalloc(sf->leafbuflen_rmax*link->unitbytes,(void**)&link->leafbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]);CHKERRQ(ierr); } link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE] = link->leafbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; } link->rootdirect[PETSCSF_REMOTE] = rootdirect[PETSCSF_REMOTE]; link->leafdirect[PETSCSF_REMOTE] = leafdirect[PETSCSF_REMOTE]; link->rootdata = rootdata; /* root/leafdata are keys to look up links in PetscSFXxxEnd */ link->leafdata = leafdata; link->next = bas->inuse; bas->inuse = link; *mylink = link; PetscFunctionReturn(0); } #if defined(PETSC_USE_REAL_SINGLE) PetscErrorCode PetscNvshmemSum(PetscInt count,float *dst,const float 
*src) { PetscErrorCode ierr; PetscMPIInt num; /* Assume nvshmem's int is MPI's int */ PetscFunctionBegin; ierr = PetscMPIIntCast(count,&num);CHKERRQ(ierr); nvshmemx_float_sum_reduce_on_stream(NVSHMEM_TEAM_WORLD,dst,src,num,PetscDefaultCudaStream); PetscFunctionReturn(0); } PetscErrorCode PetscNvshmemMax(PetscInt count,float *dst,const float *src) { PetscErrorCode ierr; PetscMPIInt num; PetscFunctionBegin; ierr = PetscMPIIntCast(count,&num);CHKERRQ(ierr); nvshmemx_float_max_reduce_on_stream(NVSHMEM_TEAM_WORLD,dst,src,num,PetscDefaultCudaStream); PetscFunctionReturn(0); } #elif defined(PETSC_USE_REAL_DOUBLE) PetscErrorCode PetscNvshmemSum(PetscInt count,double *dst,const double *src) { PetscErrorCode ierr; PetscMPIInt num; PetscFunctionBegin; ierr = PetscMPIIntCast(count,&num);CHKERRQ(ierr); nvshmemx_double_sum_reduce_on_stream(NVSHMEM_TEAM_WORLD,dst,src,num,PetscDefaultCudaStream); PetscFunctionReturn(0); } PetscErrorCode PetscNvshmemMax(PetscInt count,double *dst,const double *src) { PetscErrorCode ierr; PetscMPIInt num; PetscFunctionBegin; ierr = PetscMPIIntCast(count,&num);CHKERRQ(ierr); nvshmemx_double_max_reduce_on_stream(NVSHMEM_TEAM_WORLD,dst,src,num,PetscDefaultCudaStream); PetscFunctionReturn(0); } #endif
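/* Editor's sketch (not part of the PETSc source above): the put protocol
   described in the comments -- pack, wait on the send signal, put data,
   fence, then raise the receiver's signal -- can be illustrated with a pair
   of minimal single-thread device kernels. Buffer and signal names (sbuf,
   rbuf, rsig) are hypothetical; only NVSHMEM device calls already used in
   the file above, plus nvshmem_fence(), appear here. Requires <nvshmem.h>
   and <nvshmemx.h>, and each kernel would be launched with <<<1,1>>> on the
   communication stream. */
__global__ static void SketchPutSender(char *rbuf,const char *sbuf,size_t nbytes,uint64_t *rsig,int peer)
{
  nvshmem_putmem_nbi(rbuf,sbuf,nbytes,peer); /* step 3: put data into the receiver's stand-alone recv buf */
  nvshmem_fence();                           /* step 4: order the signal after the data put */
  nvshmemx_uint64_signal(rsig,1,peer);       /* step 5: raise the receiver's recv signal */
}

__global__ static void SketchPutReceiver(uint64_t *rsig)
{
  nvshmem_uint64_wait_until(rsig,NVSHMEM_CMP_EQ,1); /* receiver step 1: wait for the sender's signal */
  *rsig = 0;                                        /* reset; the caller may now unpack from rbuf */
}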
86a97e91f0716711439a0720c664b8c77254b1ea.hip
// !!! This is a file automatically generated by hipify!!!
#include "..\common\book.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"

#define N 1024*1024
#define FULL_SIZE (N*20)

__global__ void kernel(int *a, int *b, int *c)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < N) {
        int id1 = (i + 1) % 256;
        int id2 = (i + 255) % 256; /* same as (i-1)%256 for i>=1, but avoids the negative (out-of-range) index at i==0 */
        a[i] = (a[id1] + a[id2] + a[i]) / 3.0f;
        b[i] = (b[id1] + b[id2] + b[i]) / 3.0f;
        c[i] = (a[i] + b[i]) / 2.0f;
    }
}

int main(void)
{
    hipDeviceProp_t prop;
    int dev;
    HANDLE_ERROR(hipGetDevice(&dev));
    HANDLE_ERROR(hipGetDeviceProperties(&prop, dev));
    if (!prop.deviceOverlap) {
        printf("device can't overlap\n");
        return 0;
    }

    hipEvent_t start, stop;
    HANDLE_ERROR(hipEventCreate(&start));
    HANDLE_ERROR(hipEventCreate(&stop));
    HANDLE_ERROR(hipEventRecord(start, 0));

    hipStream_t stream;
    HANDLE_ERROR(hipStreamCreate(&stream));

    int *dev_a, *dev_b, *dev_c;
    int *a, *b, *c;
    HANDLE_ERROR(hipMalloc((void **)&dev_a, N * sizeof(int)));
    HANDLE_ERROR(hipMalloc((void **)&dev_b, N * sizeof(int)));
    HANDLE_ERROR(hipMalloc((void **)&dev_c, N * sizeof(int)));
    HANDLE_ERROR(hipHostMalloc((void **)&a, FULL_SIZE * sizeof(int), hipHostMallocDefault));
    HANDLE_ERROR(hipHostMalloc((void **)&b, FULL_SIZE * sizeof(int), hipHostMallocDefault));
    HANDLE_ERROR(hipHostMalloc((void **)&c, FULL_SIZE * sizeof(int), hipHostMallocDefault));

    for (int i = 0; i != FULL_SIZE; ++i) {
        a[i] = rand();
        b[i] = rand();
    }

    for (int i = 0; i != FULL_SIZE; i += N) {
        HANDLE_ERROR(hipMemcpyAsync(dev_a, a + i, N * sizeof(int), hipMemcpyHostToDevice, stream));
        HANDLE_ERROR(hipMemcpyAsync(dev_b, b + i, N * sizeof(int), hipMemcpyHostToDevice, stream));
        hipLaunchKernelGGL((kernel), dim3(N / 256), dim3(256), 0, stream, dev_a, dev_b, dev_c);
        HANDLE_ERROR(hipMemcpyAsync(c + i, dev_c, N * sizeof(int), hipMemcpyDeviceToHost, stream));
    }
    HANDLE_ERROR(hipStreamSynchronize(stream));

    float elapsedtime;
    HANDLE_ERROR(hipEventRecord(stop, 0));
    HANDLE_ERROR(hipEventSynchronize(stop));
    HANDLE_ERROR(hipEventElapsedTime(&elapsedtime, start, stop));
    printf("total time:%.3f ms\n", elapsedtime);

    HANDLE_ERROR(hipEventDestroy(start));
    HANDLE_ERROR(hipEventDestroy(stop));
    HANDLE_ERROR(hipFree(dev_a));
    HANDLE_ERROR(hipFree(dev_b));
    HANDLE_ERROR(hipFree(dev_c));
    HANDLE_ERROR(hipHostFree(a));
    HANDLE_ERROR(hipHostFree(b));
    HANDLE_ERROR(hipHostFree(c));
    HANDLE_ERROR(hipStreamDestroy(stream));
    getchar();
    return 0;
}
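/* Editor's note: both the HIP and CUDA versions of this example rely on the
   HANDLE_ERROR macro from the book's common header (book.h), which is not
   reproduced in this dump. A typical definition is sketched below in CUDA
   form as an assumption about what that header provides; the HIP build above
   would use hipError_t, hipSuccess and hipGetErrorString in the same way. */
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

/* Print the failing call's error string with file/line context and abort. */
static void HandleError(cudaError_t err, const char *file, int line)
{
    if (err != cudaSuccess) {
        printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
        exit(EXIT_FAILURE);
    }
}
#define HANDLE_ERROR(err) (HandleError(err, __FILE__, __LINE__))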
86a97e91f0716711439a0720c664b8c77254b1ea.cu
#include "..\common\book.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" #define N 1024*1024 #define FULL_SIZE (N*20) __global__ void kernel(int *a,int *b,int *c) { int i=threadIdx.x+blockIdx.x*blockDim.x; if (i<N) { int id1=(i+1)%256; int id2=(i-1)%256; a[i]=(a[id1]+a[id2]+a[i])/3.0f; b[i]=(b[id1]+b[id2]+b[i])/3.0f; c[i]=(a[i]+b[i])/2.0f; } } int main(void) { cudaDeviceProp prop; int dev; HANDLE_ERROR(cudaGetDevice(&dev)); HANDLE_ERROR(cudaGetDeviceProperties(&prop,dev)); if (!prop.deviceOverlap) { printf("device can`t overflap\n"); return 0; } cudaEvent_t start,stop; HANDLE_ERROR(cudaEventCreate(&start)); HANDLE_ERROR(cudaEventCreate(&stop)); HANDLE_ERROR(cudaEventRecord(start,0)); cudaStream_t stream; HANDLE_ERROR(cudaStreamCreate(&stream)); int *dev_a,*dev_b,*dev_c; int *a,*b,*c; HANDLE_ERROR(cudaMalloc((void **)&dev_a,N*sizeof(int))); HANDLE_ERROR(cudaMalloc((void **)&dev_b,N*sizeof(int))); HANDLE_ERROR(cudaMalloc((void **)&dev_c,N*sizeof(int))); HANDLE_ERROR(cudaHostAlloc((void **)&a,FULL_SIZE*sizeof(int),cudaHostAllocDefault)); HANDLE_ERROR(cudaHostAlloc((void **)&b,FULL_SIZE*sizeof(int),cudaHostAllocDefault)); HANDLE_ERROR(cudaHostAlloc((void **)&c,FULL_SIZE*sizeof(int),cudaHostAllocDefault)); for (int i=0;i!=FULL_SIZE;++i) { a[i]=rand(); b[i]=rand(); } for (int i=0;i!=FULL_SIZE;i+=N) { HANDLE_ERROR(cudaMemcpyAsync(dev_a,a+i,N*sizeof(int),cudaMemcpyHostToDevice,stream)); HANDLE_ERROR(cudaMemcpyAsync(dev_b,b+i,N*sizeof(int),cudaMemcpyHostToDevice,stream)); kernel<<<N/256,256,0,stream>>>(dev_a,dev_b,dev_c); HANDLE_ERROR(cudaMemcpyAsync(c+i,dev_c,N*sizeof(int),cudaMemcpyDeviceToHost,stream)); } HANDLE_ERROR(cudaStreamSynchronize(stream)); float elaspedtime; HANDLE_ERROR(cudaEventRecord(stop,0)); HANDLE_ERROR(cudaEventSynchronize(stop)); HANDLE_ERROR(cudaEventElapsedTime(&elaspedtime,start,stop)); printf("total time:%.3f ms\n",elaspedtime); HANDLE_ERROR(cudaEventDestroy(start)); HANDLE_ERROR(cudaEventDestroy(stop)); HANDLE_ERROR(cudaFree(dev_a)); HANDLE_ERROR(cudaFree(dev_b)); HANDLE_ERROR(cudaFree(dev_c)); HANDLE_ERROR(cudaFreeHost(a)); HANDLE_ERROR(cudaFreeHost(b)); HANDLE_ERROR(cudaFreeHost(c)); HANDLE_ERROR(cudaStreamDestroy(stream)); getchar(); return 0; }
963731bab370ab87eda13dddc633e5bfc670ea6b.hip
// !!! This is a file automatically generated by hipify!!!
#include "edge_overlay.h"
#include <hip/hip_runtime.h>

#define BPP 4

texture<unsigned char, 2> tex;

static __device__ float getXY(int x, int y){
    //return (float)input[y*1920+x];
    return (float)tex2D(tex, x, y);
}

__global__ void _edge_overlay(Pixel *output, Pixel threshold){
    int x = blockIdx.x*blockDim.x + threadIdx.x;
    int y_start = threadIdx.y*540;
    for(int i = 0; i < 540; i++){
        int y = y_start + i;
        int idx = ((y)*1920 + x)*BPP;
        if(!(x < 2 || x >= 1918 || y >= 1078 || y < 2)){
            float a = getXY(x,   y);
            float b = getXY(x+1, y);
            float c = getXY(x,   y+1);
            float d = getXY(x+1, y+1);
            float gx = (b+d) - (a+c);
            float gy = (c+d) - (a+b);
            Pixel mag = Pixel(sqrt(gx*gx + gy*gy)/2.0);
            if(mag > threshold){
                output[idx]   = 0;
                output[idx+1] = 255;
                output[idx+2] = 0;
            }
        }
    }
}

void edgeOverlay(Pixel *output, Pixel *input, Pixel threshold, hipStream_t stream){
    size_t offset;
    hipChannelFormatDesc channeldesc = hipCreateChannelDesc<unsigned char>();
    hipBindTexture2D(&offset, tex, input, channeldesc, 1920, 1080, 1920);
    dim3 threads(128,2);
    dim3 blocks(1920/128);
    hipLaunchKernelGGL((_edge_overlay), dim3(blocks), dim3(threads), 0, stream, output, threshold);
    hipUnbindTexture(tex);
}
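/* Editor's note: the commented-out line inside getXY hints at an earlier
   non-texture code path. A hypothetical global-memory variant of that helper
   is sketched below, assuming the same fixed 1920-pixel row pitch used
   throughout this file; the kernel's own x/y bounds test already keeps the
   reads in range, so no extra clamping is added. */
static __device__ float getXY_global(const unsigned char *input, int x, int y)
{
    return (float)input[y * 1920 + x];
}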
963731bab370ab87eda13dddc633e5bfc670ea6b.cu
#include "edge_overlay.h" #include <cuda_runtime.h> #define BPP 4 texture<unsigned char, 2> tex; static __device__ float getXY(int x, int y){ //return (float)input[y*1920+x]; return (float)tex2D(tex,x,y); } __global__ void _edge_overlay(Pixel *output, Pixel threshold){ int x = blockIdx.x*blockDim.x+threadIdx.x; int y_start = threadIdx.y*540; float a,b,c,d; for(int i=0;i<540;i++){ int y=y_start+i; int idx = ((y)*1920+x)*BPP; if(!(x<2 || x>=1918 || y>=1078 || y<2)){ float a=getXY(x,y); float b=getXY(x+1,y); c=getXY(x,y+1); d=getXY(x+1,y+1); float gx = (b+d)-(a+c); float gy = (c+d)-(a+b); Pixel mag = Pixel(sqrt(gx*gx+gy*gy)/2.0); if(mag>threshold){ output[idx] = 0; output[idx+1] = 255; output[idx+2] = 0; } } } } void edgeOverlay(Pixel *output, Pixel *input, Pixel threshold, cudaStream_t stream){ size_t offset; cudaChannelFormatDesc channeldesc=cudaCreateChannelDesc<unsigned char>(); cudaBindTexture2D(&offset,tex,input,channeldesc,1920,1080,1920); dim3 threads(128,2); dim3 blocks(1920/128); _edge_overlay<<<blocks,threads,0,stream>>>(output,threshold); cudaUnbindTexture(tex); }
d6120dfdd0cbaaec7576c7efc75f2423925f1078.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include "header/table.h" #include "header/error.h" #include "header/constants.h" #include <unistd.h> #include <string.h> #include <stdarg.h> #include <time.h> #include <png.h> extern int width, height; extern png_structp png_ptr; extern png_infop info_ptr; extern png_bytep * row_pointers; extern void read_png_file(char* file_name); extern void write_png_file(char* file_name); __device__ float truncate(float i) { float r = round(i); if (r < 0.0 && r == i - 0.5) { return r + 1.0; } return r; } __device__ void RGB_YUV(float *rgb, float *yuv) { yuv[0] = 0.257 * rgb[0] + 0.504 * rgb[1] + 0.098 * rgb[2] + 16; yuv[1] = -0.148 * rgb[0] - 0.291 * rgb[1] + 0.439 * rgb[2] + 128; yuv[2] = 0.439 * rgb[0] - 0.368 * rgb[1] - 0.071 * rgb[2] + 128; } __device__ void YUV_RGB(float *yuv, float *rgb) { rgb[0] = 1.164 * (yuv[0] - 16) + 1.596 * (yuv[2] - 128); rgb[1] = 1.164 * (yuv[0] - 16) - 0.813 * (yuv[2] - 128) - 0.392 * (yuv[1] - 128); rgb[2] = 1.164 * (yuv[0] - 16) + 2.017 * (yuv[1] - 128); int k; for (k = 0; k < 3; k++) { if (rgb[k] < 0) rgb[k] = 0; else if (rgb[k] > 255) rgb[k] = 255; } } __global__ void FDCT(float *matrix, int width) { int i, j; __shared__ float mx[64]; int index_x = blockIdx.x * blockDim.x + threadIdx.x; int index_y = blockIdx.y * blockDim.y + threadIdx.y; mx[threadIdx.y * 8 + threadIdx.x] = *(matrix + index_y * width + index_x); __syncthreads(); float sum = 0, cu = 0, cv = 0; if (!threadIdx.x) cu = sqrt((float) 1 / blockDim.x); else cu = sqrt((float) 2 / blockDim.x); if (!threadIdx.y) cv = sqrt((float) 1 / blockDim.y); else cv = sqrt((float) 2 / blockDim.y); for (i = 0; i < blockDim.x; i++) { for (j = 0; j < blockDim.y; j++) { sum += mx[j * 8 + i] * (cos(((2 * i + 1) * threadIdx.x * M_PI) / 16)) * (cos(((2 * j + 1) * threadIdx.y * M_PI) / 16)); } } index_x = blockIdx.x * blockDim.x + threadIdx.x; index_y = blockIdx.y * blockDim.y + threadIdx.y; *(matrix + index_y * width + index_x) = cv * cu * sum; } __global__ void IDCT(float *matrix, int width) { int u, v; __shared__ float mx[64]; int index_x = blockIdx.x * blockDim.x + threadIdx.x; int index_y = blockIdx.y * blockDim.y + threadIdx.y; mx[threadIdx.y * 8 + threadIdx.x] = *(matrix + index_y * width + index_x); __syncthreads(); float sum = 0; for (u = 0; u < blockDim.x; u++) { for (v = 0; v < blockDim.y; v++) { float cu; float cv; if (!u) cu = sqrt((float) 1 / blockDim.x); else cu = sqrt((float) 2 / blockDim.x); if (!v) cv = sqrt((float) 1 / blockDim.y); else cv = sqrt((float) 2 / blockDim.y); sum += cu * cv * mx[v * 8 + u] * (cos(((2 * threadIdx.x + 1) * u * M_PI) / 16)) * (cos(((2 * threadIdx.y + 1) * v * M_PI) / 16)); } } index_x = blockIdx.x * blockDim.x + threadIdx.x; index_y = blockIdx.y * blockDim.y + threadIdx.y; *(matrix + index_y * width + index_x) = sum; } __global__ void QUANTIZATION(float *matrix, int width, int quality) { int index_x = blockIdx.x * blockDim.x + threadIdx.x; int index_y = blockIdx.y * blockDim.y + threadIdx.y; float divided; int s = (quality < 50) ? 
5000 / quality : 200 - 2 * quality; int val = (s * Qy_[threadIdx.y * 8 + threadIdx.x] + 50) / 100; divided = *(matrix + index_y * width + index_x) / val; divided = truncate(divided); *(matrix + index_y * width + index_x) = (int) divided; } __global__ void DEQUANTIZATION(float *matrix, int width, int quality) { float divided; int index_x = blockIdx.x * blockDim.x + threadIdx.x; int index_y = blockIdx.y * blockDim.y + threadIdx.y; int s = (quality < 50) ? 5000 / quality : 200 - 2 * quality; int val = (s * Qy_[threadIdx.y * 8 + threadIdx.x] + 50) / 100; divided = *(matrix + index_y * width + index_x) * val; *(matrix + index_y * width + index_x) = (int) divided; } __global__ void shiftBlock(float *matrix, int width) { int index_x = blockIdx.x * blockDim.x + threadIdx.x; int index_y = blockIdx.y * blockDim.y + threadIdx.y; *(matrix + index_y * width + index_x) = (*(matrix + index_y * width + index_x) - 128); } __global__ void ishiftBlock(float *matrix, int width) { int index_x = blockIdx.x * blockDim.x + threadIdx.x; int index_y = blockIdx.y * blockDim.y + threadIdx.y; *(matrix + index_y * width + index_x) = (*(matrix + index_y * width + index_x) + 128); } __global__ void convertColorSpace_rgb2yuv(float *r, float *g, float *b, int width, int height) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; float rgb[3] = { r[j * width + i], g[j * width + i], b[j * width + i] }; float yuv[3]; RGB_YUV(rgb, yuv); r[j * width + i] = yuv[0]; g[j * width + i] = yuv[1]; b[j * width + i] = yuv[2]; } __global__ void convertColorSpace_yuv2rgb(float *y, float *cb, float *cr, int width, int height) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; float rgb[3]; float yuv[3] = { y[j * width + i], cb[j * width + i], cr[j * width + i] }; YUV_RGB(yuv, rgb); y[j * width + i] = rgb[0]; cb[j * width + i] = rgb[1]; cr[j * width + i] = rgb[2]; } __device__ void zigzag(float *matrix, UNIT16 side, float *sequence) { int i = 0; int j = 0; int index = 0; sequence[index++] = TABLE_ELEMENT(matrix, side, 0, 0); //for upper triangle of matrix do { j++; sequence[index++] = TABLE_ELEMENT(matrix, side, i, j); while (j != 0) { i++; j--; sequence[index++] = TABLE_ELEMENT(matrix, side, i, j); } i++; if (i > 7) { i--; break; } sequence[index++] = TABLE_ELEMENT(matrix, side, i, j); while (i != 0) { i--; j++; sequence[index++] = TABLE_ELEMENT(matrix, side, i, j); } } while (true); //for lower triangle of matrix do { j++; sequence[index++] = TABLE_ELEMENT(matrix, side, i, j); while (j != 7) { j++; i--; sequence[index++] = TABLE_ELEMENT(matrix, side, i, j); } i++; if (i > 7) { i--; break; } sequence[index++] = TABLE_ELEMENT(matrix, side, i, j); while (i != 7) { i++; j--; sequence[index++] = TABLE_ELEMENT(matrix, side, i, j); } } while (true); } __device__ int sizeofNumber(float number) { int k = 0; for (k = 0; k < 12; k++) { if ((number < (1 << k)) && (number > -(1 << k))) { return k; } } return 0; } __device__ void countHuffAcCode(float *matrix, int block_x, int block_y, int width, UNIT32 *count) { int i, j, k, m, n; int idx_x, idx_y; float mx2[64]; float mx[64]; for (i = 0; i < block_x; i++) { for (k = 0; k < block_y; k++) { for (m = 0; m < 8; m++) { for (n = 0; n < 8; n++) { idx_y = k * 8 + n; idx_x = i * 8 + m; mx[n * 8 + m] = *(matrix + idx_y * width + idx_x); } } zigzag(mx, 8, mx2); int zc = 0; int size = 0; //skip DC code for (j = 1; j < 64; j++) { if (mx2[j] == 0) { zc++; if (zc != 16) { if (j == 63) { UNIT8 idx = ((zc & 0xF) << 4) | (0 & 
0xF); count[idx]++; } continue; } else { zc = 15; size = 0; } } else { size = sizeofNumber(mx2[j]); } UNIT8 idx = ((zc & 0xF) << 4) | (size & 0xF); count[idx]++; zc = 0; size = 0; } count[0]++; } } } __device__ void countHuffDcCode(float *matrix, int block_x, int block_y, int width, UNIT32 *count) { int i, k, m, n; int idx_x, idx_y; float mx[64]; int prevdc = DEFAULT_DC; for (i = 0; i < block_x; i++) { for (k = 0; k < block_y; k++) { for (m = 0; m < 8; m++) { for (n = 0; n < 8; n++) { idx_y = k * 8 + n; idx_x = i * 8 + m; mx[n * 8 + m] = *(matrix + idx_y * width + idx_x); } } int diff = mx[0] - prevdc; int size = sizeofNumber(diff); UNIT8 idx = size & 0xF; count[idx]++; prevdc = mx[0]; } } } __global__ void huffCode(float *y, float *cb, float *cr, UNIT32 *counts_y_dc, UNIT32 *counts_y_ac, UNIT32 *counts_b_dc, UNIT32 *counts_b_ac, UNIT32 *counts_r_dc, UNIT32 *counts_r_ac, int block_x, int block_y, size_t pitch_y, size_t pitch_b, size_t pitch_r) { switch (threadIdx.x) { case 0: countHuffDcCode(y, block_x, block_y, pitch_y, counts_y_dc); break; case 1: countHuffAcCode(y, block_x, block_y, pitch_y, counts_y_ac); break; case 2: countHuffDcCode(cb, block_x, block_y, pitch_b, counts_b_dc); break; case 3: countHuffAcCode(cb, block_x, block_y, pitch_b, counts_b_ac); break; case 4: countHuffDcCode(cr, block_x, block_y, pitch_r, counts_r_dc); break; case 5: countHuffAcCode(cr, block_x, block_y, pitch_r, counts_r_ac); break; } } __host__ void process_file(int quality) { int y, x, skip; //UNIT8 *QY, *QC; float *ypointers; float *cbpointers; float *crpointers; UNIT32 *counts_y_dc, *counts_y_ac, *counts_b_dc, *counts_b_ac, *counts_r_dc, *counts_r_ac; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float seconds; if (png_get_color_type(png_ptr, info_ptr) != PNG_COLOR_TYPE_RGBA && png_get_color_type(png_ptr, info_ptr) != PNG_COLOR_TYPE_RGB) abort_( "[process_file] color_type of input file must be PNG_COLOR_TYPE_RGBA or PNG_COLOR_TYPE_RGB (%d) (is %d)", PNG_COLOR_TYPE_RGBA, png_get_color_type(png_ptr, info_ptr)); if (png_get_color_type(png_ptr, info_ptr) == PNG_COLOR_TYPE_RGB) skip = 3; else skip = 4; if (width % block_side != 0 || height % block_side != 0 || width > 20000 || height > 20000) abort_("Invalid and unsupported image width %d and height %d", width, height); if (quality > 100 || quality < 1) abort_("Invalid quality, the range is [0,100]"); /* allocate memory space for each component y cb cr*/ hipEventRecord(start, 0); int imgsize = sizeof(float) * height * width; hipMalloc(&ypointers, imgsize); hipMalloc(&cbpointers, imgsize); hipMalloc(&crpointers, imgsize); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&seconds, start, stop); printf("%f milliseconds to allocate device memory space\n", seconds); /* divide original 4-dimension matrix into three components*/ float *yp, *bp, *rp; yp = (float *) malloc(imgsize); bp = (float *) malloc(imgsize); rp = (float *) malloc(imgsize); for (y = 0; y < height; y++) { png_byte* row = row_pointers[y]; for (x = 0; x < width; x++) { png_byte* ptr = &(row[x * skip]); // set red value to 0 and green value to the blue one yp[y * width + x] = ptr[0]; bp[y * width + x] = ptr[1]; rp[y * width + x] = ptr[2]; } } /* load each component into GPU global memory */ hipEventRecord(start, 0); hipMemcpy(ypointers, yp, imgsize, hipMemcpyHostToDevice); hipMemcpy(cbpointers, bp, imgsize, hipMemcpyHostToDevice); hipMemcpy(crpointers, rp, imgsize, hipMemcpyHostToDevice); hipMalloc(&counts_y_dc, 256 * sizeof(UNIT32)); 
hipMemset(counts_y_dc, 0, sizeof(UNIT32) * 256); hipMalloc(&counts_y_ac, 256 * sizeof(UNIT32)); hipMemset(counts_y_ac, 0, sizeof(UNIT32) * 256); hipMalloc(&counts_b_dc, 256 * sizeof(UNIT32)); hipMemset(counts_b_dc, 0, sizeof(UNIT32) * 256); hipMalloc(&counts_b_ac, 256 * sizeof(UNIT32)); hipMemset(counts_b_ac, 0, sizeof(UNIT32) * 256); hipMalloc(&counts_r_dc, 256 * sizeof(UNIT32)); hipMemset(counts_r_dc, 0, sizeof(UNIT32) * 256); hipMalloc(&counts_r_ac, 256 * sizeof(UNIT32)); hipMemset(counts_r_ac, 0, sizeof(UNIT32) * 256); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&seconds, start, stop); printf("%f milliseconds to upload data to device \n", seconds); /* empty local buffer in main memory */ memset(yp, 0, imgsize); memset(bp, 0, imgsize); memset(rp, 0, imgsize); /* * create block with size 8 X 8, e.g each block has 64 threads * create grid with width/8 + height/8 size. * */ dim3 dimBlock2(8, 8); dim3 dimGrid2((dimBlock2.x - 1 + width) / dimBlock2.x, (dimBlock2.y - 1 + height) / dimBlock2.y); /* * convert color space from rgb to yuv * */ hipEventRecord(start, 0); hipLaunchKernelGGL(( convertColorSpace_rgb2yuv), dim3(dimGrid2), dim3(dimBlock2), 0, 0, ypointers, cbpointers, crpointers, width, height); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&seconds, start, stop); printf("%f milliseconds for color space conversion from rgb to yuv\n", seconds); hipEventRecord(start, 0); hipLaunchKernelGGL(( shiftBlock), dim3(dimGrid2), dim3(dimBlock2), 0, 0, ypointers, width); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&seconds, start, stop); printf("%f milliseconds for data shift\n", seconds * 3); hipEventRecord(start, 0); hipLaunchKernelGGL(( FDCT), dim3(dimGrid2), dim3(dimBlock2), 0, 0, ypointers, width); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&seconds, start, stop); printf("%f milliseconds for DCT transform\n", seconds * 3); hipEventRecord(start, 0); hipLaunchKernelGGL(( QUANTIZATION), dim3(dimGrid2), dim3(dimBlock2), 0, 0, ypointers, width, quality); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&seconds, start, stop); printf("%f milliseconds for Quantization\n", seconds * 3); hipLaunchKernelGGL(( shiftBlock), dim3(dimGrid2), dim3(dimBlock2), 0, 0, cbpointers, width); hipLaunchKernelGGL(( FDCT), dim3(dimGrid2), dim3(dimBlock2), 0, 0, cbpointers, width); hipLaunchKernelGGL(( QUANTIZATION), dim3(dimGrid2), dim3(dimBlock2), 0, 0, cbpointers, width, quality); hipLaunchKernelGGL(( shiftBlock), dim3(dimGrid2), dim3(dimBlock2), 0, 0, crpointers, width); hipLaunchKernelGGL(( FDCT), dim3(dimGrid2), dim3(dimBlock2), 0, 0, crpointers, width); hipLaunchKernelGGL(( QUANTIZATION), dim3(dimGrid2), dim3(dimBlock2), 0, 0, crpointers, width, quality); hipEventRecord(start, 0); hipLaunchKernelGGL(( huffCode), dim3(6), dim3(1), 0, 0, ypointers, cbpointers, crpointers, counts_y_dc, counts_y_ac, counts_b_dc, counts_b_ac, counts_r_dc, counts_r_ac, width / 8, height / 8, (size_t)width, (size_t)width, (size_t)width); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&seconds, start, stop); printf("%f milliseconds for encoding\n", seconds); hipEventRecord(start, 0); hipLaunchKernelGGL(( DEQUANTIZATION), dim3(dimGrid2), dim3(dimBlock2), 0, 0, ypointers, width, quality); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&seconds, start, stop); printf("%f milliseconds for Dequantization\n", seconds * 3); hipEventRecord(start, 0); 
hipLaunchKernelGGL(( IDCT), dim3(dimGrid2), dim3(dimBlock2), 0, 0, ypointers, width); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&seconds, start, stop); printf("%f milliseconds for IDCT transform\n", seconds * 3); hipEventRecord(start, 0); hipLaunchKernelGGL(( ishiftBlock), dim3(dimGrid2), dim3(dimBlock2), 0, 0, ypointers, width); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&seconds, start, stop); printf("%f milliseconds for reverse data shift\n", seconds * 3); hipLaunchKernelGGL(( DEQUANTIZATION), dim3(dimGrid2), dim3(dimBlock2), 0, 0, cbpointers, width, quality); hipLaunchKernelGGL(( IDCT), dim3(dimGrid2), dim3(dimBlock2), 0, 0, cbpointers, width); hipLaunchKernelGGL(( ishiftBlock), dim3(dimGrid2), dim3(dimBlock2), 0, 0, cbpointers, width); hipLaunchKernelGGL(( DEQUANTIZATION), dim3(dimGrid2), dim3(dimBlock2), 0, 0, crpointers, width, quality); hipLaunchKernelGGL(( IDCT), dim3(dimGrid2), dim3(dimBlock2), 0, 0, crpointers, width); hipLaunchKernelGGL(( ishiftBlock), dim3(dimGrid2), dim3(dimBlock2), 0, 0, crpointers, width); /* * convert color space back from yuv to rgb * */ hipEventRecord(start, 0); hipLaunchKernelGGL(( convertColorSpace_yuv2rgb), dim3(dimGrid2), dim3(dimBlock2), 0, 0, ypointers, cbpointers, crpointers, width, height); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&seconds, start, stop); printf("%f milliseconds for color space conversion from yuv to rgb\n", seconds); hipEventRecord(start, 0); hipMemcpy(yp, ypointers, imgsize, hipMemcpyDeviceToHost); hipMemcpy(bp, cbpointers, imgsize, hipMemcpyDeviceToHost); hipMemcpy(rp, crpointers, imgsize, hipMemcpyDeviceToHost); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&seconds, start, stop); printf("%f milliseconds used to download data from device \n", seconds); for (y = 0; y < height; y++) { png_byte* row = row_pointers[y]; for (x = 0; x < width; x++) { png_byte* ptr = &(row[x * skip]); // set red value to 0 and green value to the blue one ptr[0] = yp[y * width + x]; ptr[1] = bp[y * width + x]; ptr[2] = rp[y * width + x]; } } hipEventRecord(start, 0); hipFree(ypointers); hipFree(cbpointers); hipFree(crpointers); hipFree(counts_y_dc); hipFree(counts_y_ac); hipFree(counts_b_dc); hipFree(counts_b_ac); hipFree(counts_r_dc); hipFree(counts_r_ac); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&seconds, start, stop); printf("%f milliseconds used to free allocated memory on device \n", seconds); hipEventDestroy(start); hipEventDestroy(stop); free(yp); free(bp); free(rp); } void printBlock_float(float *matrix, int width, int block_x, int block_y) { int u, v; for (u = 0; u < block_side; u++) { for (v = 0; v < block_side; v++) { int index_x = block_x * width * block_side + u * width; int index_y = block_y * block_side + v; printf("%5.1f\t", *(matrix + index_x + index_y)); } printf("\n"); } printf("\n"); } int main(int argc, char *argv[]) { if (argc < 4) abort_( "Usage: program_name file_path_of_input_file file_path_of_output_file quality"); read_png_file(argv[1]); process_file(atoi(argv[3])); write_png_file(argv[2]); }
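/* Editor's sketch (not part of this file): the QUANTIZATION and
   DEQUANTIZATION kernels above scale the base table Qy_ with the usual
   IJG-style quality factor. The same arithmetic is reproduced below as a
   host-side helper for checking entries by hand; the clamp to 1 is an
   assumption about what a robust version would do, since the device code as
   written divides by a scaled value that reaches 0 at quality=100. */
static int scaled_quant_entry(int q_entry, int quality)
{
    int s   = (quality < 50) ? 5000 / quality : 200 - 2 * quality; /* quality=50 -> s=100 */
    int val = (s * q_entry + 50) / 100;                            /* s=100 leaves the table entry unchanged */
    if (val < 1) val = 1;  /* guard (assumption): avoid a zero divisor at very high quality */
    return val;
}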
d6120dfdd0cbaaec7576c7efc75f2423925f1078.cu
/* * * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include "header/table.h" #include "header/error.h" #include "header/constants.h" #include <unistd.h> #include <string.h> #include <stdarg.h> #include <time.h> #include <png.h> extern int width, height; extern png_structp png_ptr; extern png_infop info_ptr; extern png_bytep * row_pointers; extern void read_png_file(char* file_name); extern void write_png_file(char* file_name); __device__ float truncate(float i) { float r = round(i); if (r < 0.0 && r == i - 0.5) { return r + 1.0; } return r; } __device__ void RGB_YUV(float *rgb, float *yuv) { yuv[0] = 0.257 * rgb[0] + 0.504 * rgb[1] + 0.098 * rgb[2] + 16; yuv[1] = -0.148 * rgb[0] - 0.291 * rgb[1] + 0.439 * rgb[2] + 128; yuv[2] = 0.439 * rgb[0] - 0.368 * rgb[1] - 0.071 * rgb[2] + 128; } __device__ void YUV_RGB(float *yuv, float *rgb) { rgb[0] = 1.164 * (yuv[0] - 16) + 1.596 * (yuv[2] - 128); rgb[1] = 1.164 * (yuv[0] - 16) - 0.813 * (yuv[2] - 128) - 0.392 * (yuv[1] - 128); rgb[2] = 1.164 * (yuv[0] - 16) + 2.017 * (yuv[1] - 128); int k; for (k = 0; k < 3; k++) { if (rgb[k] < 0) rgb[k] = 0; else if (rgb[k] > 255) rgb[k] = 255; } } __global__ void FDCT(float *matrix, int width) { int i, j; __shared__ float mx[64]; int index_x = blockIdx.x * blockDim.x + threadIdx.x; int index_y = blockIdx.y * blockDim.y + threadIdx.y; mx[threadIdx.y * 8 + threadIdx.x] = *(matrix + index_y * width + index_x); __syncthreads(); float sum = 0, cu = 0, cv = 0; if (!threadIdx.x) cu = sqrt((float) 1 / blockDim.x); else cu = sqrt((float) 2 / blockDim.x); if (!threadIdx.y) cv = sqrt((float) 1 / blockDim.y); else cv = sqrt((float) 2 / blockDim.y); for (i = 0; i < blockDim.x; i++) { for (j = 0; j < blockDim.y; j++) { sum += mx[j * 8 + i] * (cos(((2 * i + 1) * threadIdx.x * M_PI) / 16)) * (cos(((2 * j + 1) * threadIdx.y * M_PI) / 16)); } } index_x = blockIdx.x * blockDim.x + threadIdx.x; index_y = blockIdx.y * blockDim.y + threadIdx.y; *(matrix + index_y * width + index_x) = cv * cu * sum; } __global__ void IDCT(float *matrix, int width) { int u, v; __shared__ float mx[64]; int index_x = blockIdx.x * blockDim.x + threadIdx.x; int index_y = blockIdx.y * blockDim.y + threadIdx.y; mx[threadIdx.y * 8 + threadIdx.x] = *(matrix + index_y * width + index_x); __syncthreads(); float sum = 0; for (u = 0; u < blockDim.x; u++) { for (v = 0; v < blockDim.y; v++) { float cu; float cv; if (!u) cu = sqrt((float) 1 / blockDim.x); else cu = sqrt((float) 2 / blockDim.x); if (!v) cv = sqrt((float) 1 / blockDim.y); else cv = sqrt((float) 2 / blockDim.y); sum += cu * cv * mx[v * 8 + u] * (cos(((2 * threadIdx.x + 1) * u * M_PI) / 16)) * (cos(((2 * threadIdx.y + 1) * v * M_PI) / 16)); } } index_x = blockIdx.x * blockDim.x + threadIdx.x; index_y = blockIdx.y * blockDim.y + threadIdx.y; *(matrix + index_y * width + index_x) = sum; } __global__ void QUANTIZATION(float *matrix, int width, int quality) { int index_x = blockIdx.x * blockDim.x + threadIdx.x; int index_y = blockIdx.y * blockDim.y + threadIdx.y; float divided; int s = (quality < 50) ? 
5000 / quality : 200 - 2 * quality; int val = (s * Qy_[threadIdx.y * 8 + threadIdx.x] + 50) / 100; divided = *(matrix + index_y * width + index_x) / val; divided = truncate(divided); *(matrix + index_y * width + index_x) = (int) divided; } __global__ void DEQUANTIZATION(float *matrix, int width, int quality) { float divided; int index_x = blockIdx.x * blockDim.x + threadIdx.x; int index_y = blockIdx.y * blockDim.y + threadIdx.y; int s = (quality < 50) ? 5000 / quality : 200 - 2 * quality; int val = (s * Qy_[threadIdx.y * 8 + threadIdx.x] + 50) / 100; divided = *(matrix + index_y * width + index_x) * val; *(matrix + index_y * width + index_x) = (int) divided; } __global__ void shiftBlock(float *matrix, int width) { int index_x = blockIdx.x * blockDim.x + threadIdx.x; int index_y = blockIdx.y * blockDim.y + threadIdx.y; *(matrix + index_y * width + index_x) = (*(matrix + index_y * width + index_x) - 128); } __global__ void ishiftBlock(float *matrix, int width) { int index_x = blockIdx.x * blockDim.x + threadIdx.x; int index_y = blockIdx.y * blockDim.y + threadIdx.y; *(matrix + index_y * width + index_x) = (*(matrix + index_y * width + index_x) + 128); } __global__ void convertColorSpace_rgb2yuv(float *r, float *g, float *b, int width, int height) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; float rgb[3] = { r[j * width + i], g[j * width + i], b[j * width + i] }; float yuv[3]; RGB_YUV(rgb, yuv); r[j * width + i] = yuv[0]; g[j * width + i] = yuv[1]; b[j * width + i] = yuv[2]; } __global__ void convertColorSpace_yuv2rgb(float *y, float *cb, float *cr, int width, int height) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; float rgb[3]; float yuv[3] = { y[j * width + i], cb[j * width + i], cr[j * width + i] }; YUV_RGB(yuv, rgb); y[j * width + i] = rgb[0]; cb[j * width + i] = rgb[1]; cr[j * width + i] = rgb[2]; } __device__ void zigzag(float *matrix, UNIT16 side, float *sequence) { int i = 0; int j = 0; int index = 0; sequence[index++] = TABLE_ELEMENT(matrix, side, 0, 0); //for upper triangle of matrix do { j++; sequence[index++] = TABLE_ELEMENT(matrix, side, i, j); while (j != 0) { i++; j--; sequence[index++] = TABLE_ELEMENT(matrix, side, i, j); } i++; if (i > 7) { i--; break; } sequence[index++] = TABLE_ELEMENT(matrix, side, i, j); while (i != 0) { i--; j++; sequence[index++] = TABLE_ELEMENT(matrix, side, i, j); } } while (true); //for lower triangle of matrix do { j++; sequence[index++] = TABLE_ELEMENT(matrix, side, i, j); while (j != 7) { j++; i--; sequence[index++] = TABLE_ELEMENT(matrix, side, i, j); } i++; if (i > 7) { i--; break; } sequence[index++] = TABLE_ELEMENT(matrix, side, i, j); while (i != 7) { i++; j--; sequence[index++] = TABLE_ELEMENT(matrix, side, i, j); } } while (true); } __device__ int sizeofNumber(float number) { int k = 0; for (k = 0; k < 12; k++) { if ((number < (1 << k)) && (number > -(1 << k))) { return k; } } return 0; } __device__ void countHuffAcCode(float *matrix, int block_x, int block_y, int width, UNIT32 *count) { int i, j, k, m, n; int idx_x, idx_y; float mx2[64]; float mx[64]; for (i = 0; i < block_x; i++) { for (k = 0; k < block_y; k++) { for (m = 0; m < 8; m++) { for (n = 0; n < 8; n++) { idx_y = k * 8 + n; idx_x = i * 8 + m; mx[n * 8 + m] = *(matrix + idx_y * width + idx_x); } } zigzag(mx, 8, mx2); int zc = 0; int size = 0; //skip DC code for (j = 1; j < 64; j++) { if (mx2[j] == 0) { zc++; if (zc != 16) { if (j == 63) { UNIT8 idx = ((zc & 0xF) << 4) | (0 & 
0xF); count[idx]++; } continue; } else { zc = 15; size = 0; } } else { size = sizeofNumber(mx2[j]); } UNIT8 idx = ((zc & 0xF) << 4) | (size & 0xF); count[idx]++; zc = 0; size = 0; } count[0]++; } } } __device__ void countHuffDcCode(float *matrix, int block_x, int block_y, int width, UNIT32 *count) { int i, k, m, n; int idx_x, idx_y; float mx[64]; int prevdc = DEFAULT_DC; for (i = 0; i < block_x; i++) { for (k = 0; k < block_y; k++) { for (m = 0; m < 8; m++) { for (n = 0; n < 8; n++) { idx_y = k * 8 + n; idx_x = i * 8 + m; mx[n * 8 + m] = *(matrix + idx_y * width + idx_x); } } int diff = mx[0] - prevdc; int size = sizeofNumber(diff); UNIT8 idx = size & 0xF; count[idx]++; prevdc = mx[0]; } } } __global__ void huffCode(float *y, float *cb, float *cr, UNIT32 *counts_y_dc, UNIT32 *counts_y_ac, UNIT32 *counts_b_dc, UNIT32 *counts_b_ac, UNIT32 *counts_r_dc, UNIT32 *counts_r_ac, int block_x, int block_y, size_t pitch_y, size_t pitch_b, size_t pitch_r) { switch (threadIdx.x) { case 0: countHuffDcCode(y, block_x, block_y, pitch_y, counts_y_dc); break; case 1: countHuffAcCode(y, block_x, block_y, pitch_y, counts_y_ac); break; case 2: countHuffDcCode(cb, block_x, block_y, pitch_b, counts_b_dc); break; case 3: countHuffAcCode(cb, block_x, block_y, pitch_b, counts_b_ac); break; case 4: countHuffDcCode(cr, block_x, block_y, pitch_r, counts_r_dc); break; case 5: countHuffAcCode(cr, block_x, block_y, pitch_r, counts_r_ac); break; } } __host__ void process_file(int quality) { int y, x, skip; //UNIT8 *QY, *QC; float *ypointers; float *cbpointers; float *crpointers; UNIT32 *counts_y_dc, *counts_y_ac, *counts_b_dc, *counts_b_ac, *counts_r_dc, *counts_r_ac; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float seconds; if (png_get_color_type(png_ptr, info_ptr) != PNG_COLOR_TYPE_RGBA && png_get_color_type(png_ptr, info_ptr) != PNG_COLOR_TYPE_RGB) abort_( "[process_file] color_type of input file must be PNG_COLOR_TYPE_RGBA or PNG_COLOR_TYPE_RGB (%d) (is %d)", PNG_COLOR_TYPE_RGBA, png_get_color_type(png_ptr, info_ptr)); if (png_get_color_type(png_ptr, info_ptr) == PNG_COLOR_TYPE_RGB) skip = 3; else skip = 4; if (width % block_side != 0 || height % block_side != 0 || width > 20000 || height > 20000) abort_("Invalid and unsupported image width %d and height %d", width, height); if (quality > 100 || quality < 1) abort_("Invalid quality, the range is [0,100]"); /* allocate memory space for each component y cb cr*/ cudaEventRecord(start, 0); int imgsize = sizeof(float) * height * width; cudaMalloc(&ypointers, imgsize); cudaMalloc(&cbpointers, imgsize); cudaMalloc(&crpointers, imgsize); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&seconds, start, stop); printf("%f milliseconds to allocate device memory space\n", seconds); /* divide original 4-dimension matrix into three components*/ float *yp, *bp, *rp; yp = (float *) malloc(imgsize); bp = (float *) malloc(imgsize); rp = (float *) malloc(imgsize); for (y = 0; y < height; y++) { png_byte* row = row_pointers[y]; for (x = 0; x < width; x++) { png_byte* ptr = &(row[x * skip]); // set red value to 0 and green value to the blue one yp[y * width + x] = ptr[0]; bp[y * width + x] = ptr[1]; rp[y * width + x] = ptr[2]; } } /* load each component into GPU global memory */ cudaEventRecord(start, 0); cudaMemcpy(ypointers, yp, imgsize, cudaMemcpyHostToDevice); cudaMemcpy(cbpointers, bp, imgsize, cudaMemcpyHostToDevice); cudaMemcpy(crpointers, rp, imgsize, cudaMemcpyHostToDevice); cudaMalloc(&counts_y_dc, 256 * sizeof(UNIT32)); 
cudaMemset(counts_y_dc, 0, sizeof(UNIT32) * 256); cudaMalloc(&counts_y_ac, 256 * sizeof(UNIT32)); cudaMemset(counts_y_ac, 0, sizeof(UNIT32) * 256); cudaMalloc(&counts_b_dc, 256 * sizeof(UNIT32)); cudaMemset(counts_b_dc, 0, sizeof(UNIT32) * 256); cudaMalloc(&counts_b_ac, 256 * sizeof(UNIT32)); cudaMemset(counts_b_ac, 0, sizeof(UNIT32) * 256); cudaMalloc(&counts_r_dc, 256 * sizeof(UNIT32)); cudaMemset(counts_r_dc, 0, sizeof(UNIT32) * 256); cudaMalloc(&counts_r_ac, 256 * sizeof(UNIT32)); cudaMemset(counts_r_ac, 0, sizeof(UNIT32) * 256); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&seconds, start, stop); printf("%f milliseconds to upload data to device \n", seconds); /* empty local buffer in main memory */ memset(yp, 0, imgsize); memset(bp, 0, imgsize); memset(rp, 0, imgsize); /* * create block with size 8 X 8, e.g each block has 64 threads * create grid with width/8 + height/8 size. * */ dim3 dimBlock2(8, 8); dim3 dimGrid2((dimBlock2.x - 1 + width) / dimBlock2.x, (dimBlock2.y - 1 + height) / dimBlock2.y); /* * convert color space from rgb to yuv * */ cudaEventRecord(start, 0); convertColorSpace_rgb2yuv<<<dimGrid2, dimBlock2>>>(ypointers, cbpointers, crpointers, width, height); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&seconds, start, stop); printf("%f milliseconds for color space conversion from rgb to yuv\n", seconds); cudaEventRecord(start, 0); shiftBlock<<<dimGrid2, dimBlock2>>>(ypointers, width); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&seconds, start, stop); printf("%f milliseconds for data shift\n", seconds * 3); cudaEventRecord(start, 0); FDCT<<<dimGrid2, dimBlock2>>>(ypointers, width); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&seconds, start, stop); printf("%f milliseconds for DCT transform\n", seconds * 3); cudaEventRecord(start, 0); QUANTIZATION<<<dimGrid2, dimBlock2>>>(ypointers, width, quality); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&seconds, start, stop); printf("%f milliseconds for Quantization\n", seconds * 3); shiftBlock<<<dimGrid2, dimBlock2>>>(cbpointers, width); FDCT<<<dimGrid2, dimBlock2>>>(cbpointers, width); QUANTIZATION<<<dimGrid2, dimBlock2>>>(cbpointers, width, quality); shiftBlock<<<dimGrid2, dimBlock2>>>(crpointers, width); FDCT<<<dimGrid2, dimBlock2>>>(crpointers, width); QUANTIZATION<<<dimGrid2, dimBlock2>>>(crpointers, width, quality); cudaEventRecord(start, 0); huffCode<<<6, 1>>>(ypointers, cbpointers, crpointers, counts_y_dc, counts_y_ac, counts_b_dc, counts_b_ac, counts_r_dc, counts_r_ac, width / 8, height / 8, (size_t)width, (size_t)width, (size_t)width); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&seconds, start, stop); printf("%f milliseconds for encoding\n", seconds); cudaEventRecord(start, 0); DEQUANTIZATION<<<dimGrid2, dimBlock2>>>(ypointers, width, quality); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&seconds, start, stop); printf("%f milliseconds for Dequantization\n", seconds * 3); cudaEventRecord(start, 0); IDCT<<<dimGrid2, dimBlock2>>>(ypointers, width); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&seconds, start, stop); printf("%f milliseconds for IDCT transform\n", seconds * 3); cudaEventRecord(start, 0); ishiftBlock<<<dimGrid2, dimBlock2>>>(ypointers, width); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&seconds, start, stop); printf("%f milliseconds for 
reverse data shift\n", seconds * 3); DEQUANTIZATION<<<dimGrid2, dimBlock2>>>(cbpointers, width, quality); IDCT<<<dimGrid2, dimBlock2>>>(cbpointers, width); ishiftBlock<<<dimGrid2, dimBlock2>>>(cbpointers, width); DEQUANTIZATION<<<dimGrid2, dimBlock2>>>(crpointers, width, quality); IDCT<<<dimGrid2, dimBlock2>>>(crpointers, width); ishiftBlock<<<dimGrid2, dimBlock2>>>(crpointers, width); /* * convert color space back from yuv to rgb * */ cudaEventRecord(start, 0); convertColorSpace_yuv2rgb<<<dimGrid2, dimBlock2>>>(ypointers, cbpointers, crpointers, width, height); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&seconds, start, stop); printf("%f milliseconds for color space conversion from yuv to rgb\n", seconds); cudaEventRecord(start, 0); cudaMemcpy(yp, ypointers, imgsize, cudaMemcpyDeviceToHost); cudaMemcpy(bp, cbpointers, imgsize, cudaMemcpyDeviceToHost); cudaMemcpy(rp, crpointers, imgsize, cudaMemcpyDeviceToHost); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&seconds, start, stop); printf("%f milliseconds used to download data from device \n", seconds); for (y = 0; y < height; y++) { png_byte* row = row_pointers[y]; for (x = 0; x < width; x++) { png_byte* ptr = &(row[x * skip]); // set red value to 0 and green value to the blue one ptr[0] = yp[y * width + x]; ptr[1] = bp[y * width + x]; ptr[2] = rp[y * width + x]; } } cudaEventRecord(start, 0); cudaFree(ypointers); cudaFree(cbpointers); cudaFree(crpointers); cudaFree(counts_y_dc); cudaFree(counts_y_ac); cudaFree(counts_b_dc); cudaFree(counts_b_ac); cudaFree(counts_r_dc); cudaFree(counts_r_ac); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&seconds, start, stop); printf("%f milliseconds used to free allocated memory on device \n", seconds); cudaEventDestroy(start); cudaEventDestroy(stop); free(yp); free(bp); free(rp); } void printBlock_float(float *matrix, int width, int block_x, int block_y) { int u, v; for (u = 0; u < block_side; u++) { for (v = 0; v < block_side; v++) { int index_x = block_x * width * block_side + u * width; int index_y = block_y * block_side + v; printf("%5.1f\t", *(matrix + index_x + index_y)); } printf("\n"); } printf("\n"); } int main(int argc, char *argv[]) { if (argc < 4) abort_( "Usage: program_name file_path_of_input_file file_path_of_output_file quality"); read_png_file(argv[1]); process_file(atoi(argv[3])); write_png_file(argv[2]); }
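/*
 * Minimal host-side sketch of the quality scaling used by the QUANTIZATION
 * and DEQUANTIZATION kernels above, assuming the same integer rounding:
 *   s   = (quality < 50) ? 5000 / quality : 200 - 2 * quality;
 *   val = (s * Q[i] + 50) / 100;
 * quant_step() and the sample table entry below are hypothetical stand-ins
 * for Qy_ from header/table.h, which is not reproduced here. With this
 * rounding, quality == 100 makes val == 0 for every entry, which the kernels
 * would then divide by, so callers are expected to stay below 100 (or clamp
 * val to at least 1).
 */
#include <stdio.h>

static int quant_step(int quality, int table_entry) {
    /* same scaling and rounding as the QUANTIZATION/DEQUANTIZATION kernels */
    int s = (quality < 50) ? 5000 / quality : 200 - 2 * quality;
    return (s * table_entry + 50) / 100;
}

int main(void) {
    int entry = 16; /* e.g. the DC entry of a standard luminance table */
    for (int q = 10; q <= 90; q += 40) {
        printf("quality %2d -> step %d\n", q, quant_step(q, entry));
    }
    return 0;
}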
3c096bb8913b67a3163983b2fd67e7b1fe031c69.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. 
// You should wrap your allocation and copying statements like we've done in the // code we're supplying you. Here is an example of the unsafe way to allocate // memory on the GPU: // // hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "utils.h" const int TB_DIM_X = 16; const int TB_DIM_Y = 16; __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. int image_x = blockIdx.x * blockDim.x + threadIdx.x; int image_y = blockIdx.y * blockDim.y + threadIdx.y; if (image_x >= numCols || image_y >= numRows) return; //assume filter is a square matrix float result = 0; unsigned char patch_pixel; int clamp_x, clamp_y; for (int r = -filterWidth / 2; r <= filterWidth / 2; ++r) { for (int c = -filterWidth / 2; c <= filterWidth / 2; ++c) { clamp_x = min( max(image_x + c, 0) , numCols -1); clamp_y = min( max(image_y + r, 0) , numRows -1); patch_pixel = *(inputChannel + clamp_y * numCols + clamp_x); result = result + patch_pixel * (*(filter+ (r + filterWidth/2) * filterWidth + c + filterWidth/2)); } } *(outputChannel+ image_y * numCols + image_x)=(unsigned char)result; } __global__ void gaussian_blur_shared_ver(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. 
You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. int image_x = blockIdx.x * blockDim.x + threadIdx.x; int image_y = blockIdx.y * blockDim.y + threadIdx.y; int patch_x = threadIdx.x; int patch_y = threadIdx.y; if (image_x >= numCols || image_y >= numRows) return; int shift = filterWidth / 2; //load input to shared memory extern __shared__ unsigned char pixels[]; int PATCH_COLS = TB_DIM_X + shift * 2; pixels[(patch_y + shift) * PATCH_COLS + patch_x + shift] = *(inputChannel+ image_y * numCols +image_x); //sync within thread block __syncthreads(); //assume filter is a square matrix float result = 0; for (int r = -1 * shift; r <= shift; ++r) { for (int c = -1 * shift; c <= shift; ++c) { result = result + pixels[(patch_y + shift + r) * PATCH_COLS + patch_x + shift +c] * (* (filter + (r + shift) * filterWidth + c + shift)); } } ; *(outputChannel+ image_y * numCols + image_x)=(unsigned char)result; } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { // // // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < numCols && y < numRows){ const uchar4* sweetspot = inputImageRGBA + x + y * numCols; *(redChannel + x + y * numCols) = sweetspot->x; *(greenChannel + x + y * numCols) = sweetspot->y; *(blueChannel + x + y * numCols) = sweetspot->z; } } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. 
__global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with hipMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth)); //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice); //Remember to use checkCudaErrors! checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, hipMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { //Set reasonable block size (i.e., number of threads per block) const dim3 blockSize(TB_DIM_X, TB_DIM_Y); //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. int GRID_DIM_X = (numCols-1) / TB_DIM_X + 1; int GRID_DIM_Y = (numRows-1) / TB_DIM_Y + 1; const dim3 gridSize(GRID_DIM_X, GRID_DIM_Y); //Launch a kernel for separating the RGBA image into different color channels hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); // Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); //Call your convolution kernel here 3 times, once for each color channel. 
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth); hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth); hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth); // TODO: test shared memory // int sharedMemSize = sizeof(unsigned char) * (TB_DIM_Y + filterWidth / 2 * 2) * (TB_DIM_X + filterWidth / 2 * 2); // gaussian_blur_shared_ver<<<gridSize, blockSize, sharedMemSize>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth); // gaussian_blur_shared_ver<<<gridSize, blockSize, sharedMemSize>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth); // gaussian_blur_shared_ver<<<gridSize, blockSize, sharedMemSize>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth); // Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. // // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } //Free all the memory that we allocated //TODO: make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(hipFree(d_red)); checkCudaErrors(hipFree(d_green)); checkCudaErrors(hipFree(d_blue)); }
3c096bb8913b67a3163983b2fd67e7b1fe031c69.cu
// Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. // You should wrap your allocation and copying statements like we've done in the // code we're supplying you. 
Here is an example of the unsafe way to allocate // memory on the GPU: // // cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "utils.h" const int TB_DIM_X = 16; const int TB_DIM_Y = 16; __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. int image_x = blockIdx.x * blockDim.x + threadIdx.x; int image_y = blockIdx.y * blockDim.y + threadIdx.y; if (image_x >= numCols || image_y >= numRows) return; //assume filter is a square matrix float result = 0; unsigned char patch_pixel; int clamp_x, clamp_y; for (int r = -filterWidth / 2; r <= filterWidth / 2; ++r) { for (int c = -filterWidth / 2; c <= filterWidth / 2; ++c) { clamp_x = min( max(image_x + c, 0) , numCols -1); clamp_y = min( max(image_y + r, 0) , numRows -1); patch_pixel = *(inputChannel + clamp_y * numCols + clamp_x); result = result + patch_pixel * (*(filter+ (r + filterWidth/2) * filterWidth + c + filterWidth/2)); } } *(outputChannel+ image_y * numCols + image_x)=(unsigned char)result; } __global__ void gaussian_blur_shared_ver(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. 
Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. int image_x = blockIdx.x * blockDim.x + threadIdx.x; int image_y = blockIdx.y * blockDim.y + threadIdx.y; int patch_x = threadIdx.x; int patch_y = threadIdx.y; if (image_x >= numCols || image_y >= numRows) return; int shift = filterWidth / 2; //load input to shared memory extern __shared__ unsigned char pixels[]; int PATCH_COLS = TB_DIM_X + shift * 2; pixels[(patch_y + shift) * PATCH_COLS + patch_x + shift] = *(inputChannel+ image_y * numCols +image_x); //sync within thread block __syncthreads(); //assume filter is a square matrix float result = 0; for (int r = -1 * shift; r <= shift; ++r) { for (int c = -1 * shift; c <= shift; ++c) { result = result + pixels[(patch_y + shift + r) * PATCH_COLS + patch_x + shift +c] * (* (filter + (r + shift) * filterWidth + c + shift)); } } ; *(outputChannel+ image_y * numCols + image_x)=(unsigned char)result; } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { // // // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < numCols && y < numRows){ const uchar4* sweetspot = inputImageRGBA + x + y * numCols; *(redChannel + x + y * numCols) = sweetspot->x; *(greenChannel + x + y * numCols) = sweetspot->y; *(blueChannel + x + y * numCols) = sweetspot->z; } } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. 
__global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with cudaMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc checkCudaErrors(cudaMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth)); //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice); //Remember to use checkCudaErrors! checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, cudaMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { //Set reasonable block size (i.e., number of threads per block) const dim3 blockSize(TB_DIM_X, TB_DIM_Y); //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. int GRID_DIM_X = (numCols-1) / TB_DIM_X + 1; int GRID_DIM_Y = (numRows-1) / TB_DIM_Y + 1; const dim3 gridSize(GRID_DIM_X, GRID_DIM_Y); //Launch a kernel for separating the RGBA image into different color channels separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); // Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); //Call your convolution kernel here 3 times, once for each color channel. 
gaussian_blur<<<gridSize, blockSize>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth); gaussian_blur<<<gridSize, blockSize>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth); gaussian_blur<<<gridSize, blockSize>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth); // TODO: test shared memory // int sharedMemSize = sizeof(unsigned char) * (TB_DIM_Y + filterWidth / 2 * 2) * (TB_DIM_X + filterWidth / 2 * 2); // gaussian_blur_shared_ver<<<gridSize, blockSize, sharedMemSize>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth); // gaussian_blur_shared_ver<<<gridSize, blockSize, sharedMemSize>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth); // gaussian_blur_shared_ver<<<gridSize, blockSize, sharedMemSize>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth); // Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. // // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. recombineChannels<<<gridSize, blockSize>>>(d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } //Free all the memory that we allocated //TODO: make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(cudaFree(d_red)); checkCudaErrors(cudaFree(d_green)); checkCudaErrors(cudaFree(d_blue)); }
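/*
 * Minimal CPU reference for the clamp-to-edge weighted blur computed by the
 * gaussian_blur kernel above, handy for spot-checking its output. The
 * function name, the 4x4 toy image, and the 3x3 filter are illustrative
 * stand-ins only; they are not part of the original assignment code.
 */
#include <stdio.h>

static int clampi(int v, int lo, int hi) {
    return v < lo ? lo : (v > hi ? hi : v);
}

static void reference_blur(const unsigned char *in, unsigned char *out,
                           int numRows, int numCols,
                           const float *filter, int filterWidth) {
    int half = filterWidth / 2;
    for (int y = 0; y < numRows; ++y) {
        for (int x = 0; x < numCols; ++x) {
            float result = 0.0f;
            for (int r = -half; r <= half; ++r) {
                for (int c = -half; c <= half; ++c) {
                    /* clamp neighbours to the image, as the kernel does */
                    int cy = clampi(y + r, 0, numRows - 1);
                    int cx = clampi(x + c, 0, numCols - 1);
                    result += in[cy * numCols + cx] *
                              filter[(r + half) * filterWidth + (c + half)];
                }
            }
            out[y * numCols + x] = (unsigned char)result;
        }
    }
}

int main(void) {
    unsigned char img[16] = { 10, 20, 30, 40,  50, 60, 70, 80,
                              90,100,110,120, 130,140,150,160 };
    unsigned char out[16];
    float filt[9] = { 0.0f, 0.2f, 0.0f,
                      0.2f, 0.2f, 0.2f,
                      0.0f, 0.2f, 0.0f };
    reference_blur(img, out, 4, 4, filt, 3);
    printf("blurred centre pixel: %d\n", out[5]);
    return 0;
}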
1ade806f8f7de24764c10b5a2e3614d103fb4e04.hip
// !!! This is a file automatically generated by hipify!!! #include "GPUExpEvaluator.h" /** A boolean whether to use linear interpolation to compute exponentials */ __constant__ bool interpolate[1]; /** The maximum allowable optical length represented in the table */ __constant__ FP_PRECISION max_optical_length[1]; /** The inverse spacing for the exponential linear interpolation table */ __constant__ FP_PRECISION inverse_exp_table_spacing[1]; /** An array for the sines of the polar angle in the polar Quadrature set */ extern __constant__ FP_PRECISION sin_thetas[MAX_POLAR_ANGLES_GPU]; /** Twice the number of polar angles */ extern __constant__ int num_polar[1]; /** * @brief Given a pointer to an ExpEvaluator on the host and a * GPUExpEvaluator on the GPU, copy all of the properties from * the ExpEvaluator object on the host to the GPU. * @details This routine is called by the GPUSolver::initializeExpEvaluator() * private class method and is not intended to be called directly. * @param eavluator_h pointer to a ExpEvaluator on the host * @param evaluator_d pointer to a GPUExpEvaluator on the GPU */ void clone_exp_evaluator(ExpEvaluator* evaluator_h, GPUExpEvaluator* evaluator_d) { /* Copy a boolean indicating whether or not to use the linear interpolation * table or the exp intrinsic function to constant memory on the device */ bool interpolate_exp = evaluator_h->isUsingInterpolation(); hipMemcpyToSymbol(interpolate, (void*)&interpolate_exp, sizeof(bool), 0, hipMemcpyHostToDevice); if (evaluator_h->isUsingInterpolation()) { /* Copy inverse table spacing to constant memory on the device */ FP_PRECISION inverse_spacing_h = 1.0 / evaluator_h->getTableSpacing(); hipMemcpyToSymbol(inverse_exp_table_spacing, (void*)&inverse_spacing_h, sizeof(FP_PRECISION), 0, hipMemcpyHostToDevice); /* Copy the number of table entries to constant memory on the device */ FP_PRECISION max_optical_length_h = evaluator_h->getMaxOpticalLength(); hipMemcpyToSymbol(max_optical_length, (void*)&max_optical_length_h, sizeof(FP_PRECISION), 0, hipMemcpyHostToDevice); /* Allocate memory for the interpolation table on the device */ int exp_table_size_h = evaluator_h->getTableSize(); FP_PRECISION* exp_table_h = evaluator_h->getExpTable(); FP_PRECISION* exp_table_d; hipMalloc((void**)&exp_table_d, exp_table_size_h * sizeof(FP_PRECISION)); hipMemcpy((void*)exp_table_d, (void*)exp_table_h, exp_table_size_h * sizeof(FP_PRECISION), hipMemcpyHostToDevice); hipMemcpy((void*)&evaluator_d->_exp_table, (void*)&exp_table_d, sizeof(FP_PRECISION*), hipMemcpyHostToDevice); } return; } /** * @brief Computes the exponential term for a optical length and polar angle. * @details This method computes \f$ 1 - exp(-\tau/sin(\theta_p)) \f$ * for some optical path length and polar angle. This method * uses either a linear interpolation table (default) or the * exponential intrinsic exp(...) function. * @param tau the optical path length (e.g., sigma_t times length) * @param polar the polar angle index * @return the evaluated exponential */ __device__ FP_PRECISION GPUExpEvaluator::computeExponential(FP_PRECISION tau, int polar) { FP_PRECISION exponential; /* Evaluate the exponential using the linear interpolation table */ if (*interpolate) { tau = min(tau, (*max_optical_length)); int index = floor(tau * (*inverse_exp_table_spacing)); index *= (*num_polar); exponential = (1. - (_exp_table[index + 2 * polar] * tau + _exp_table[index + 2 * polar +1])); } /* Evalute the exponential using the intrinsic exp(...) 
   function */
  else {
#ifdef SINGLE
    exponential = 1.0 - __expf(- tau / sin_thetas[polar]);
#else
    exponential = 1.0 - exp(- tau / sin_thetas[polar]);
#endif
  }

  return exponential;
}
1ade806f8f7de24764c10b5a2e3614d103fb4e04.cu
#include "GPUExpEvaluator.h" /** A boolean whether to use linear interpolation to compute exponentials */ __constant__ bool interpolate[1]; /** The maximum allowable optical length represented in the table */ __constant__ FP_PRECISION max_optical_length[1]; /** The inverse spacing for the exponential linear interpolation table */ __constant__ FP_PRECISION inverse_exp_table_spacing[1]; /** An array for the sines of the polar angle in the polar Quadrature set */ extern __constant__ FP_PRECISION sin_thetas[MAX_POLAR_ANGLES_GPU]; /** Twice the number of polar angles */ extern __constant__ int num_polar[1]; /** * @brief Given a pointer to an ExpEvaluator on the host and a * GPUExpEvaluator on the GPU, copy all of the properties from * the ExpEvaluator object on the host to the GPU. * @details This routine is called by the GPUSolver::initializeExpEvaluator() * private class method and is not intended to be called directly. * @param eavluator_h pointer to a ExpEvaluator on the host * @param evaluator_d pointer to a GPUExpEvaluator on the GPU */ void clone_exp_evaluator(ExpEvaluator* evaluator_h, GPUExpEvaluator* evaluator_d) { /* Copy a boolean indicating whether or not to use the linear interpolation * table or the exp intrinsic function to constant memory on the device */ bool interpolate_exp = evaluator_h->isUsingInterpolation(); cudaMemcpyToSymbol(interpolate, (void*)&interpolate_exp, sizeof(bool), 0, cudaMemcpyHostToDevice); if (evaluator_h->isUsingInterpolation()) { /* Copy inverse table spacing to constant memory on the device */ FP_PRECISION inverse_spacing_h = 1.0 / evaluator_h->getTableSpacing(); cudaMemcpyToSymbol(inverse_exp_table_spacing, (void*)&inverse_spacing_h, sizeof(FP_PRECISION), 0, cudaMemcpyHostToDevice); /* Copy the number of table entries to constant memory on the device */ FP_PRECISION max_optical_length_h = evaluator_h->getMaxOpticalLength(); cudaMemcpyToSymbol(max_optical_length, (void*)&max_optical_length_h, sizeof(FP_PRECISION), 0, cudaMemcpyHostToDevice); /* Allocate memory for the interpolation table on the device */ int exp_table_size_h = evaluator_h->getTableSize(); FP_PRECISION* exp_table_h = evaluator_h->getExpTable(); FP_PRECISION* exp_table_d; cudaMalloc((void**)&exp_table_d, exp_table_size_h * sizeof(FP_PRECISION)); cudaMemcpy((void*)exp_table_d, (void*)exp_table_h, exp_table_size_h * sizeof(FP_PRECISION), cudaMemcpyHostToDevice); cudaMemcpy((void*)&evaluator_d->_exp_table, (void*)&exp_table_d, sizeof(FP_PRECISION*), cudaMemcpyHostToDevice); } return; } /** * @brief Computes the exponential term for a optical length and polar angle. * @details This method computes \f$ 1 - exp(-\tau/sin(\theta_p)) \f$ * for some optical path length and polar angle. This method * uses either a linear interpolation table (default) or the * exponential intrinsic exp(...) function. * @param tau the optical path length (e.g., sigma_t times length) * @param polar the polar angle index * @return the evaluated exponential */ __device__ FP_PRECISION GPUExpEvaluator::computeExponential(FP_PRECISION tau, int polar) { FP_PRECISION exponential; /* Evaluate the exponential using the linear interpolation table */ if (*interpolate) { tau = min(tau, (*max_optical_length)); int index = floor(tau * (*inverse_exp_table_spacing)); index *= (*num_polar); exponential = (1. - (_exp_table[index + 2 * polar] * tau + _exp_table[index + 2 * polar +1])); } /* Evalute the exponential using the intrinsic exp(...) 
   function */
  else {
#ifdef SINGLE
    exponential = 1.0 - __expf(- tau / sin_thetas[polar]);
#else
    exponential = 1.0 - exp(- tau / sin_thetas[polar]);
#endif
  }

  return exponential;
}
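/*
 * Host-side sketch of the linear-interpolation path in
 * GPUExpEvaluator::computeExponential above: 1 - exp(-tau/sin(theta)) is
 * approximated per table cell as 1 - (slope * tau + intercept). The real
 * (slope, intercept) table is built by ExpEvaluator on the host and its exact
 * layout is only assumed here; this sketch hand-builds a table for a single
 * polar angle (so "num_polar", documented as twice the number of polar
 * angles, is 2) purely to illustrate the indexing and evaluation.
 */
#include <stdio.h>
#include <math.h>

#define NUM_CELLS 64
#define MAX_TAU   10.0

int main(void) {
    const int num_polar = 2;          /* twice the number of polar angles */
    const double sin_theta = 0.5;
    const double spacing = MAX_TAU / NUM_CELLS;
    const double inverse_spacing = 1.0 / spacing;
    double table[2 * NUM_CELLS];      /* (slope, intercept) per cell */

    for (int i = 0; i < NUM_CELLS; ++i) {
        double t0 = i * spacing, t1 = (i + 1) * spacing;
        double f0 = exp(-t0 / sin_theta), f1 = exp(-t1 / sin_theta);
        double slope = (f1 - f0) / (t1 - t0);
        table[2 * i]     = slope;
        table[2 * i + 1] = f0 - slope * t0;   /* intercept */
    }

    double tau = 1.7;
    int index = (int)floor(tau * inverse_spacing);
    index *= num_polar;               /* same stride as the device code */
    double interp = 1.0 - (table[index] * tau + table[index + 1]);
    double exact  = 1.0 - exp(-tau / sin_theta);
    printf("interpolated %.6f  exact %.6f\n", interp, exact);
    return 0;
}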
d5d74f443bcba7297adcc85335fa97918aafd769.hip
// !!! This is a file automatically generated by hipify!!! /** * Sample Program for CUDA 2.3 * written by M.Saito ([email protected]) * * This sample uses texture reference. * The generation speed of PRNG using texture is faster than using * constant table on Geforce GTX 260. * * MTGP32-11213 * This program generates 32-bit unsigned integers. * The period of generated integers is 2<sup>11213</sup>-1. * This also generates single precision floating point numbers. */ #define __STDC_FORMAT_MACROS 1 #define __STDC_CONSTANT_MACROS 1 #include <stdio.h> #include <hip/hip_runtime.h> #include <shoverand/util/myCutil.h> #include <stdint.h> #include <inttypes.h> #include <errno.h> #include <stdlib.h> extern "C" { #include "mtgp32-fast.h" #include "mtgp32dc-param-11213.c" } #define MEXP 11213 #define N MTGPDC_N #define THREAD_NUM MTGPDC_FLOOR_2P #define LARGE_SIZE (N + THREAD_NUM) //#define LARGE_SIZE 256 //#define BLOCK_NUM 32 #define BLOCK_NUM_MAX 200 #define TBL_SIZE 16 /** * kernel I/O * This structure must be initialized before first use. */ struct mtgp32_kernel_status_t { uint32_t status[N]; }; texture<uint32_t, 1, hipReadModeElementType> tex_param_ref; texture<uint32_t, 1, hipReadModeElementType> tex_temper_ref; texture<uint32_t, 1, hipReadModeElementType> tex_single_ref; /* * Generator Parameters. */ __constant__ uint32_t pos_tbl[BLOCK_NUM_MAX]; __constant__ uint32_t sh1_tbl[BLOCK_NUM_MAX]; __constant__ uint32_t sh2_tbl[BLOCK_NUM_MAX]; /* high_mask and low_mask should be set by make_constant(), but * did not work. */ //__constant__ uint32_t mask = 0xff800000; __constant__ uint32_t mask[1]; /** * Shared memory * The generator's internal status vector. */ __shared__ uint32_t status[LARGE_SIZE]; /** * The function of the recursion formula calculation. * * @param[in] X1 the farthest part of state array. * @param[in] X2 the second farthest part of state array. * @param[in] Y a part of state array. * @param[in] bid block id. * @return output */ __device__ uint32_t para_rec(uint32_t X1, uint32_t X2, uint32_t Y, int bid) { uint32_t X = (X1 & mask[0]) ^ X2; uint32_t MAT; X ^= X << sh1_tbl[bid]; Y = X ^ (Y >> sh2_tbl[bid]); MAT = tex1Dfetch(tex_param_ref, bid * 16 + (Y & 0x0f)); return Y ^ MAT; } /** * The tempering function. * * @param[in] V the output value should be tempered. * @param[in] T the tempering helper value. * @param[in] bid block id. * @return the tempered value. */ __device__ uint32_t temper(uint32_t V, uint32_t T, int bid) { uint32_t MAT; T ^= T >> 16; T ^= T >> 8; MAT = tex1Dfetch(tex_temper_ref, bid * 16 + (T & 0x0f)); return V ^ MAT; } /** * The tempering and converting function. * By using the preset-ted table, converting to IEEE format * and tempering are done simultaneously. * Resulted outputs are distributed in the range [1, 2). * * @param[in] V the output value should be tempered. * @param[in] T the tempering helper value. * @param[in] bid block id. * @return the tempered and converted value. */ __device__ float temper_single(uint32_t V, uint32_t T, int bid) { uint32_t MAT; uint32_t r; T ^= T >> 16; T ^= T >> 8; MAT = tex1Dfetch(tex_single_ref, bid * 16 + (T & 0x0f)); r = (V >> 9) ^ MAT; return __int_as_float(r); } /** * The tempering and converting function. * By using the preset-ted table, converting to IEEE format * and tempering are done simultaneously. * Resulted outputs are distributed in the range [0, 1). * * @param[in] V the output value should be tempered. * @param[in] T the tempering helper value. * @param[in] bid block id. * @return the tempered and converted value. 
*/ __device__ float temper_single01(uint32_t V, uint32_t T, int bid) { uint32_t MAT; uint32_t r; T ^= T >> 16; T ^= T >> 8; MAT = tex1Dfetch(tex_single_ref, bid * 16 + (T & 0x0f)); r = (V >> 9) ^ MAT; return __int_as_float(r) - 1.0f; } /** * Read the internal state vector from kernel I/O data, and * put them into shared memory. * * @param[out] status shared memory. * @param[in] d_status kernel I/O data * @param[in] bid block id * @param[in] tid thread id */ __device__ void status_read(uint32_t status[LARGE_SIZE], const mtgp32_kernel_status_t *d_status, int bid, int tid) { status[tid] = d_status[bid].status[tid]; if (tid < N - THREAD_NUM) { status[THREAD_NUM + tid] = d_status[bid].status[THREAD_NUM + tid]; } __syncthreads(); } /** * Read the internal state vector from shared memory, and * write them into kernel I/O data. * * @param[out] d_status kernel I/O data * @param[in] status shared memory. * @param[in] bid block id * @param[in] tid thread id */ __device__ void status_write(mtgp32_kernel_status_t *d_status, const uint32_t status[LARGE_SIZE], int bid, int tid, int index) { d_status[bid].status[tid] = status[index % LARGE_SIZE]; if (tid < N - THREAD_NUM) { d_status[bid].status[THREAD_NUM + tid] = status[(THREAD_NUM + index) % LARGE_SIZE]; } __syncthreads(); } /** * kernel function. * This function generates 32-bit unsigned integers in d_data * * @params[in,out] d_status kernel I/O data * @params[out] d_data output * @params[in] size number of output data requested. */ __global__ void mtgp32_uint32_kernel(mtgp32_kernel_status_t* d_status, uint32_t* d_data, int size) { const int bid = blockIdx.x; const int tid = threadIdx.x; int pos = pos_tbl[bid]; int index = tid; uint32_t r; uint32_t o; // copy status data from global memory to shared memory. status_read(status, d_status, bid, tid); // main loop for (int i = 0; i < size; i += THREAD_NUM) { r = para_rec(status[index], status[(index + 1) % LARGE_SIZE], status[(index + pos) % LARGE_SIZE], bid); status[(index + N) % LARGE_SIZE] = r; o = temper(r, status[(index + pos - 1) % LARGE_SIZE], bid); d_data[size * bid + i + tid] = o; __syncthreads(); index = (index + THREAD_NUM) % LARGE_SIZE; } // write back status for next call status_write(d_status, status, bid, tid, index); } /** * kernel function. * This function generates single precision floating point numbers in d_data. * * @params[in,out] d_status kernel I/O data * @params[out] d_data output. IEEE single precision format. * @params[in] size number of output data requested. */ __global__ void mtgp32_single_kernel(mtgp32_kernel_status_t* d_status, float* d_data, int size) { const int bid = blockIdx.x; const int tid = threadIdx.x; int pos = pos_tbl[bid]; int index = tid; uint32_t r; float o; // copy status data from global memory to shared memory. status_read(status, d_status, bid, tid); #if defined(DEBUG) && defined(__DEVICE_EMULATION__) printf("status[0]:%08x\n", status[0]); printf("status[1]:%08x\n", status[1]); #endif // main loop for (int i = 0; i < size; i += THREAD_NUM) { r = para_rec(status[index], status[(index + 1) % LARGE_SIZE], status[(index + pos) % LARGE_SIZE], bid); status[(index + N) % LARGE_SIZE] = r; o = temper_single(r, status[(index + pos - 1) % LARGE_SIZE], bid); d_data[size * bid + i + tid] = o; __syncthreads(); index = (index + THREAD_NUM) % LARGE_SIZE; } // write back status for next call status_write(d_status, status, bid, tid, index); } /** * kernel function. * This function generates single precision floating point numbers in d_data. 
* * @params[in,out] d_status kernel I/O data * @params[out] d_data output. IEEE single precision format. * @params[in] size number of output data requested. */ __global__ void mtgp32_single01_kernel(mtgp32_kernel_status_t* d_status, float* d_data, int size) { const int bid = blockIdx.x; const int tid = threadIdx.x; int pos = pos_tbl[bid]; int index = tid; uint32_t r; float o; // copy status data from global memory to shared memory. status_read(status, d_status, bid, tid); // main loop for (int i = 0; i < size; i += THREAD_NUM) { r = para_rec(status[index], status[(index + 1) % LARGE_SIZE], status[(index + pos) % LARGE_SIZE], bid); status[(index + N) % LARGE_SIZE] = r; o = temper_single01(r, status[(index + pos - 1) % LARGE_SIZE], bid); d_data[size * bid + i + tid] = o; __syncthreads(); index = (index + THREAD_NUM) % LARGE_SIZE; } // write back status for next call status_write(d_status, status, bid, tid, index); } #include "mtgp-cuda-common.c" #include "mtgp32-cuda-common.c" /** * This function sets constants in device memory. * @param params input, MTGP32 parameters. */ void make_constant_param(const mtgp32_params_fast_t params[], int block_num) { const int size1 = sizeof(uint32_t) * block_num; uint32_t *h_pos_tbl; uint32_t *h_sh1_tbl; uint32_t *h_sh2_tbl; uint32_t *h_mask; h_pos_tbl = (uint32_t *)malloc(size1); h_sh1_tbl = (uint32_t *)malloc(size1); h_sh2_tbl = (uint32_t *)malloc(size1); h_mask = (uint32_t *)malloc(sizeof(uint32_t)); if (h_pos_tbl == NULL || h_sh1_tbl == NULL || h_sh2_tbl == NULL || h_mask == NULL ) { printf("failure in allocating host memory for constant table.\n"); exit(1); } h_mask[0] = params[0].mask; for (int i = 0; i < block_num; i++) { h_pos_tbl[i] = params[i].pos; h_sh1_tbl[i] = params[i].sh1; h_sh2_tbl[i] = params[i].sh2; } // copy from malloc area only myCutilSafeCall(hipMemcpyToSymbol(pos_tbl, h_pos_tbl, size1)); myCutilSafeCall(hipMemcpyToSymbol(sh1_tbl, h_sh1_tbl, size1)); myCutilSafeCall(hipMemcpyToSymbol(sh2_tbl, h_sh2_tbl, size1)); myCutilSafeCall(hipMemcpyToSymbol(mask, h_mask, sizeof(uint32_t))); free(h_pos_tbl); free(h_sh1_tbl); free(h_sh2_tbl); free(h_mask); } /** * This function sets texture lookup table. * @param params input, MTGP32 parameters. 
* @param d_texture_tbl device memory used for texture bind * @param block_num block number used for kernel call */ void make_texture(const mtgp32_params_fast_t params[], uint32_t *d_texture_tbl[3], int block_num) { const int count = block_num * TBL_SIZE; const int size = sizeof(uint32_t) * count; uint32_t *h_texture_tbl[3]; int i, j; for (i = 0; i < 3; i++) { h_texture_tbl[i] = (uint32_t *)malloc(size); if (h_texture_tbl[i] == NULL) { for (j = 0; j < i; j++) { free(h_texture_tbl[i]); } printf("failure in allocating host memory for constant table.\n"); exit(1); } } for (int i = 0; i < block_num; i++) { for (int j = 0; j < TBL_SIZE; j++) { h_texture_tbl[0][i * TBL_SIZE + j] = params[i].tbl[j]; h_texture_tbl[1][i * TBL_SIZE + j] = params[i].tmp_tbl[j]; h_texture_tbl[2][i * TBL_SIZE + j] = params[i].flt_tmp_tbl[j]; } } myCutilSafeCall(hipMemcpy(d_texture_tbl[0], h_texture_tbl[0], size, hipMemcpyHostToDevice)); myCutilSafeCall(hipMemcpy(d_texture_tbl[1], h_texture_tbl[1], size, hipMemcpyHostToDevice)); myCutilSafeCall(hipMemcpy(d_texture_tbl[2], h_texture_tbl[2], size, hipMemcpyHostToDevice)); tex_param_ref.filterMode = hipFilterModePoint; tex_temper_ref.filterMode = hipFilterModePoint; tex_single_ref.filterMode = hipFilterModePoint; myCutilSafeCall(hipBindTexture(0, tex_param_ref, d_texture_tbl[0], size)); myCutilSafeCall(hipBindTexture(0, tex_temper_ref, d_texture_tbl[1], size)); myCutilSafeCall(hipBindTexture(0, tex_single_ref, d_texture_tbl[2], size)); free(h_texture_tbl[0]); free(h_texture_tbl[1]); free(h_texture_tbl[2]); } /** * host function. * This function calls corresponding kernel function. * * @param d_status kernel I/O data. * @param num_data number of data to be generated. */ void make_uint32_random(mtgp32_kernel_status_t* d_status, int num_data, int block_num) { uint32_t* d_data; unsigned int timer = 0; uint32_t* h_data; hipError_t e; float gputime; printf("generating 32-bit unsigned random numbers.\n"); myCutilSafeCall(hipMalloc((void**)&d_data, sizeof(uint32_t) * num_data)); CUT_SAFE_CALL(cutCreateTimer(&timer)); h_data = (uint32_t *) malloc(sizeof(uint32_t) * num_data); if (h_data == NULL) { printf("failure in allocating host memory for output data.\n"); exit(1); } CUT_SAFE_CALL(cutStartTimer(timer)); if (hipGetLastError() != hipSuccess) { printf("error has been occured before kernel call.\n"); exit(1); } /* kernel call */ hipLaunchKernelGGL(( mtgp32_uint32_kernel), dim3(block_num), dim3(THREAD_NUM), 0, 0, d_status, d_data, num_data / block_num); hipDeviceSynchronize(); e = hipGetLastError(); if (e != hipSuccess) { printf("failure in kernel call.\n%s\n", hipGetErrorString(e)); exit(1); } CUT_SAFE_CALL(cutStopTimer(timer)); myCutilSafeCall( hipMemcpy(h_data, d_data, sizeof(uint32_t) * num_data, hipMemcpyDeviceToHost)); gputime = cutGetTimerValue(timer); print_uint32_array(h_data, num_data, block_num); printf("generated numbers: %d\n", num_data); printf("Processing time: %f (ms)\n", gputime); printf("Samples per second: %E \n", num_data / (gputime * 0.001)); CUT_SAFE_CALL(cutDeleteTimer(timer)); //free memories free(h_data); myCutilSafeCall(hipFree(d_data)); } /** * host function. * This function calls corresponding kernel function. * * @param d_status kernel I/O data. * @param num_data number of data to be generated. 
*/ void make_single_random(mtgp32_kernel_status_t* d_status, int num_data, int block_num) { float* d_data; unsigned int timer = 0; float* h_data; hipError_t e; float gputime; printf("generating single precision floating point random numbers.\n"); myCutilSafeCall(hipMalloc((void**)&d_data, sizeof(float) * num_data)); CUT_SAFE_CALL(cutCreateTimer(&timer)); h_data = (float *) malloc(sizeof(float) * num_data); if (h_data == NULL) { printf("failure in allocating host memory for output data.\n"); exit(1); } CUT_SAFE_CALL(cutStartTimer(timer)); if (hipGetLastError() != hipSuccess) { printf("error has been occured before kernel call.\n"); exit(1); } /* kernel call */ hipLaunchKernelGGL(( mtgp32_single_kernel), dim3(block_num), dim3(THREAD_NUM) , 0, 0, d_status, d_data, num_data / block_num); hipDeviceSynchronize(); e = hipGetLastError(); if (e != hipSuccess) { printf("failure in kernel call.\n%s\n", hipGetErrorString(e)); exit(1); } CUT_SAFE_CALL(cutStopTimer(timer)); myCutilSafeCall( hipMemcpy(h_data, d_data, sizeof(float) * num_data, hipMemcpyDeviceToHost)); gputime = cutGetTimerValue(timer); print_float_array(h_data, num_data, block_num); printf("generated numbers: %d\n", num_data); printf("Processing time: %f (ms)\n", gputime); printf("Samples per second: %E \n", num_data / (gputime * 0.001)); CUT_SAFE_CALL(cutDeleteTimer(timer)); //free memories free(h_data); myCutilSafeCall(hipFree(d_data)); } /** * host function. * This function calls corresponding kernel function. * * @param d_status kernel I/O data. * @param num_data number of data to be generated. */ void make_single01_random(mtgp32_kernel_status_t* d_status, int num_data, int block_num) { float* d_data; unsigned int timer = 0; float* h_data; hipError_t e; float gputime; printf("generating single precision floating point random numbers.\n"); myCutilSafeCall(hipMalloc((void**)&d_data, sizeof(float) * num_data)); CUT_SAFE_CALL(cutCreateTimer(&timer)); h_data = (float *) malloc(sizeof(float) * num_data); if (h_data == NULL) { printf("failure in allocating host memory for output data.\n"); exit(1); } CUT_SAFE_CALL(cutStartTimer(timer)); if (hipGetLastError() != hipSuccess) { printf("error has been occured before kernel call.\n"); exit(1); } /* kernel call */ hipLaunchKernelGGL(( mtgp32_single01_kernel), dim3(block_num), dim3(THREAD_NUM) , 0, 0, d_status, d_data, num_data / block_num); hipDeviceSynchronize(); e = hipGetLastError(); if (e != hipSuccess) { printf("failure in kernel call.\n%s\n", hipGetErrorString(e)); exit(1); } CUT_SAFE_CALL(cutStopTimer(timer)); myCutilSafeCall( hipMemcpy(h_data, d_data, sizeof(float) * num_data, hipMemcpyDeviceToHost)); gputime = cutGetTimerValue(timer); print_float_array(h_data, num_data, block_num); printf("generated numbers: %d\n", num_data); printf("Processing time: %f (ms)\n", gputime); printf("Samples per second: %E \n", num_data / (gputime * 0.001)); CUT_SAFE_CALL(cutDeleteTimer(timer)); //free memories free(h_data); myCutilSafeCall(hipFree(d_data)); } int main(int argc, char *argv[]) { // LARGE_SIZE is a multiple of 16 int num_data = 10000000; int block_num; int num_unit; int r; mtgp32_kernel_status_t *d_status; uint32_t *d_texture[3]; if (argc >= 2) { errno = 0; block_num = strtol(argv[1], NULL, 10); if (errno) { printf("%s number_of_block number_of_output\n", argv[0]); return 1; } if (block_num < 1 || block_num > BLOCK_NUM_MAX) { printf("%s block_num should be between 1 and %d\n", argv[0], BLOCK_NUM_MAX); return 1; } errno = 0; num_data = strtol(argv[2], NULL, 10); if (errno) { printf("%s 
number_of_block number_of_output\n", argv[0]); return 1; } argc -= 2; argv += 2; } else { CUT_DEVICE_INIT(argc, argv); printf("%s number_of_block number_of_output\n", argv[0]); block_num = get_suitable_block_num(sizeof(uint32_t), THREAD_NUM, LARGE_SIZE); if (block_num <= 0) { printf("can't calculate sutable number of blocks.\n"); return 1; } printf("the suitable number of blocks for device 0 " "will be multiple of %d\n", block_num); return 1; } CUT_DEVICE_INIT(argc, argv); num_unit = THREAD_NUM * 3 * block_num; myCutilSafeCall(hipMalloc((void**)&d_status, sizeof(mtgp32_kernel_status_t) * block_num)); myCutilSafeCall(hipMalloc((void**)&d_texture[0], sizeof(uint32_t) * block_num * TBL_SIZE)); myCutilSafeCall(hipMalloc((void**)&d_texture[1], sizeof(uint32_t) * block_num * TBL_SIZE)); myCutilSafeCall(hipMalloc((void**)&d_texture[2], sizeof(uint32_t) * block_num * TBL_SIZE)); r = num_data % num_unit; if (r != 0) { num_data = num_data + num_unit - r; } make_constant_param(MTGPDC_PARAM_TABLE, block_num); make_texture(MTGPDC_PARAM_TABLE, d_texture, block_num); make_kernel_data(d_status, MTGPDC_PARAM_TABLE, block_num); make_uint32_random(d_status, num_data, block_num); make_single_random(d_status, num_data, block_num); make_single01_random(d_status, num_data, block_num); //finalize myCutilSafeCall(hipFree(d_status)); myCutilSafeCall(hipFree(d_texture[0])); myCutilSafeCall(hipFree(d_texture[1])); myCutilSafeCall(hipFree(d_texture[2])); #ifdef NEED_PROMPT CUT_EXIT(argc, argv); #endif }
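/*
 * Editor's note (not part of the original sample): temper_single() and
 * temper_single01() above convert a tempered 32-bit integer to a float with a
 * single shift and XOR, which only works if the float tempering table
 * (flt_tmp_tbl, bound to tex_single_ref) carries the IEEE-754 exponent
 * pattern 0x3f800000. That table layout is an assumption inferred from the
 * documented output ranges [1,2) and [0,1); the minimal host-side sketch
 * below demonstrates the underlying bit trick in isolation.
 */
#include <cstdint>
#include <cstdio>
#include <cstring>

// Reinterpret the bit pattern of a 32-bit integer as an IEEE-754 float,
// the host-side equivalent of the device intrinsic __int_as_float().
static float bits_as_float(uint32_t bits) {
    float f;
    std::memcpy(&f, &bits, sizeof(f));
    return f;
}

int main() {
    uint32_t v = 0x9abcdef0u;            // any 32-bit pseudorandom value

    // Keep the top 23 bits as the mantissa and force sign = 0, exponent = 127
    // (0x3f800000), which always yields a float in [1, 2).
    uint32_t bits = (v >> 9) | 0x3f800000u;

    float in_1_2 = bits_as_float(bits);  // uniform in [1, 2)
    float in_0_1 = in_1_2 - 1.0f;        // uniform in [0, 1)

    std::printf("[1,2): %f  [0,1): %f\n", in_1_2, in_0_1);
    return 0;
}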
d5d74f443bcba7297adcc85335fa97918aafd769.cu
/** * Sample Program for CUDA 2.3 * written by M.Saito ([email protected]) * * This sample uses texture reference. * The generation speed of PRNG using texture is faster than using * constant table on Geforce GTX 260. * * MTGP32-11213 * This program generates 32-bit unsigned integers. * The period of generated integers is 2<sup>11213</sup>-1. * This also generates single precision floating point numbers. */ #define __STDC_FORMAT_MACROS 1 #define __STDC_CONSTANT_MACROS 1 #include <stdio.h> #include <cuda.h> #include <shoverand/util/myCutil.h> #include <stdint.h> #include <inttypes.h> #include <errno.h> #include <stdlib.h> extern "C" { #include "mtgp32-fast.h" #include "mtgp32dc-param-11213.c" } #define MEXP 11213 #define N MTGPDC_N #define THREAD_NUM MTGPDC_FLOOR_2P #define LARGE_SIZE (N + THREAD_NUM) //#define LARGE_SIZE 256 //#define BLOCK_NUM 32 #define BLOCK_NUM_MAX 200 #define TBL_SIZE 16 /** * kernel I/O * This structure must be initialized before first use. */ struct mtgp32_kernel_status_t { uint32_t status[N]; }; texture<uint32_t, 1, cudaReadModeElementType> tex_param_ref; texture<uint32_t, 1, cudaReadModeElementType> tex_temper_ref; texture<uint32_t, 1, cudaReadModeElementType> tex_single_ref; /* * Generator Parameters. */ __constant__ uint32_t pos_tbl[BLOCK_NUM_MAX]; __constant__ uint32_t sh1_tbl[BLOCK_NUM_MAX]; __constant__ uint32_t sh2_tbl[BLOCK_NUM_MAX]; /* high_mask and low_mask should be set by make_constant(), but * did not work. */ //__constant__ uint32_t mask = 0xff800000; __constant__ uint32_t mask[1]; /** * Shared memory * The generator's internal status vector. */ __shared__ uint32_t status[LARGE_SIZE]; /** * The function of the recursion formula calculation. * * @param[in] X1 the farthest part of state array. * @param[in] X2 the second farthest part of state array. * @param[in] Y a part of state array. * @param[in] bid block id. * @return output */ __device__ uint32_t para_rec(uint32_t X1, uint32_t X2, uint32_t Y, int bid) { uint32_t X = (X1 & mask[0]) ^ X2; uint32_t MAT; X ^= X << sh1_tbl[bid]; Y = X ^ (Y >> sh2_tbl[bid]); MAT = tex1Dfetch(tex_param_ref, bid * 16 + (Y & 0x0f)); return Y ^ MAT; } /** * The tempering function. * * @param[in] V the output value should be tempered. * @param[in] T the tempering helper value. * @param[in] bid block id. * @return the tempered value. */ __device__ uint32_t temper(uint32_t V, uint32_t T, int bid) { uint32_t MAT; T ^= T >> 16; T ^= T >> 8; MAT = tex1Dfetch(tex_temper_ref, bid * 16 + (T & 0x0f)); return V ^ MAT; } /** * The tempering and converting function. * By using the preset-ted table, converting to IEEE format * and tempering are done simultaneously. * Resulted outputs are distributed in the range [1, 2). * * @param[in] V the output value should be tempered. * @param[in] T the tempering helper value. * @param[in] bid block id. * @return the tempered and converted value. */ __device__ float temper_single(uint32_t V, uint32_t T, int bid) { uint32_t MAT; uint32_t r; T ^= T >> 16; T ^= T >> 8; MAT = tex1Dfetch(tex_single_ref, bid * 16 + (T & 0x0f)); r = (V >> 9) ^ MAT; return __int_as_float(r); } /** * The tempering and converting function. * By using the preset-ted table, converting to IEEE format * and tempering are done simultaneously. * Resulted outputs are distributed in the range [0, 1). * * @param[in] V the output value should be tempered. * @param[in] T the tempering helper value. * @param[in] bid block id. * @return the tempered and converted value. 
*/ __device__ float temper_single01(uint32_t V, uint32_t T, int bid) { uint32_t MAT; uint32_t r; T ^= T >> 16; T ^= T >> 8; MAT = tex1Dfetch(tex_single_ref, bid * 16 + (T & 0x0f)); r = (V >> 9) ^ MAT; return __int_as_float(r) - 1.0f; } /** * Read the internal state vector from kernel I/O data, and * put them into shared memory. * * @param[out] status shared memory. * @param[in] d_status kernel I/O data * @param[in] bid block id * @param[in] tid thread id */ __device__ void status_read(uint32_t status[LARGE_SIZE], const mtgp32_kernel_status_t *d_status, int bid, int tid) { status[tid] = d_status[bid].status[tid]; if (tid < N - THREAD_NUM) { status[THREAD_NUM + tid] = d_status[bid].status[THREAD_NUM + tid]; } __syncthreads(); } /** * Read the internal state vector from shared memory, and * write them into kernel I/O data. * * @param[out] d_status kernel I/O data * @param[in] status shared memory. * @param[in] bid block id * @param[in] tid thread id */ __device__ void status_write(mtgp32_kernel_status_t *d_status, const uint32_t status[LARGE_SIZE], int bid, int tid, int index) { d_status[bid].status[tid] = status[index % LARGE_SIZE]; if (tid < N - THREAD_NUM) { d_status[bid].status[THREAD_NUM + tid] = status[(THREAD_NUM + index) % LARGE_SIZE]; } __syncthreads(); } /** * kernel function. * This function generates 32-bit unsigned integers in d_data * * @params[in,out] d_status kernel I/O data * @params[out] d_data output * @params[in] size number of output data requested. */ __global__ void mtgp32_uint32_kernel(mtgp32_kernel_status_t* d_status, uint32_t* d_data, int size) { const int bid = blockIdx.x; const int tid = threadIdx.x; int pos = pos_tbl[bid]; int index = tid; uint32_t r; uint32_t o; // copy status data from global memory to shared memory. status_read(status, d_status, bid, tid); // main loop for (int i = 0; i < size; i += THREAD_NUM) { r = para_rec(status[index], status[(index + 1) % LARGE_SIZE], status[(index + pos) % LARGE_SIZE], bid); status[(index + N) % LARGE_SIZE] = r; o = temper(r, status[(index + pos - 1) % LARGE_SIZE], bid); d_data[size * bid + i + tid] = o; __syncthreads(); index = (index + THREAD_NUM) % LARGE_SIZE; } // write back status for next call status_write(d_status, status, bid, tid, index); } /** * kernel function. * This function generates single precision floating point numbers in d_data. * * @params[in,out] d_status kernel I/O data * @params[out] d_data output. IEEE single precision format. * @params[in] size number of output data requested. */ __global__ void mtgp32_single_kernel(mtgp32_kernel_status_t* d_status, float* d_data, int size) { const int bid = blockIdx.x; const int tid = threadIdx.x; int pos = pos_tbl[bid]; int index = tid; uint32_t r; float o; // copy status data from global memory to shared memory. status_read(status, d_status, bid, tid); #if defined(DEBUG) && defined(__DEVICE_EMULATION__) printf("status[0]:%08x\n", status[0]); printf("status[1]:%08x\n", status[1]); #endif // main loop for (int i = 0; i < size; i += THREAD_NUM) { r = para_rec(status[index], status[(index + 1) % LARGE_SIZE], status[(index + pos) % LARGE_SIZE], bid); status[(index + N) % LARGE_SIZE] = r; o = temper_single(r, status[(index + pos - 1) % LARGE_SIZE], bid); d_data[size * bid + i + tid] = o; __syncthreads(); index = (index + THREAD_NUM) % LARGE_SIZE; } // write back status for next call status_write(d_status, status, bid, tid, index); } /** * kernel function. * This function generates single precision floating point numbers in d_data. 
* * @params[in,out] d_status kernel I/O data * @params[out] d_data output. IEEE single precision format. * @params[in] size number of output data requested. */ __global__ void mtgp32_single01_kernel(mtgp32_kernel_status_t* d_status, float* d_data, int size) { const int bid = blockIdx.x; const int tid = threadIdx.x; int pos = pos_tbl[bid]; int index = tid; uint32_t r; float o; // copy status data from global memory to shared memory. status_read(status, d_status, bid, tid); // main loop for (int i = 0; i < size; i += THREAD_NUM) { r = para_rec(status[index], status[(index + 1) % LARGE_SIZE], status[(index + pos) % LARGE_SIZE], bid); status[(index + N) % LARGE_SIZE] = r; o = temper_single01(r, status[(index + pos - 1) % LARGE_SIZE], bid); d_data[size * bid + i + tid] = o; __syncthreads(); index = (index + THREAD_NUM) % LARGE_SIZE; } // write back status for next call status_write(d_status, status, bid, tid, index); } #include "mtgp-cuda-common.c" #include "mtgp32-cuda-common.c" /** * This function sets constants in device memory. * @param params input, MTGP32 parameters. */ void make_constant_param(const mtgp32_params_fast_t params[], int block_num) { const int size1 = sizeof(uint32_t) * block_num; uint32_t *h_pos_tbl; uint32_t *h_sh1_tbl; uint32_t *h_sh2_tbl; uint32_t *h_mask; h_pos_tbl = (uint32_t *)malloc(size1); h_sh1_tbl = (uint32_t *)malloc(size1); h_sh2_tbl = (uint32_t *)malloc(size1); h_mask = (uint32_t *)malloc(sizeof(uint32_t)); if (h_pos_tbl == NULL || h_sh1_tbl == NULL || h_sh2_tbl == NULL || h_mask == NULL ) { printf("failure in allocating host memory for constant table.\n"); exit(1); } h_mask[0] = params[0].mask; for (int i = 0; i < block_num; i++) { h_pos_tbl[i] = params[i].pos; h_sh1_tbl[i] = params[i].sh1; h_sh2_tbl[i] = params[i].sh2; } // copy from malloc area only myCutilSafeCall(cudaMemcpyToSymbol(pos_tbl, h_pos_tbl, size1)); myCutilSafeCall(cudaMemcpyToSymbol(sh1_tbl, h_sh1_tbl, size1)); myCutilSafeCall(cudaMemcpyToSymbol(sh2_tbl, h_sh2_tbl, size1)); myCutilSafeCall(cudaMemcpyToSymbol(mask, h_mask, sizeof(uint32_t))); free(h_pos_tbl); free(h_sh1_tbl); free(h_sh2_tbl); free(h_mask); } /** * This function sets texture lookup table. * @param params input, MTGP32 parameters. 
* @param d_texture_tbl device memory used for texture bind * @param block_num block number used for kernel call */ void make_texture(const mtgp32_params_fast_t params[], uint32_t *d_texture_tbl[3], int block_num) { const int count = block_num * TBL_SIZE; const int size = sizeof(uint32_t) * count; uint32_t *h_texture_tbl[3]; int i, j; for (i = 0; i < 3; i++) { h_texture_tbl[i] = (uint32_t *)malloc(size); if (h_texture_tbl[i] == NULL) { for (j = 0; j < i; j++) { free(h_texture_tbl[i]); } printf("failure in allocating host memory for constant table.\n"); exit(1); } } for (int i = 0; i < block_num; i++) { for (int j = 0; j < TBL_SIZE; j++) { h_texture_tbl[0][i * TBL_SIZE + j] = params[i].tbl[j]; h_texture_tbl[1][i * TBL_SIZE + j] = params[i].tmp_tbl[j]; h_texture_tbl[2][i * TBL_SIZE + j] = params[i].flt_tmp_tbl[j]; } } myCutilSafeCall(cudaMemcpy(d_texture_tbl[0], h_texture_tbl[0], size, cudaMemcpyHostToDevice)); myCutilSafeCall(cudaMemcpy(d_texture_tbl[1], h_texture_tbl[1], size, cudaMemcpyHostToDevice)); myCutilSafeCall(cudaMemcpy(d_texture_tbl[2], h_texture_tbl[2], size, cudaMemcpyHostToDevice)); tex_param_ref.filterMode = cudaFilterModePoint; tex_temper_ref.filterMode = cudaFilterModePoint; tex_single_ref.filterMode = cudaFilterModePoint; myCutilSafeCall(cudaBindTexture(0, tex_param_ref, d_texture_tbl[0], size)); myCutilSafeCall(cudaBindTexture(0, tex_temper_ref, d_texture_tbl[1], size)); myCutilSafeCall(cudaBindTexture(0, tex_single_ref, d_texture_tbl[2], size)); free(h_texture_tbl[0]); free(h_texture_tbl[1]); free(h_texture_tbl[2]); } /** * host function. * This function calls corresponding kernel function. * * @param d_status kernel I/O data. * @param num_data number of data to be generated. */ void make_uint32_random(mtgp32_kernel_status_t* d_status, int num_data, int block_num) { uint32_t* d_data; unsigned int timer = 0; uint32_t* h_data; cudaError_t e; float gputime; printf("generating 32-bit unsigned random numbers.\n"); myCutilSafeCall(cudaMalloc((void**)&d_data, sizeof(uint32_t) * num_data)); CUT_SAFE_CALL(cutCreateTimer(&timer)); h_data = (uint32_t *) malloc(sizeof(uint32_t) * num_data); if (h_data == NULL) { printf("failure in allocating host memory for output data.\n"); exit(1); } CUT_SAFE_CALL(cutStartTimer(timer)); if (cudaGetLastError() != cudaSuccess) { printf("error has been occured before kernel call.\n"); exit(1); } /* kernel call */ mtgp32_uint32_kernel<<< block_num, THREAD_NUM>>>( d_status, d_data, num_data / block_num); cudaThreadSynchronize(); e = cudaGetLastError(); if (e != cudaSuccess) { printf("failure in kernel call.\n%s\n", cudaGetErrorString(e)); exit(1); } CUT_SAFE_CALL(cutStopTimer(timer)); myCutilSafeCall( cudaMemcpy(h_data, d_data, sizeof(uint32_t) * num_data, cudaMemcpyDeviceToHost)); gputime = cutGetTimerValue(timer); print_uint32_array(h_data, num_data, block_num); printf("generated numbers: %d\n", num_data); printf("Processing time: %f (ms)\n", gputime); printf("Samples per second: %E \n", num_data / (gputime * 0.001)); CUT_SAFE_CALL(cutDeleteTimer(timer)); //free memories free(h_data); myCutilSafeCall(cudaFree(d_data)); } /** * host function. * This function calls corresponding kernel function. * * @param d_status kernel I/O data. * @param num_data number of data to be generated. 
*/ void make_single_random(mtgp32_kernel_status_t* d_status, int num_data, int block_num) { float* d_data; unsigned int timer = 0; float* h_data; cudaError_t e; float gputime; printf("generating single precision floating point random numbers.\n"); myCutilSafeCall(cudaMalloc((void**)&d_data, sizeof(float) * num_data)); CUT_SAFE_CALL(cutCreateTimer(&timer)); h_data = (float *) malloc(sizeof(float) * num_data); if (h_data == NULL) { printf("failure in allocating host memory for output data.\n"); exit(1); } CUT_SAFE_CALL(cutStartTimer(timer)); if (cudaGetLastError() != cudaSuccess) { printf("error has been occured before kernel call.\n"); exit(1); } /* kernel call */ mtgp32_single_kernel<<< block_num, THREAD_NUM >>>( d_status, d_data, num_data / block_num); cudaThreadSynchronize(); e = cudaGetLastError(); if (e != cudaSuccess) { printf("failure in kernel call.\n%s\n", cudaGetErrorString(e)); exit(1); } CUT_SAFE_CALL(cutStopTimer(timer)); myCutilSafeCall( cudaMemcpy(h_data, d_data, sizeof(float) * num_data, cudaMemcpyDeviceToHost)); gputime = cutGetTimerValue(timer); print_float_array(h_data, num_data, block_num); printf("generated numbers: %d\n", num_data); printf("Processing time: %f (ms)\n", gputime); printf("Samples per second: %E \n", num_data / (gputime * 0.001)); CUT_SAFE_CALL(cutDeleteTimer(timer)); //free memories free(h_data); myCutilSafeCall(cudaFree(d_data)); } /** * host function. * This function calls corresponding kernel function. * * @param d_status kernel I/O data. * @param num_data number of data to be generated. */ void make_single01_random(mtgp32_kernel_status_t* d_status, int num_data, int block_num) { float* d_data; unsigned int timer = 0; float* h_data; cudaError_t e; float gputime; printf("generating single precision floating point random numbers.\n"); myCutilSafeCall(cudaMalloc((void**)&d_data, sizeof(float) * num_data)); CUT_SAFE_CALL(cutCreateTimer(&timer)); h_data = (float *) malloc(sizeof(float) * num_data); if (h_data == NULL) { printf("failure in allocating host memory for output data.\n"); exit(1); } CUT_SAFE_CALL(cutStartTimer(timer)); if (cudaGetLastError() != cudaSuccess) { printf("error has been occured before kernel call.\n"); exit(1); } /* kernel call */ mtgp32_single01_kernel<<< block_num, THREAD_NUM >>>( d_status, d_data, num_data / block_num); cudaThreadSynchronize(); e = cudaGetLastError(); if (e != cudaSuccess) { printf("failure in kernel call.\n%s\n", cudaGetErrorString(e)); exit(1); } CUT_SAFE_CALL(cutStopTimer(timer)); myCutilSafeCall( cudaMemcpy(h_data, d_data, sizeof(float) * num_data, cudaMemcpyDeviceToHost)); gputime = cutGetTimerValue(timer); print_float_array(h_data, num_data, block_num); printf("generated numbers: %d\n", num_data); printf("Processing time: %f (ms)\n", gputime); printf("Samples per second: %E \n", num_data / (gputime * 0.001)); CUT_SAFE_CALL(cutDeleteTimer(timer)); //free memories free(h_data); myCutilSafeCall(cudaFree(d_data)); } int main(int argc, char *argv[]) { // LARGE_SIZE is a multiple of 16 int num_data = 10000000; int block_num; int num_unit; int r; mtgp32_kernel_status_t *d_status; uint32_t *d_texture[3]; if (argc >= 2) { errno = 0; block_num = strtol(argv[1], NULL, 10); if (errno) { printf("%s number_of_block number_of_output\n", argv[0]); return 1; } if (block_num < 1 || block_num > BLOCK_NUM_MAX) { printf("%s block_num should be between 1 and %d\n", argv[0], BLOCK_NUM_MAX); return 1; } errno = 0; num_data = strtol(argv[2], NULL, 10); if (errno) { printf("%s number_of_block number_of_output\n", argv[0]); return 1; 
} argc -= 2; argv += 2; } else { CUT_DEVICE_INIT(argc, argv); printf("%s number_of_block number_of_output\n", argv[0]); block_num = get_suitable_block_num(sizeof(uint32_t), THREAD_NUM, LARGE_SIZE); if (block_num <= 0) { printf("can't calculate sutable number of blocks.\n"); return 1; } printf("the suitable number of blocks for device 0 " "will be multiple of %d\n", block_num); return 1; } CUT_DEVICE_INIT(argc, argv); num_unit = THREAD_NUM * 3 * block_num; myCutilSafeCall(cudaMalloc((void**)&d_status, sizeof(mtgp32_kernel_status_t) * block_num)); myCutilSafeCall(cudaMalloc((void**)&d_texture[0], sizeof(uint32_t) * block_num * TBL_SIZE)); myCutilSafeCall(cudaMalloc((void**)&d_texture[1], sizeof(uint32_t) * block_num * TBL_SIZE)); myCutilSafeCall(cudaMalloc((void**)&d_texture[2], sizeof(uint32_t) * block_num * TBL_SIZE)); r = num_data % num_unit; if (r != 0) { num_data = num_data + num_unit - r; } make_constant_param(MTGPDC_PARAM_TABLE, block_num); make_texture(MTGPDC_PARAM_TABLE, d_texture, block_num); make_kernel_data(d_status, MTGPDC_PARAM_TABLE, block_num); make_uint32_random(d_status, num_data, block_num); make_single_random(d_status, num_data, block_num); make_single01_random(d_status, num_data, block_num); //finalize myCutilSafeCall(cudaFree(d_status)); myCutilSafeCall(cudaFree(d_texture[0])); myCutilSafeCall(cudaFree(d_texture[1])); myCutilSafeCall(cudaFree(d_texture[2])); #ifdef NEED_PROMPT CUT_EXIT(argc, argv); #endif }
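/*
 * Editor's note (not part of the original sample): main() above rounds the
 * requested sample count up to a multiple of num_unit = THREAD_NUM * 3 *
 * block_num, and each kernel writes d_data[size * bid + i + tid] with
 * size = num_data / block_num, so every block fills one contiguous chunk
 * whose length is a multiple of THREAD_NUM. The host-side sketch below just
 * restates that arithmetic; thread_num and block_num stand in for the
 * sample's THREAD_NUM and chosen block count.
 */
#include <cstdio>

// Pads a requested output count the same way main() above does, so that the
// per-block chunk handed to the kernels is a multiple of the thread count.
int padded_output_count(int requested, int thread_num, int block_num) {
    const int num_unit = thread_num * 3 * block_num;
    const int r = requested % num_unit;
    const int num_data = (r == 0) ? requested : requested + (num_unit - r);
    const int size = num_data / block_num;   // per-block `size` argument
    // Block b, loop iteration i, thread t writes d_data[size * b + i + t],
    // so block b owns the contiguous index range [size * b, size * (b + 1)).
    std::printf("num_data = %d, per-block size = %d\n", num_data, size);
    return num_data;
}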
1534ec03c538d3181c439cfec049aad85db2a890.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "utils.h" #include "common.h" #include "THHDeviceTensor.cuh" #include "THHDeviceTensorUtils.cuh" #include "THHDeviceUtils.cuh" template <typename Dtype> __global__ void ReArrangeBackForward(int threads, const Dtype* input_data, Dtype* output_data, int samplenum, int channels, int height, int width ) { // int i,h,w,c; int imgsize = height * width * channels; CUDA_KERNEL_LOOP(index, threads) { int i = index / imgsize; int c = (index / (height * width)) % channels; int h = (index / width) % height; int w = index % width; const Dtype* now_input = input_data + i * imgsize + (h * width + w) * channels; output_data[i * imgsize + c * height * width + h * width + w] = now_input[c]; } } template <typename Dtype> __global__ void ReArrangeBackBackward(int threads, const Dtype* gradOutput_data, Dtype* gradInput_data, int samplenum, int channels, int height, int width ) { // int i,h,w,c; int imgsize = height * width * channels; CUDA_KERNEL_LOOP(index, threads) { int i = index / imgsize; int c = (index / (height * width)) % channels; int h = (index / width) % height; int w = index % width; Dtype * now_gradInput = gradInput_data + i * imgsize + (h * width + w) * channels; now_gradInput[c] = gradOutput_data[i * imgsize + c * height * width + h * width + w]; } } static int cunn_ReArrangeBack_updateOutput(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *output = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor"); int samplenum = luaT_getfieldcheckint(L, 1, "n"); int height = luaT_getfieldcheckint(L, 1, "h"); int width = luaT_getfieldcheckint(L, 1, "w"); int channels = input->size[1]; int imgsize = height * width * channels; THCudaTensor_resize4d(state, output, samplenum, channels, height, width); input = THCudaTensor_newContiguous(state, input); float* input_data = THCudaTensor_data(state, input); float* output_data = THCudaTensor_data(state, output); int count = samplenum * imgsize; hipLaunchKernelGGL(( ReArrangeBackForward) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) , count, input_data, output_data, samplenum, channels, height, width ); THCudaTensor_free(state, input); return 1; } static int cunn_ReArrangeBack_updateGradInput(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor"); THCudaTensor *gradInput = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor"); int samplenum = luaT_getfieldcheckint(L, 1, "n"); int height = luaT_getfieldcheckint(L, 1, "h"); int width = luaT_getfieldcheckint(L, 1, "w"); int channels = input->size[1]; int imgsize = height * width * channels; THCudaTensor_resize2d(state, gradInput, samplenum * height * width, channels); gradOutput = THCudaTensor_newContiguous(state, gradOutput); float* gradOutput_data = THCudaTensor_data(state, gradOutput); float* gradInput_data = THCudaTensor_data(state, gradInput); int count = samplenum * imgsize; hipLaunchKernelGGL(( ReArrangeBackBackward) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) , count, gradOutput_data, gradInput_data, samplenum, channels, height, width ); THCudaTensor_free(state, gradOutput); return 1; } static const struct luaL_Reg 
cunn_ReArrangeBack__ [] = { {"ReArrangeBack_updateOutput", cunn_ReArrangeBack_updateOutput}, {"ReArrangeBack_updateGradInput", cunn_ReArrangeBack_updateGradInput}, {NULL, NULL} }; void cunn_ReArrangeBack_init(lua_State *L) { luaT_pushmetatable(L, "torch.CudaTensor"); luaT_registeratname(L, cunn_ReArrangeBack__, "nn"); lua_pop(L,1); }
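/*
 * Editor's note (not part of the original source): the kernels above iterate
 * with CUDA_KERNEL_LOOP, which comes from the included "common.h". In
 * Caffe/Torch-style headers that macro is typically the grid-stride loop
 * sketched below; treat this definition as an assumption about that header,
 * not a quote from it.
 */
#define CUDA_KERNEL_LOOP_SKETCH(i, n)                          \
    for (int i = blockIdx.x * blockDim.x + threadIdx.x;        \
         i < (n);                                              \
         i += blockDim.x * gridDim.x)

// With GET_BLOCKS(count) blocks of CUDA_NUM_THREADS threads, every index in
// [0, count) is visited exactly once, and the loop remains correct even if
// the launch uses fewer threads than there are elements.
__global__ void grid_stride_demo(const float* in, float* out, int count) {
    CUDA_KERNEL_LOOP_SKETCH(index, count) {
        out[index] = in[index];
    }
}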
1534ec03c538d3181c439cfec049aad85db2a890.cu
#include "utils.h" #include "common.h" #include "THCDeviceTensor.cuh" #include "THCDeviceTensorUtils.cuh" #include "THCDeviceUtils.cuh" template <typename Dtype> __global__ void ReArrangeBackForward(int threads, const Dtype* input_data, Dtype* output_data, int samplenum, int channels, int height, int width ) { // int i,h,w,c; int imgsize = height * width * channels; CUDA_KERNEL_LOOP(index, threads) { int i = index / imgsize; int c = (index / (height * width)) % channels; int h = (index / width) % height; int w = index % width; const Dtype* now_input = input_data + i * imgsize + (h * width + w) * channels; output_data[i * imgsize + c * height * width + h * width + w] = now_input[c]; } } template <typename Dtype> __global__ void ReArrangeBackBackward(int threads, const Dtype* gradOutput_data, Dtype* gradInput_data, int samplenum, int channels, int height, int width ) { // int i,h,w,c; int imgsize = height * width * channels; CUDA_KERNEL_LOOP(index, threads) { int i = index / imgsize; int c = (index / (height * width)) % channels; int h = (index / width) % height; int w = index % width; Dtype * now_gradInput = gradInput_data + i * imgsize + (h * width + w) * channels; now_gradInput[c] = gradOutput_data[i * imgsize + c * height * width + h * width + w]; } } static int cunn_ReArrangeBack_updateOutput(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *output = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor"); int samplenum = luaT_getfieldcheckint(L, 1, "n"); int height = luaT_getfieldcheckint(L, 1, "h"); int width = luaT_getfieldcheckint(L, 1, "w"); int channels = input->size[1]; int imgsize = height * width * channels; THCudaTensor_resize4d(state, output, samplenum, channels, height, width); input = THCudaTensor_newContiguous(state, input); float* input_data = THCudaTensor_data(state, input); float* output_data = THCudaTensor_data(state, output); int count = samplenum * imgsize; ReArrangeBackForward <<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>> (count, input_data, output_data, samplenum, channels, height, width ); THCudaTensor_free(state, input); return 1; } static int cunn_ReArrangeBack_updateGradInput(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor"); THCudaTensor *gradInput = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor"); int samplenum = luaT_getfieldcheckint(L, 1, "n"); int height = luaT_getfieldcheckint(L, 1, "h"); int width = luaT_getfieldcheckint(L, 1, "w"); int channels = input->size[1]; int imgsize = height * width * channels; THCudaTensor_resize2d(state, gradInput, samplenum * height * width, channels); gradOutput = THCudaTensor_newContiguous(state, gradOutput); float* gradOutput_data = THCudaTensor_data(state, gradOutput); float* gradInput_data = THCudaTensor_data(state, gradInput); int count = samplenum * imgsize; ReArrangeBackBackward <<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>> (count, gradOutput_data, gradInput_data, samplenum, channels, height, width ); THCudaTensor_free(state, gradOutput); return 1; } static const struct luaL_Reg cunn_ReArrangeBack__ [] = { {"ReArrangeBack_updateOutput", cunn_ReArrangeBack_updateOutput}, {"ReArrangeBack_updateGradInput", 
cunn_ReArrangeBack_updateGradInput}, {NULL, NULL} }; void cunn_ReArrangeBack_init(lua_State *L) { luaT_pushmetatable(L, "torch.CudaTensor"); luaT_registeratname(L, cunn_ReArrangeBack__, "nn"); lua_pop(L,1); }
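/*
 * Editor's note (not part of the original source): ReArrangeBackForward above
 * reads element (i, h, w, c) from a (samplenum*height*width, channels) tensor
 * (channels fastest) and writes it into (samplenum, channels, height, width)
 * order, i.e. an NHWC-style to NCHW conversion; the backward kernel is the
 * exact inverse. The plain C++ reference below mirrors the same index
 * arithmetic for checking the kernels on the host.
 */
#include <vector>

// Rearranges `in`, laid out as in[((i*height + h)*width + w)*channels + c],
// into `out`, laid out as out[((i*channels + c)*height + h)*width + w].
void rearrange_back_forward_cpu(const std::vector<float>& in,
                                std::vector<float>& out,
                                int samplenum, int channels,
                                int height, int width) {
    const int imgsize = channels * height * width;
    out.assign(static_cast<size_t>(samplenum) * imgsize, 0.0f);
    for (int i = 0; i < samplenum; ++i)
        for (int h = 0; h < height; ++h)
            for (int w = 0; w < width; ++w)
                for (int c = 0; c < channels; ++c)
                    out[i * imgsize + c * height * width + h * width + w] =
                        in[i * imgsize + (h * width + w) * channels + c];
}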
47ff83fa009cd661e80b6d5cbbf183b0fe021979.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <chrono> #include <fstream> #include <iostream> #include <random> #include <sstream> #include <stdexcept> #include <vector> #include <stdio.h> #include <stdlib.h> #include <string> double std_time_used; struct Data { Data(int size) : size(size), bytes(size * sizeof(float)) { hipMallocManaged(&x, bytes); hipMallocManaged(&y, bytes); hipMemset(x, 0, bytes); hipMemset(y, 0, bytes); } Data(int size, std::vector<float>& h_x, std::vector<float>& h_y) : size(size), bytes(size * sizeof(float)) { hipMallocManaged(&x, bytes); hipMallocManaged(&y, bytes); // hipMemcpy(x, h_x.data(), bytes, hipMemcpyHostToDevice); // hipMemcpy(y, h_y.data(), bytes, hipMemcpyHostToDevice); memcpy(x, h_x.data(), bytes); memcpy(y, h_y.data(), bytes); } ~Data() { hipFree(x); hipFree(y); } float* x{nullptr}; float* y{nullptr}; int size{0}; int bytes{0}; }; __device__ float squared_l2_distance(float x_1, float y_1, float x_2, float y_2) { return (x_1 - x_2) * (x_1 - x_2) + (y_1 - y_2) * (y_1 - y_2); } __global__ void fine_reduce(const float* __restrict__ data_x, const float* __restrict__ data_y, int data_size, const float* __restrict__ means_x, const float* __restrict__ means_y, float* __restrict__ new_sums_x, float* __restrict__ new_sums_y, int k, int* __restrict__ counts) { extern __shared__ float shared_data[]; const int local_index = threadIdx.x; const int global_index = blockIdx.x * blockDim.x + threadIdx.x; if (global_index >= data_size) return; // Load the mean values into shared memory. if (local_index < k) { shared_data[local_index] = means_x[local_index]; shared_data[k + local_index] = means_y[local_index]; } __syncthreads(); // Load once here. const float x_value = data_x[global_index]; const float y_value = data_y[global_index]; float best_distance = FLT_MAX; int best_cluster = -1; for (int cluster = 0; cluster < k; ++cluster) { const float distance = squared_l2_distance(x_value, y_value, shared_data[cluster], shared_data[k + cluster]); if (distance < best_distance) { best_distance = distance; best_cluster = cluster; } } __syncthreads(); // reduction const int x = local_index; const int y = local_index + blockDim.x; const int count = local_index + blockDim.x + blockDim.x; for (int cluster = 0; cluster < k; ++cluster) { shared_data[x] = (best_cluster == cluster) ? x_value : 0; shared_data[y] = (best_cluster == cluster) ? y_value : 0; shared_data[count] = (best_cluster == cluster) ? 1 : 0; __syncthreads(); // Reduction for this cluster. for (int stride = blockDim.x / 2; stride > 0; stride /= 2) { if (local_index < stride) { shared_data[x] += shared_data[x + stride]; shared_data[y] += shared_data[y + stride]; shared_data[count] += shared_data[count + stride]; } __syncthreads(); } // Now shared_data[0] holds the sum for x. 
if (local_index == 0) { const int cluster_index = blockIdx.x * k + cluster; new_sums_x[cluster_index] = shared_data[x]; new_sums_y[cluster_index] = shared_data[y]; counts[cluster_index] = shared_data[count]; } __syncthreads(); } } __global__ void coarse_reduce(float* __restrict__ means_x, float* __restrict__ means_y, float* __restrict__ new_sum_x, float* __restrict__ new_sum_y, int k, int* __restrict__ counts) { extern __shared__ float shared_data[]; const int index = threadIdx.x; const int y_offset = blockDim.x; shared_data[index] = new_sum_x[index]; shared_data[y_offset + index] = new_sum_y[index]; __syncthreads(); for (int stride = blockDim.x / 2; stride >= k; stride /= 2) { if (index < stride) { shared_data[index] += shared_data[index + stride]; shared_data[y_offset + index] += shared_data[y_offset + index + stride]; } __syncthreads(); } if (index < k) { const int count = max(1, counts[index]); means_x[index] = new_sum_x[index] / count; means_y[index] = new_sum_y[index] / count; new_sum_y[index] = 0; new_sum_x[index] = 0; counts[index] = 0; } } int main(int argc, const char* argv[]) { if (argc < 4) { std::cerr << "usage: k-means <data-file> <k> [iterations]" << std::endl; std::exit(EXIT_FAILURE); } const auto k = std::atoi(argv[3]); const auto number_of_iterations = (argc == 5) ? std::atoi(argv[4]) : 300; std::vector<float> h_x; std::vector<float> h_y; std::ifstream stream(argv[2]); std::string line; while (std::getline(stream, line)) { std::istringstream line_stream(line); float x, y; uint16_t label; line_stream >> x >> y >> label; h_x.push_back(x); h_y.push_back(y); } const size_t number_of_elements = h_x.size(); Data d_data(number_of_elements, h_x, h_y); std::mt19937 rng(std::random_device{}()); std::shuffle(h_x.begin(), h_x.end(), rng); std::shuffle(h_y.begin(), h_y.end(), rng); Data d_means(k, h_x, h_y); const int threads = 1024; const int blocks = (number_of_elements + threads - 1) / threads; //std::cerr << "Processing " << number_of_elements << " points on " << blocks // << " blocks x " << threads << " threads" << std::endl; // * 3 for x, y and counts. const int fine_shared_memory = 3 * threads * sizeof(float); // * 2 for x and y. Will have k * blocks threads for the coarse reduction. 
const int coarse_shared_memory = 2 * k * blocks * sizeof(float); Data d_sums(k * blocks); int* d_counts; hipMalloc(&d_counts, k * blocks * sizeof(int)); hipMemset(d_counts, 0, k * blocks * sizeof(int)); const auto start = std::chrono::high_resolution_clock::now(); for (size_t iteration = 0; iteration < number_of_iterations; ++iteration) { hipLaunchKernelGGL(( fine_reduce), dim3(blocks), dim3(threads), fine_shared_memory, 0, d_data.x, d_data.y, d_data.size, d_means.x, d_means.y, d_sums.x, d_sums.y, k, d_counts); hipDeviceSynchronize(); hipLaunchKernelGGL(( coarse_reduce), dim3(1), dim3(k * blocks), coarse_shared_memory, 0, d_means.x, d_means.y, d_sums.x, d_sums.y, k, d_counts); hipDeviceSynchronize(); } const auto end = std::chrono::high_resolution_clock::now(); const auto duration = std::chrono::duration_cast<std::chrono::duration<float>>(end - start); std::cerr << "Standard CUDA implementation Took: " << duration.count() << "s" << " for "<<h_x.size()<<" points."<<std::endl; std_time_used = duration.count(); hipFree(d_counts); std::vector<float> mean_x(k, 0); std::vector<float> mean_y(k, 0); // hipMemcpy(mean_x.data(), d_means.x, d_means.bytes, hipMemcpyDeviceToHost); // hipMemcpy(mean_y.data(), d_means.y, d_means.bytes, hipMemcpyDeviceToHost); memcpy(mean_x.data(), d_means.x, d_means.bytes); memcpy(mean_y.data(), d_means.y, d_means.bytes); for (size_t cluster = 0; cluster < k; ++cluster) { //std::cout << mean_x[cluster] << " " << mean_y[cluster] << std::endl; } FILE *fp; int i; fp = fopen("Standardtimes.txt", "a"); fprintf(fp, "%0.6f\n", std_time_used); fclose(fp); std::string str(std::to_string(h_x.size())),str1,str2; str = "results/standard/" + str; str2 = str + "_centroids.txt"; fp = fopen(str2.c_str(), "w"); for(i = 0; i < k; ++i){ fprintf(fp, "%0.6f %0.6f\n", mean_x[i], mean_y[i]); } fclose(fp); }
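/*
 * Editor's note (not part of the original source): coarse_reduce above is
 * launched as a single block of k * blocks threads, and the fine-reduce grid
 * uses 1024-thread blocks, so the program implicitly requires
 * k * ceil(n / 1024) <= 1024 (the CUDA per-block thread limit). The small
 * host-side guard below makes that assumption explicit; it is an added
 * check, not part of the original program.
 */
#include <cstdio>

// Returns true if the <<<1, k * blocks>>> coarse reduction launch is legal
// for `n` points and `k` clusters with 1024-thread fine-reduce blocks.
bool coarse_reduce_launch_fits(long long n, int k, int max_threads = 1024) {
    const long long blocks = (n + max_threads - 1) / max_threads;
    const long long coarse_threads = static_cast<long long>(k) * blocks;
    if (coarse_threads > max_threads) {
        std::fprintf(stderr,
                     "coarse_reduce needs %lld threads in one block "
                     "(limit %d); reduce k or the point count per launch.\n",
                     coarse_threads, max_threads);
        return false;
    }
    return true;
}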
47ff83fa009cd661e80b6d5cbbf183b0fe021979.cu
#include <algorithm> #include <cfloat> #include <chrono> #include <fstream> #include <iostream> #include <random> #include <sstream> #include <stdexcept> #include <vector> #include <stdio.h> #include <stdlib.h> #include <string> double std_time_used; struct Data { Data(int size) : size(size), bytes(size * sizeof(float)) { cudaMallocManaged(&x, bytes); cudaMallocManaged(&y, bytes); cudaMemset(x, 0, bytes); cudaMemset(y, 0, bytes); } Data(int size, std::vector<float>& h_x, std::vector<float>& h_y) : size(size), bytes(size * sizeof(float)) { cudaMallocManaged(&x, bytes); cudaMallocManaged(&y, bytes); // cudaMemcpy(x, h_x.data(), bytes, cudaMemcpyHostToDevice); // cudaMemcpy(y, h_y.data(), bytes, cudaMemcpyHostToDevice); memcpy(x, h_x.data(), bytes); memcpy(y, h_y.data(), bytes); } ~Data() { cudaFree(x); cudaFree(y); } float* x{nullptr}; float* y{nullptr}; int size{0}; int bytes{0}; }; __device__ float squared_l2_distance(float x_1, float y_1, float x_2, float y_2) { return (x_1 - x_2) * (x_1 - x_2) + (y_1 - y_2) * (y_1 - y_2); } __global__ void fine_reduce(const float* __restrict__ data_x, const float* __restrict__ data_y, int data_size, const float* __restrict__ means_x, const float* __restrict__ means_y, float* __restrict__ new_sums_x, float* __restrict__ new_sums_y, int k, int* __restrict__ counts) { extern __shared__ float shared_data[]; const int local_index = threadIdx.x; const int global_index = blockIdx.x * blockDim.x + threadIdx.x; if (global_index >= data_size) return; // Load the mean values into shared memory. if (local_index < k) { shared_data[local_index] = means_x[local_index]; shared_data[k + local_index] = means_y[local_index]; } __syncthreads(); // Load once here. const float x_value = data_x[global_index]; const float y_value = data_y[global_index]; float best_distance = FLT_MAX; int best_cluster = -1; for (int cluster = 0; cluster < k; ++cluster) { const float distance = squared_l2_distance(x_value, y_value, shared_data[cluster], shared_data[k + cluster]); if (distance < best_distance) { best_distance = distance; best_cluster = cluster; } } __syncthreads(); // reduction const int x = local_index; const int y = local_index + blockDim.x; const int count = local_index + blockDim.x + blockDim.x; for (int cluster = 0; cluster < k; ++cluster) { shared_data[x] = (best_cluster == cluster) ? x_value : 0; shared_data[y] = (best_cluster == cluster) ? y_value : 0; shared_data[count] = (best_cluster == cluster) ? 1 : 0; __syncthreads(); // Reduction for this cluster. for (int stride = blockDim.x / 2; stride > 0; stride /= 2) { if (local_index < stride) { shared_data[x] += shared_data[x + stride]; shared_data[y] += shared_data[y + stride]; shared_data[count] += shared_data[count + stride]; } __syncthreads(); } // Now shared_data[0] holds the sum for x. 
if (local_index == 0) { const int cluster_index = blockIdx.x * k + cluster; new_sums_x[cluster_index] = shared_data[x]; new_sums_y[cluster_index] = shared_data[y]; counts[cluster_index] = shared_data[count]; } __syncthreads(); } } __global__ void coarse_reduce(float* __restrict__ means_x, float* __restrict__ means_y, float* __restrict__ new_sum_x, float* __restrict__ new_sum_y, int k, int* __restrict__ counts) { extern __shared__ float shared_data[]; const int index = threadIdx.x; const int y_offset = blockDim.x; shared_data[index] = new_sum_x[index]; shared_data[y_offset + index] = new_sum_y[index]; __syncthreads(); for (int stride = blockDim.x / 2; stride >= k; stride /= 2) { if (index < stride) { shared_data[index] += shared_data[index + stride]; shared_data[y_offset + index] += shared_data[y_offset + index + stride]; } __syncthreads(); } if (index < k) { const int count = max(1, counts[index]); means_x[index] = new_sum_x[index] / count; means_y[index] = new_sum_y[index] / count; new_sum_y[index] = 0; new_sum_x[index] = 0; counts[index] = 0; } } int main(int argc, const char* argv[]) { if (argc < 4) { std::cerr << "usage: k-means <data-file> <k> [iterations]" << std::endl; std::exit(EXIT_FAILURE); } const auto k = std::atoi(argv[3]); const auto number_of_iterations = (argc == 5) ? std::atoi(argv[4]) : 300; std::vector<float> h_x; std::vector<float> h_y; std::ifstream stream(argv[2]); std::string line; while (std::getline(stream, line)) { std::istringstream line_stream(line); float x, y; uint16_t label; line_stream >> x >> y >> label; h_x.push_back(x); h_y.push_back(y); } const size_t number_of_elements = h_x.size(); Data d_data(number_of_elements, h_x, h_y); std::mt19937 rng(std::random_device{}()); std::shuffle(h_x.begin(), h_x.end(), rng); std::shuffle(h_y.begin(), h_y.end(), rng); Data d_means(k, h_x, h_y); const int threads = 1024; const int blocks = (number_of_elements + threads - 1) / threads; //std::cerr << "Processing " << number_of_elements << " points on " << blocks // << " blocks x " << threads << " threads" << std::endl; // * 3 for x, y and counts. const int fine_shared_memory = 3 * threads * sizeof(float); // * 2 for x and y. Will have k * blocks threads for the coarse reduction. 
const int coarse_shared_memory = 2 * k * blocks * sizeof(float); Data d_sums(k * blocks); int* d_counts; cudaMalloc(&d_counts, k * blocks * sizeof(int)); cudaMemset(d_counts, 0, k * blocks * sizeof(int)); const auto start = std::chrono::high_resolution_clock::now(); for (size_t iteration = 0; iteration < number_of_iterations; ++iteration) { fine_reduce<<<blocks, threads, fine_shared_memory>>>(d_data.x, d_data.y, d_data.size, d_means.x, d_means.y, d_sums.x, d_sums.y, k, d_counts); cudaDeviceSynchronize(); coarse_reduce<<<1, k * blocks, coarse_shared_memory>>>(d_means.x, d_means.y, d_sums.x, d_sums.y, k, d_counts); cudaDeviceSynchronize(); } const auto end = std::chrono::high_resolution_clock::now(); const auto duration = std::chrono::duration_cast<std::chrono::duration<float>>(end - start); std::cerr << "Standard CUDA implementation Took: " << duration.count() << "s" << " for "<<h_x.size()<<" points."<<std::endl; std_time_used = duration.count(); cudaFree(d_counts); std::vector<float> mean_x(k, 0); std::vector<float> mean_y(k, 0); // cudaMemcpy(mean_x.data(), d_means.x, d_means.bytes, cudaMemcpyDeviceToHost); // cudaMemcpy(mean_y.data(), d_means.y, d_means.bytes, cudaMemcpyDeviceToHost); memcpy(mean_x.data(), d_means.x, d_means.bytes); memcpy(mean_y.data(), d_means.y, d_means.bytes); for (size_t cluster = 0; cluster < k; ++cluster) { //std::cout << mean_x[cluster] << " " << mean_y[cluster] << std::endl; } FILE *fp; int i; fp = fopen("Standardtimes.txt", "a"); fprintf(fp, "%0.6f\n", std_time_used); fclose(fp); std::string str(std::to_string(h_x.size())),str1,str2; str = "results/standard/" + str; str2 = str + "_centroids.txt"; fp = fopen(str2.c_str(), "w"); for(i = 0; i < k; ++i){ fprintf(fp, "%0.6f %0.6f\n", mean_x[i], mean_y[i]); } fclose(fp); }
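/*
 * Editor's note (not part of the original source): for each cluster,
 * fine_reduce above stores a per-thread x value, y value and 0/1 count in a
 * single shared array of 3 * blockDim.x floats and folds them with a
 * stride-halving tree reduction, leaving the block's partial sums in element
 * 0 of each third. The host-side model below replays that pattern for one of
 * the three thirds so the indexing is easy to verify; it assumes a
 * power-of-two length, which the kernel's 1024-thread blocks satisfy.
 */
#include <vector>

// Reduces `vals` (size must be a power of two) in place the same way the
// kernel folds shared_data for x, y or count; the total ends up in vals[0].
void tree_reduce(std::vector<float>& vals) {
    for (size_t stride = vals.size() / 2; stride > 0; stride /= 2)
        for (size_t i = 0; i < stride; ++i)   // one "thread" per index i
            vals[i] += vals[i + stride];
}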
a50b8ec94c2226994c12bd94288711e4dc1b1d8e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* GPU Kernels for the mesh to particles functions @author: Stefan Hegglin, Adrian Oeftiger */ extern "C" { __global__ void mesh_to_particles_2d( int nparticles, double* particles_quantity, double *mesh_quantity, const int nx, const int ny, double *wij, double *wi1j, double *wij1, double *wi1j1, int *i, int *j) { int pidx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y* blockDim.x + threadIdx.x; int ix = i[pidx]; int jx = j[pidx]; if (pidx < nparticles) { if (jx >= 0 && jx < nx - 1 && ix >= 0 && ix < ny - 1) { particles_quantity[pidx] = ( wij[pidx] * mesh_quantity[jx + ix*nx] + wij1[pidx] * mesh_quantity[jx+1 + ix*nx] + wi1j[pidx] * mesh_quantity[jx+ + (ix+1)*nx] + wi1j1[pidx] * mesh_quantity[jx+1 + (ix+1)*nx]); } else { particles_quantity[pidx] = 0; } } } __global__ void field_to_particles_2d( int nparticles, double* forcex, double* forcey, double* fieldx, double* fieldy, const int nx, const int ny, double *wij, double *wi1j, double *wij1, double *wi1j1, int *i, int *j) { int pidx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y* blockDim.x + threadIdx.x; int jx = j[pidx]; int ix = i[pidx]; if (pidx < nparticles) { if (jx >= 0 && jx < nx - 1 && ix >= 0 && ix < ny - 1) { forcex[pidx] = ( wij[pidx] * fieldx[jx + ix*nx] + wij1[pidx] * fieldx[jx+1 + ix*nx] + wi1j[pidx] * fieldx[jx + (ix+1)*nx] + wi1j1[pidx] *fieldx[jx+1 + (ix+1)*nx]); forcey[pidx] = ( wij[pidx] * fieldy[jx + ix*nx] + wij1[pidx] * fieldy[jx+1 + ix*nx] + wi1j[pidx] * fieldy[jx + (ix+1)*nx] + wi1j1[pidx] *fieldy[jx+1 + (ix+1)*nx]); } else { forcex[pidx] = 0; forcey[pidx] = 0; } } } __global__ void field_to_particles_3d( int nparticles, double* forcex, double* forcey, double* forcez, double* fieldx, double* fieldy, double* fieldz, const int nx, const int ny, const int nz, double *wijk, double *wi1jk, double *wij1k, double *wi1j1k, double *wijk1, double *wi1jk1, double* wij1k1, double* wi1j1k1, int *i, int *j, int* k) { int pidx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y* blockDim.x + threadIdx.x; int ix = i[pidx]; int jx = j[pidx]; int kx = k[pidx]; if (pidx < nparticles) { if (jx >= 0 && jx < nx - 1 && ix >= 0 && ix < ny - 1 && kx >= 0 && kx < nz - 1) { forcex[pidx] = ( wijk[pidx] * fieldx[jx + ix*nx + kx*nx*ny] + wij1k[pidx] * fieldx[jx+1 + ix*nx + kx*nx*ny] + wi1jk[pidx] * fieldx[jx+ + (ix+1)*nx + kx*nx*ny] + wi1j1k[pidx] * fieldx[jx+1 + (ix+1)*nx + kx*nx*ny] + wijk1[pidx] * fieldx[jx + ix*nx + (kx+1)*nx*ny] + wij1k1[pidx] * fieldx[jx+1 + ix*nx + (kx+1)*nx*ny] + wi1jk1[pidx] * fieldx[jx+ + (ix+1)*nx + (kx+1)*nx*ny] + wi1j1k1[pidx]* fieldx[jx+1 + (ix+1)*nx + (kx+1)*nx*ny]); forcey[pidx] = ( wijk[pidx] * fieldy[jx + ix*nx + kx*nx*ny] + wij1k[pidx] * fieldy[jx+1 + ix*nx + kx*nx*ny] + wi1jk[pidx] * fieldy[jx+ + (ix+1)*nx + kx*nx*ny] + wi1j1k[pidx] * fieldy[jx+1 + (ix+1)*nx + kx*nx*ny] + wijk1[pidx] * fieldy[jx + ix*nx + (kx+1)*nx*ny] + wij1k1[pidx] * fieldy[jx+1 + ix*nx + (kx+1)*nx*ny] + wi1jk1[pidx] * fieldy[jx+ + (ix+1)*nx + (kx+1)*nx*ny] + wi1j1k1[pidx]* fieldy[jx+1 + (ix+1)*nx + (kx+1)*nx*ny]); forcez[pidx] = ( wijk[pidx] * fieldz[jx + ix*nx + kx*nx*ny] + wij1k[pidx] * fieldz[jx+1 + ix*nx + kx*nx*ny] + wi1jk[pidx] * fieldz[jx+ + (ix+1)*nx + kx*nx*ny] + wi1j1k[pidx] * fieldz[jx+1 + (ix+1)*nx + kx*nx*ny] + wijk1[pidx] * fieldz[jx + ix*nx + (kx+1)*nx*ny] + wij1k1[pidx] * fieldz[jx+1 + ix*nx + (kx+1)*nx*ny] + wi1jk1[pidx] * fieldz[jx+ + (ix+1)*nx + (kx+1)*nx*ny] + wi1j1k1[pidx]* fieldz[jx+1 + (ix+1)*nx + (kx+1)*nx*ny]); } 
else { forcex[pidx] = 0; forcey[pidx] = 0; forcez[pidx] = 0; } } } __global__ void mesh_to_particles_3d( int nparticles, double* particles_quantity, double *mesh_quantity, const int nx, const int ny, const int nz, double *wijk, double *wi1jk, double *wij1k, double *wi1j1k, double *wijk1, double *wi1jk1, double* wij1k1, double* wi1j1k1, int *i, int *j, int* k) { int pidx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y* blockDim.x + threadIdx.x; int ix = i[pidx]; int jx = j[pidx]; int kx = k[pidx]; if (pidx < nparticles) { if (jx >= 0 && jx < nx - 1 && ix >= 0 && ix < ny - 1 && kx >= 0 && kx < nz - 1) { particles_quantity[pidx] = ( wijk[pidx] * mesh_quantity[jx + ix*nx + kx*nx*ny] + wij1k[pidx] * mesh_quantity[jx+1 + ix*nx + kx*nx*ny] + wi1jk[pidx] * mesh_quantity[jx+ + (ix+1)*nx + kx*nx*ny] + wi1j1k[pidx] * mesh_quantity[jx+1 + (ix+1)*nx + kx*nx*ny] + wijk1[pidx] * mesh_quantity[jx + ix*nx + (kx+1)*nx*ny] + wij1k1[pidx] * mesh_quantity[jx+1 + ix*nx + (kx+1)*nx*ny] + wi1jk1[pidx] * mesh_quantity[jx+ + (ix+1)*nx + (kx+1)*nx*ny] + wi1j1k1[pidx]* mesh_quantity[jx+1 + (ix+1)*nx + (kx+1)*nx*ny]); } else { particles_quantity[pidx] = 0; } } } } /* end extern C */
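/*
 * Editor's note (not part of the original source): the kernels above consume
 * precomputed interpolation weights (wij, wi1j, wij1, wi1j1, ...) together
 * with the lower-left cell indices (i, j); how those are produced is outside
 * this file. The sketch below shows the standard cloud-in-cell / bilinear
 * weighting such inputs are usually derived from -- an assumption about the
 * caller, not code taken from this project.
 */
#include <cmath>

// Given a particle position on a uniform 2-D grid (origin x0,y0, spacing
// dx,dy), compute the cell indices and the four bilinear weights; the weights
// are non-negative and sum to 1.
void bilinear_weights(double x, double y,
                      double x0, double y0, double dx, double dy,
                      int* i, int* j,
                      double* wij, double* wi1j, double* wij1, double* wi1j1) {
    const double fx = (x - x0) / dx;
    const double fy = (y - y0) / dy;
    *j = static_cast<int>(std::floor(fx));   // column index (x direction)
    *i = static_cast<int>(std::floor(fy));   // row index (y direction)
    const double tx = fx - *j;               // fractional offsets in [0, 1)
    const double ty = fy - *i;
    *wij   = (1.0 - tx) * (1.0 - ty);        // weight of node (i,   j)
    *wij1  = tx * (1.0 - ty);                // weight of node (i,   j+1)
    *wi1j  = (1.0 - tx) * ty;                // weight of node (i+1, j)
    *wi1j1 = tx * ty;                        // weight of node (i+1, j+1)
}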
a50b8ec94c2226994c12bd94288711e4dc1b1d8e.cu
/* GPU Kernels for the mesh to particles functions @author: Stefan Hegglin, Adrian Oeftiger */ extern "C" { __global__ void mesh_to_particles_2d( int nparticles, double* particles_quantity, double *mesh_quantity, const int nx, const int ny, double *wij, double *wi1j, double *wij1, double *wi1j1, int *i, int *j) { int pidx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y* blockDim.x + threadIdx.x; int ix = i[pidx]; int jx = j[pidx]; if (pidx < nparticles) { if (jx >= 0 && jx < nx - 1 && ix >= 0 && ix < ny - 1) { particles_quantity[pidx] = ( wij[pidx] * mesh_quantity[jx + ix*nx] + wij1[pidx] * mesh_quantity[jx+1 + ix*nx] + wi1j[pidx] * mesh_quantity[jx+ + (ix+1)*nx] + wi1j1[pidx] * mesh_quantity[jx+1 + (ix+1)*nx]); } else { particles_quantity[pidx] = 0; } } } __global__ void field_to_particles_2d( int nparticles, double* forcex, double* forcey, double* fieldx, double* fieldy, const int nx, const int ny, double *wij, double *wi1j, double *wij1, double *wi1j1, int *i, int *j) { int pidx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y* blockDim.x + threadIdx.x; int jx = j[pidx]; int ix = i[pidx]; if (pidx < nparticles) { if (jx >= 0 && jx < nx - 1 && ix >= 0 && ix < ny - 1) { forcex[pidx] = ( wij[pidx] * fieldx[jx + ix*nx] + wij1[pidx] * fieldx[jx+1 + ix*nx] + wi1j[pidx] * fieldx[jx + (ix+1)*nx] + wi1j1[pidx] *fieldx[jx+1 + (ix+1)*nx]); forcey[pidx] = ( wij[pidx] * fieldy[jx + ix*nx] + wij1[pidx] * fieldy[jx+1 + ix*nx] + wi1j[pidx] * fieldy[jx + (ix+1)*nx] + wi1j1[pidx] *fieldy[jx+1 + (ix+1)*nx]); } else { forcex[pidx] = 0; forcey[pidx] = 0; } } } __global__ void field_to_particles_3d( int nparticles, double* forcex, double* forcey, double* forcez, double* fieldx, double* fieldy, double* fieldz, const int nx, const int ny, const int nz, double *wijk, double *wi1jk, double *wij1k, double *wi1j1k, double *wijk1, double *wi1jk1, double* wij1k1, double* wi1j1k1, int *i, int *j, int* k) { int pidx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y* blockDim.x + threadIdx.x; int ix = i[pidx]; int jx = j[pidx]; int kx = k[pidx]; if (pidx < nparticles) { if (jx >= 0 && jx < nx - 1 && ix >= 0 && ix < ny - 1 && kx >= 0 && kx < nz - 1) { forcex[pidx] = ( wijk[pidx] * fieldx[jx + ix*nx + kx*nx*ny] + wij1k[pidx] * fieldx[jx+1 + ix*nx + kx*nx*ny] + wi1jk[pidx] * fieldx[jx+ + (ix+1)*nx + kx*nx*ny] + wi1j1k[pidx] * fieldx[jx+1 + (ix+1)*nx + kx*nx*ny] + wijk1[pidx] * fieldx[jx + ix*nx + (kx+1)*nx*ny] + wij1k1[pidx] * fieldx[jx+1 + ix*nx + (kx+1)*nx*ny] + wi1jk1[pidx] * fieldx[jx+ + (ix+1)*nx + (kx+1)*nx*ny] + wi1j1k1[pidx]* fieldx[jx+1 + (ix+1)*nx + (kx+1)*nx*ny]); forcey[pidx] = ( wijk[pidx] * fieldy[jx + ix*nx + kx*nx*ny] + wij1k[pidx] * fieldy[jx+1 + ix*nx + kx*nx*ny] + wi1jk[pidx] * fieldy[jx+ + (ix+1)*nx + kx*nx*ny] + wi1j1k[pidx] * fieldy[jx+1 + (ix+1)*nx + kx*nx*ny] + wijk1[pidx] * fieldy[jx + ix*nx + (kx+1)*nx*ny] + wij1k1[pidx] * fieldy[jx+1 + ix*nx + (kx+1)*nx*ny] + wi1jk1[pidx] * fieldy[jx+ + (ix+1)*nx + (kx+1)*nx*ny] + wi1j1k1[pidx]* fieldy[jx+1 + (ix+1)*nx + (kx+1)*nx*ny]); forcez[pidx] = ( wijk[pidx] * fieldz[jx + ix*nx + kx*nx*ny] + wij1k[pidx] * fieldz[jx+1 + ix*nx + kx*nx*ny] + wi1jk[pidx] * fieldz[jx+ + (ix+1)*nx + kx*nx*ny] + wi1j1k[pidx] * fieldz[jx+1 + (ix+1)*nx + kx*nx*ny] + wijk1[pidx] * fieldz[jx + ix*nx + (kx+1)*nx*ny] + wij1k1[pidx] * fieldz[jx+1 + ix*nx + (kx+1)*nx*ny] + wi1jk1[pidx] * fieldz[jx+ + (ix+1)*nx + (kx+1)*nx*ny] + wi1j1k1[pidx]* fieldz[jx+1 + (ix+1)*nx + (kx+1)*nx*ny]); } else { forcex[pidx] = 0; forcey[pidx] = 0; forcez[pidx] = 0; } } } __global__ void 
mesh_to_particles_3d( int nparticles, double* particles_quantity, double *mesh_quantity, const int nx, const int ny, const int nz, double *wijk, double *wi1jk, double *wij1k, double *wi1j1k, double *wijk1, double *wi1jk1, double* wij1k1, double* wi1j1k1, int *i, int *j, int* k) { int pidx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y* blockDim.x + threadIdx.x; int ix = i[pidx]; int jx = j[pidx]; int kx = k[pidx]; if (pidx < nparticles) { if (jx >= 0 && jx < nx - 1 && ix >= 0 && ix < ny - 1 && kx >= 0 && kx < nz - 1) { particles_quantity[pidx] = ( wijk[pidx] * mesh_quantity[jx + ix*nx + kx*nx*ny] + wij1k[pidx] * mesh_quantity[jx+1 + ix*nx + kx*nx*ny] + wi1jk[pidx] * mesh_quantity[jx+ + (ix+1)*nx + kx*nx*ny] + wi1j1k[pidx] * mesh_quantity[jx+1 + (ix+1)*nx + kx*nx*ny] + wijk1[pidx] * mesh_quantity[jx + ix*nx + (kx+1)*nx*ny] + wij1k1[pidx] * mesh_quantity[jx+1 + ix*nx + (kx+1)*nx*ny] + wi1jk1[pidx] * mesh_quantity[jx+ + (ix+1)*nx + (kx+1)*nx*ny] + wi1j1k1[pidx]* mesh_quantity[jx+1 + (ix+1)*nx + (kx+1)*nx*ny]); } else { particles_quantity[pidx] = 0; } } } } /* end extern C */
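A note on the gather kernels above: they load i[pidx] and j[pidx] before the pidx < nparticles guard, so the padding threads of the last block read past the index arrays, and they assume that the cell indices and the bilinear weights (wij, wi1j, wij1, wi1j1) were computed beforehand. As a hedged illustration of where those inputs typically come from (the weight kernel is not part of this file, so the function name, the uniform-grid assumption and the x0/y0/dx/dy parameters below are mine), here is a minimal cloud-in-cell weight computation for the 2D case:

// Illustrative sketch (not from the original project): cloud-in-cell weights and
// cell indices for one particle on a uniform 2D grid with spacing dx, dy and
// lower-left corner (x0, y0). The naming follows the gather kernel above:
// j indexes the x direction (columns), i the y direction (rows).
__global__ void compute_weights_2d(int nparticles,
                                   const double* px, const double* py,
                                   double x0, double y0, double dx, double dy,
                                   double* wij, double* wi1j, double* wij1, double* wi1j1,
                                   int* i, int* j)
{
    int pidx = blockIdx.x * blockDim.x + threadIdx.x;
    if (pidx >= nparticles) return;

    double fx = (px[pidx] - x0) / dx;      // fractional column coordinate
    double fy = (py[pidx] - y0) / dy;      // fractional row coordinate
    int jx = (int)floor(fx);
    int ix = (int)floor(fy);
    double tx = fx - (double)jx;           // offset inside the cell, in [0, 1)
    double ty = fy - (double)ix;

    j[pidx]     = jx;
    i[pidx]     = ix;
    wij[pidx]   = (1.0 - tx) * (1.0 - ty); // node (i,   j)
    wij1[pidx]  = tx * (1.0 - ty);         // node (i,   j+1)
    wi1j[pidx]  = (1.0 - tx) * ty;         // node (i+1, j)
    wi1j1[pidx] = tx * ty;                 // node (i+1, j+1)
}

The gather kernels then only accept contributions when 0 <= jx < nx-1 and 0 <= ix < ny-1, so particles outside the grid simply receive zero.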
ff85812893d64722d4336d92ac85c3d11f0e9c8f.hip
// !!! This is a file automatically generated by hipify!!! /*! \file Preconditioner.cu \brief Implementation of member functions of classes JacobiPreconditioner, RichardsonPreconditioner and other functions and kernels related to preconditioners */ #include <stdio.h> #include <iostream> #include <cassert> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "double_complex.h" #include "Matrix.h" #include "Preconditioner.h" #include "kernels.h" #include "header.h" //------------------------------------------------------------------------------------------------------------------------ /* Functions amd kernels for JacobiPreconditioner class */ //! GPU kernel which fills in the Jacobi Preconditioner values /*! Per element of the preconditioner's internal array, one thread is used. All elements are filled in, in parallel. \param[in] N dimension of the square sparse matrix \param[in] row_ptr_matrix base address of array allocated on GPU which stores CSR matrix row pointers \param[in] col_ind_matrix base address of array allocated on GPU which stores CSR matrix column indices \param[in] values_matrix base address of array allocated on GPU which stores CSR matrix values \param[out] d_inverse base address of array allocated on GPU which is to be filled with Jacobi preconditioner values */ __global__ void Diagonal_Scaling_Jacobi(const int N, const int* row_ptr_matrix, const int* col_ind_matrix, const DoubleComplex* values_matrix, DoubleComplex* d_inverse) { int gid = blockDim.x * blockIdx.x + threadIdx.x; if (gid < N) { int flag = -1; int row = gid; int start_index = (row_ptr_matrix)[row]; int end_index = (row_ptr_matrix)[row + 1]; for (int i = start_index; i < end_index; i++) { if ((col_ind_matrix)[i] == row) { if ((values_matrix)[i] != 0) { flag = 0; // double re = real((csr_mat->gpu_values)[i]); d_inverse[row] = 1.0 / (values_matrix)[i]; } break; } } if (flag == -1) { printf("\nJacobi preconditioner generation failed!\n"); // __threadfence(); // asm("trap;"); // [font = "Courier New"] __threadfence()[/ font]; // [font = "Courier New"] asm("trap; ")[/ font]; assert(0); } } } //! Initializes the Jacobi Preconditioner internal array(on GPU) based on the input sparse matrix using a GPU kernel /*! \param[out] A reference to CSR Matrix object \return error code 0 if the initialization is successful else returns error code = -1 */ int JacobiPreconditioner::Initialize_Preconditioner(const CSR_Matrix& A) { if (Exists_gpu() == false) Allocate_Memory(LOCATION::GPU); int err_code = 0; int N = Get_Diag_Length(); dim3 block(THREADS_PER_BLOCK); dim3 grid(ceil((double)N / (double)THREADS_PER_BLOCK)); Diagonal_Scaling_Jacobi << < grid, block >> > (N, A.GetGPURowPtr(), A.GetGPUColInd(), A.GetGPUValues(), Get_GPU_d_inverse()); // printf("\n"); // printf(hipGetErrorString(hipDeviceSynchronize())); // printf("\n"); if (hipDeviceSynchronize() != hipSuccess) { err_code = -1; } return err_code; //0 for successful preconditioner generation , return -1 if it fails. } //! Parameterized constructor for Jacobi Preconditioner class /*! Generates jacobi preconditioner based on the input sparse matrix It exits the program, in case, the generation fails. 
\param[in] A refernce to CSR Matrix object */ JacobiPreconditioner::JacobiPreconditioner(const CSR_Matrix& A) : Preconditioner(PRECONDITIONER_TYPE::JACOBI), diag_length{ A.GetRows() } { Allocate_Memory(LOCATION::GPU); int err_code = Initialize_Preconditioner(A); if (err_code != 0) { std::cout << "\nError while initilizing preconditioner\n"; exit(1); } else { std::cout << "\nJacobi preconditioner generated successfully!\n"; } } //! Allocates memory for Jacobi Preconditioner object's internel array on the specified location /*! \param[in] loc enum type variable which indicates the location(CPU/GPU) where the memory is to be allocated */ void JacobiPreconditioner::Allocate_Memory(const LOCATION loc) { if (loc == LOCATION::GPU && Exists_gpu()==false) { hipMalloc((void**)&gpu_d_inverse, Get_Diag_Length() * sizeof(DoubleComplex)); this->gpu_exists = GPU_EXISTENCE::EXISTENT; } else if (loc == LOCATION::CPU && Exists_cpu()==true) { cpu_d_inverse = new DoubleComplex[Get_Diag_Length()]; this->cpu_exists = CPU_EXISTENCE::EXISTENT; } } //! Deallocates resources of the preconditioner object /*! \param[in] loc enum type variable which indicates the location, where the resources are to be deallocated */ void JacobiPreconditioner::Deallocate_Memory(const LOCATION loc) { if (loc == LOCATION::GPU && Exists_gpu() == true) { hipFree(gpu_d_inverse); this->gpu_exists = GPU_EXISTENCE::NON_EXISTENT; gpu_d_inverse = nullptr; } else if (loc == LOCATION::CPU && Exists_cpu() == true) { delete[] cpu_d_inverse; this->cpu_exists = CPU_EXISTENCE::NON_EXISTENT; cpu_d_inverse = nullptr; } } //! Destructor for JacobiPreconditioner class /*! It deallocates acquires resources, if any. */ JacobiPreconditioner:: ~JacobiPreconditioner() { if (Exists_gpu() == true) Deallocate_Memory(LOCATION::GPU); else if (Exists_cpu() == true) Deallocate_Memory(LOCATION::CPU); } //! Copies jacobi preconditioner object's internal arrays from CPU to GPU memory void JacobiPreconditioner::CopyPreconditioner_cpu_to_gpu() { assert(Exists_cpu() == true); if (Exists_gpu() == false) Allocate_Memory(LOCATION::GPU); hipMemcpy(Get_GPU_d_inverse(), Get_CPU_d_inverse(), Get_Diag_Length() * sizeof(DoubleComplex), hipMemcpyHostToDevice); } //! Copies jacobi preconditioner object's internal arrays from GPU to CPU memory void JacobiPreconditioner::CopyPreconditioner_gpu_to_cpu() { assert(Exists_gpu() == true); if (Exists_cpu() == false) Allocate_Memory(LOCATION::CPU); hipMemcpy(Get_CPU_d_inverse(), Get_GPU_d_inverse(), Get_Diag_Length() * sizeof(DoubleComplex), hipMemcpyDeviceToHost); } //! Generates a preconditioner based on the preconditioner type and sparse matrix received as input /*! \param[in] precond_type enum varaible which describes the type of the preconditioner \param[in] A reference to CSR matrix object \return a pointer to the generated preconditioner */ Preconditioner* Generate_Preconditioner(const PRECONDITIONER_TYPE precond_type, const CSR_Matrix& A) { Preconditioner* precond = nullptr; switch (precond_type) { case PRECONDITIONER_TYPE::JACOBI: precond = new JacobiPreconditioner(A); break; case PRECONDITIONER_TYPE::RICHARDSON: precond = new RichardsonPreconditioner(A); break; } return precond; } //! GPU kernel which performs the jacobi preconditioning operation /*! Each element of the result is handled using a different thread. All elements are computed in parallel. 
result = jacobiPreconditioner(d_inverse) * vector \param[in] N length of vector \param[in] d_inverse base address of array allocated on GPU which stores jacobi preconditioner values \param[in] vec the array allocated on GPU with which preconditioner is multiplied \param[out] result the base address of the array allocated on GPU where result of mutiplication of preconditioner and the vector */ __global__ void Jacobi_precond_matrix_vector_multiplication(const int N, const DoubleComplex* d_inverse, const DoubleComplex* vec, DoubleComplex* result) { int gid = blockDim.x * blockIdx.x + threadIdx.x; if (gid < N) { result[gid] = d_inverse[gid] * vec[gid]; } } //! Performs jacobi preconditioning operation using GPU kernel /*! \param[in] vec reference to a dense matrix object( a vector) \param[out] result formed by result = jacobi_preconditioner matrix(d_inverse) * vec Operates on internal GPU arrays. */ void JacobiPreconditioner::ApplyPreconditioner(const Dense_Matrix& vec, Dense_Matrix& result) const { assert(this->Exists_gpu() == true); assert(vec.ExistsGPU() == true); assert(result.ExistsGPU() == true); dim3 block(THREADS_PER_BLOCK); int N = Get_Diag_Length(); dim3 grid(ceil((double)N / (double)THREADS_PER_BLOCK)); Jacobi_precond_matrix_vector_multiplication << < grid, block >> > (N, Get_GPU_d_inverse(), vec.GetGPUValues(), result.GetGPUValues()); } //------------------------------------------------------------------------------------------------------------------------ /* Functions amd kernels for RichardsonPreconditioner class */ //! Parameterized constructor for Richardson Preconditioner class /*! Generates richardson preconditioner based on the input sparse matrix. The preconditioner values are not explicitly stored as it is an Identity matrix \param[in] A reference to CSR Matrix object */ RichardsonPreconditioner::RichardsonPreconditioner(const CSR_Matrix& A) : Preconditioner(PRECONDITIONER_TYPE::RICHARDSON), Identity_dim{ A.GetRows() } { } //! A GPU kernel which performs richardson preconditioning operation /*! Each element of the result is handled using a different thread. All elements are computed in parallel. \param[in] N length of vector \param[in] vec array allocated on GPU storing vector values \param[out] result array allocated on GPU where the result of the preconditioning operation is to stored */ __global__ void Richardson_Precond_applicn(const int N , const DoubleComplex* vec , DoubleComplex* result) { int gid = blockDim.x * blockIdx.x + threadIdx.x; if (gid < N) { result[gid] = vec[gid]; } } //! Performs richardson preconditioning operation using a GPU kernel /*! \param[in] vec reference to a dense matrix object(vector) \param[out] result reference to a dense matrix object which is going to store the result of preconditioning Operates on internal GPU arrays. */ void RichardsonPreconditioner::ApplyPreconditioner(const Dense_Matrix& vec, Dense_Matrix& result) const { assert(vec.ExistsGPU() == true); assert(result.ExistsGPU() == true); dim3 block(THREADS_PER_BLOCK); int N = Get_Identity_Dimension(); dim3 grid(ceil(static_cast<double>(N) / static_cast<double>(THREADS_PER_BLOCK))); hipLaunchKernelGGL(( Richardson_Precond_applicn) , dim3(grid) , dim3(block), 0, 0, N,vec.GetGPUValues(),result.GetGPUValues()); }
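For readers without the surrounding CSR_Matrix and Dense_Matrix classes, the Jacobi (diagonal-scaling) preconditioner above reduces to two kernels: extract 1/diag(A) from the CSR structure, then multiply it elementwise into a vector. A self-contained, real-valued sketch of that pair (the original works on DoubleComplex and asserts when a diagonal entry is missing or zero; the 3x3 matrix here is made up purely for illustration):

// Self-contained sketch of diagonal-scaling (Jacobi) preconditioning on a CSR matrix.
// Real-valued for brevity; the original code operates on DoubleComplex.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void build_jacobi(int n, const int* row_ptr, const int* col_ind,
                             const double* val, double* d_inv)
{
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    if (row >= n) return;
    for (int k = row_ptr[row]; k < row_ptr[row + 1]; ++k) {
        if (col_ind[k] == row && val[k] != 0.0) {
            d_inv[row] = 1.0 / val[k];
            return;
        }
    }
    d_inv[row] = 1.0;   // fall back to identity here; the original kernel asserts instead
}

__global__ void apply_jacobi(int n, const double* d_inv, const double* x, double* y)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < n) y[idx] = d_inv[idx] * x[idx];
}

int main()
{
    // 3x3 CSR matrix [[4,1,0],[0,5,2],[1,0,8]] and the vector x = (1,1,1)
    const int n = 3;
    int    h_row_ptr[] = {0, 2, 4, 6};
    int    h_col_ind[] = {0, 1, 1, 2, 0, 2};
    double h_val[]     = {4, 1, 5, 2, 1, 8};
    double h_x[]       = {1, 1, 1};

    int *row_ptr, *col_ind; double *val, *d_inv, *x, *y;
    cudaMalloc(&row_ptr, sizeof(h_row_ptr)); cudaMemcpy(row_ptr, h_row_ptr, sizeof(h_row_ptr), cudaMemcpyHostToDevice);
    cudaMalloc(&col_ind, sizeof(h_col_ind)); cudaMemcpy(col_ind, h_col_ind, sizeof(h_col_ind), cudaMemcpyHostToDevice);
    cudaMalloc(&val,     sizeof(h_val));     cudaMemcpy(val,     h_val,     sizeof(h_val),     cudaMemcpyHostToDevice);
    cudaMalloc(&x,       sizeof(h_x));       cudaMemcpy(x,       h_x,       sizeof(h_x),       cudaMemcpyHostToDevice);
    cudaMalloc(&d_inv, n * sizeof(double));
    cudaMalloc(&y,     n * sizeof(double));

    build_jacobi<<<1, 32>>>(n, row_ptr, col_ind, val, d_inv);
    apply_jacobi<<<1, 32>>>(n, d_inv, x, y);

    double h_y[3];
    cudaMemcpy(h_y, y, sizeof(h_y), cudaMemcpyDeviceToHost);
    std::printf("M^-1 * x = %f %f %f\n", h_y[0], h_y[1], h_y[2]);   // expect 0.25 0.2 0.125
    cudaFree(row_ptr); cudaFree(col_ind); cudaFree(val); cudaFree(d_inv); cudaFree(x); cudaFree(y);
    return 0;
}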
ff85812893d64722d4336d92ac85c3d11f0e9c8f.cu
/*! \file Preconditioner.cu \brief Implementation of member functions of classes JacobiPreconditioner, RichardsonPreconditioner and other functions and kernels related to preconditioners */ #include <stdio.h> #include <iostream> #include <cassert> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "double_complex.h" #include "Matrix.h" #include "Preconditioner.h" #include "kernels.h" #include "header.h" //------------------------------------------------------------------------------------------------------------------------ /* Functions amd kernels for JacobiPreconditioner class */ //! GPU kernel which fills in the Jacobi Preconditioner values /*! Per element of the preconditioner's internal array, one thread is used. All elements are filled in, in parallel. \param[in] N dimension of the square sparse matrix \param[in] row_ptr_matrix base address of array allocated on GPU which stores CSR matrix row pointers \param[in] col_ind_matrix base address of array allocated on GPU which stores CSR matrix column indices \param[in] values_matrix base address of array allocated on GPU which stores CSR matrix values \param[out] d_inverse base address of array allocated on GPU which is to be filled with Jacobi preconditioner values */ __global__ void Diagonal_Scaling_Jacobi(const int N, const int* row_ptr_matrix, const int* col_ind_matrix, const DoubleComplex* values_matrix, DoubleComplex* d_inverse) { int gid = blockDim.x * blockIdx.x + threadIdx.x; if (gid < N) { int flag = -1; int row = gid; int start_index = (row_ptr_matrix)[row]; int end_index = (row_ptr_matrix)[row + 1]; for (int i = start_index; i < end_index; i++) { if ((col_ind_matrix)[i] == row) { if ((values_matrix)[i] != 0) { flag = 0; // double re = real((csr_mat->gpu_values)[i]); d_inverse[row] = 1.0 / (values_matrix)[i]; } break; } } if (flag == -1) { printf("\nJacobi preconditioner generation failed!\n"); // __threadfence(); // asm("trap;"); // [font = "Courier New"] __threadfence()[/ font]; // [font = "Courier New"] asm("trap; ")[/ font]; assert(0); } } } //! Initializes the Jacobi Preconditioner internal array(on GPU) based on the input sparse matrix using a GPU kernel /*! \param[out] A reference to CSR Matrix object \return error code 0 if the initialization is successful else returns error code = -1 */ int JacobiPreconditioner::Initialize_Preconditioner(const CSR_Matrix& A) { if (Exists_gpu() == false) Allocate_Memory(LOCATION::GPU); int err_code = 0; int N = Get_Diag_Length(); dim3 block(THREADS_PER_BLOCK); dim3 grid(ceil((double)N / (double)THREADS_PER_BLOCK)); Diagonal_Scaling_Jacobi << < grid, block >> > (N, A.GetGPURowPtr(), A.GetGPUColInd(), A.GetGPUValues(), Get_GPU_d_inverse()); // printf("\n"); // printf(cudaGetErrorString(cudaDeviceSynchronize())); // printf("\n"); if (cudaDeviceSynchronize() != cudaSuccess) { err_code = -1; } return err_code; //0 for successful preconditioner generation , return -1 if it fails. } //! Parameterized constructor for Jacobi Preconditioner class /*! Generates jacobi preconditioner based on the input sparse matrix It exits the program, in case, the generation fails. 
\param[in] A refernce to CSR Matrix object */ JacobiPreconditioner::JacobiPreconditioner(const CSR_Matrix& A) : Preconditioner(PRECONDITIONER_TYPE::JACOBI), diag_length{ A.GetRows() } { Allocate_Memory(LOCATION::GPU); int err_code = Initialize_Preconditioner(A); if (err_code != 0) { std::cout << "\nError while initilizing preconditioner\n"; exit(1); } else { std::cout << "\nJacobi preconditioner generated successfully!\n"; } } //! Allocates memory for Jacobi Preconditioner object's internel array on the specified location /*! \param[in] loc enum type variable which indicates the location(CPU/GPU) where the memory is to be allocated */ void JacobiPreconditioner::Allocate_Memory(const LOCATION loc) { if (loc == LOCATION::GPU && Exists_gpu()==false) { cudaMalloc((void**)&gpu_d_inverse, Get_Diag_Length() * sizeof(DoubleComplex)); this->gpu_exists = GPU_EXISTENCE::EXISTENT; } else if (loc == LOCATION::CPU && Exists_cpu()==true) { cpu_d_inverse = new DoubleComplex[Get_Diag_Length()]; this->cpu_exists = CPU_EXISTENCE::EXISTENT; } } //! Deallocates resources of the preconditioner object /*! \param[in] loc enum type variable which indicates the location, where the resources are to be deallocated */ void JacobiPreconditioner::Deallocate_Memory(const LOCATION loc) { if (loc == LOCATION::GPU && Exists_gpu() == true) { cudaFree(gpu_d_inverse); this->gpu_exists = GPU_EXISTENCE::NON_EXISTENT; gpu_d_inverse = nullptr; } else if (loc == LOCATION::CPU && Exists_cpu() == true) { delete[] cpu_d_inverse; this->cpu_exists = CPU_EXISTENCE::NON_EXISTENT; cpu_d_inverse = nullptr; } } //! Destructor for JacobiPreconditioner class /*! It deallocates acquires resources, if any. */ JacobiPreconditioner:: ~JacobiPreconditioner() { if (Exists_gpu() == true) Deallocate_Memory(LOCATION::GPU); else if (Exists_cpu() == true) Deallocate_Memory(LOCATION::CPU); } //! Copies jacobi preconditioner object's internal arrays from CPU to GPU memory void JacobiPreconditioner::CopyPreconditioner_cpu_to_gpu() { assert(Exists_cpu() == true); if (Exists_gpu() == false) Allocate_Memory(LOCATION::GPU); cudaMemcpy(Get_GPU_d_inverse(), Get_CPU_d_inverse(), Get_Diag_Length() * sizeof(DoubleComplex), cudaMemcpyHostToDevice); } //! Copies jacobi preconditioner object's internal arrays from GPU to CPU memory void JacobiPreconditioner::CopyPreconditioner_gpu_to_cpu() { assert(Exists_gpu() == true); if (Exists_cpu() == false) Allocate_Memory(LOCATION::CPU); cudaMemcpy(Get_CPU_d_inverse(), Get_GPU_d_inverse(), Get_Diag_Length() * sizeof(DoubleComplex), cudaMemcpyDeviceToHost); } //! Generates a preconditioner based on the preconditioner type and sparse matrix received as input /*! \param[in] precond_type enum varaible which describes the type of the preconditioner \param[in] A reference to CSR matrix object \return a pointer to the generated preconditioner */ Preconditioner* Generate_Preconditioner(const PRECONDITIONER_TYPE precond_type, const CSR_Matrix& A) { Preconditioner* precond = nullptr; switch (precond_type) { case PRECONDITIONER_TYPE::JACOBI: precond = new JacobiPreconditioner(A); break; case PRECONDITIONER_TYPE::RICHARDSON: precond = new RichardsonPreconditioner(A); break; } return precond; } //! GPU kernel which performs the jacobi preconditioning operation /*! Each element of the result is handled using a different thread. All elements are computed in parallel. 
result = jacobiPreconditioner(d_inverse) * vector \param[in] N length of vector \param[in] d_inverse base address of array allocated on GPU which stores jacobi preconditioner values \param[in] vec the array allocated on GPU with which preconditioner is multiplied \param[out] result the base address of the array allocated on GPU where result of mutiplication of preconditioner and the vector */ __global__ void Jacobi_precond_matrix_vector_multiplication(const int N, const DoubleComplex* d_inverse, const DoubleComplex* vec, DoubleComplex* result) { int gid = blockDim.x * blockIdx.x + threadIdx.x; if (gid < N) { result[gid] = d_inverse[gid] * vec[gid]; } } //! Performs jacobi preconditioning operation using GPU kernel /*! \param[in] vec reference to a dense matrix object( a vector) \param[out] result formed by result = jacobi_preconditioner matrix(d_inverse) * vec Operates on internal GPU arrays. */ void JacobiPreconditioner::ApplyPreconditioner(const Dense_Matrix& vec, Dense_Matrix& result) const { assert(this->Exists_gpu() == true); assert(vec.ExistsGPU() == true); assert(result.ExistsGPU() == true); dim3 block(THREADS_PER_BLOCK); int N = Get_Diag_Length(); dim3 grid(ceil((double)N / (double)THREADS_PER_BLOCK)); Jacobi_precond_matrix_vector_multiplication << < grid, block >> > (N, Get_GPU_d_inverse(), vec.GetGPUValues(), result.GetGPUValues()); } //------------------------------------------------------------------------------------------------------------------------ /* Functions amd kernels for RichardsonPreconditioner class */ //! Parameterized constructor for Richardson Preconditioner class /*! Generates richardson preconditioner based on the input sparse matrix. The preconditioner values are not explicitly stored as it is an Identity matrix \param[in] A reference to CSR Matrix object */ RichardsonPreconditioner::RichardsonPreconditioner(const CSR_Matrix& A) : Preconditioner(PRECONDITIONER_TYPE::RICHARDSON), Identity_dim{ A.GetRows() } { } //! A GPU kernel which performs richardson preconditioning operation /*! Each element of the result is handled using a different thread. All elements are computed in parallel. \param[in] N length of vector \param[in] vec array allocated on GPU storing vector values \param[out] result array allocated on GPU where the result of the preconditioning operation is to stored */ __global__ void Richardson_Precond_applicn(const int N , const DoubleComplex* vec , DoubleComplex* result) { int gid = blockDim.x * blockIdx.x + threadIdx.x; if (gid < N) { result[gid] = vec[gid]; } } //! Performs richardson preconditioning operation using a GPU kernel /*! \param[in] vec reference to a dense matrix object(vector) \param[out] result reference to a dense matrix object which is going to store the result of preconditioning Operates on internal GPU arrays. */ void RichardsonPreconditioner::ApplyPreconditioner(const Dense_Matrix& vec, Dense_Matrix& result) const { assert(vec.ExistsGPU() == true); assert(result.ExistsGPU() == true); dim3 block(THREADS_PER_BLOCK); int N = Get_Identity_Dimension(); dim3 grid(ceil(static_cast<double>(N) / static_cast<double>(THREADS_PER_BLOCK))); Richardson_Precond_applicn <<< grid , block>>>(N,vec.GetGPUValues(),result.GetGPUValues()); }
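The CUDA original above (like its hipified counterpart) relies on the return value of cudaDeviceSynchronize() alone to detect the device-side assert fired by Diagonal_Scaling_Jacobi. A slightly more informative pattern, offered here as a suggestion rather than as part of the original code, also picks up launch-configuration errors, and is worth centralizing because a triggered device assert leaves the CUDA context unusable for subsequent calls:

// Sketch: report both launch-time and asynchronous (device-side) errors after a kernel.
#include <cstdio>
#include <cuda_runtime.h>

inline int checked_sync(const char* what)
{
    cudaError_t launch_err = cudaGetLastError();      // invalid configuration, etc.
    cudaError_t sync_err   = cudaDeviceSynchronize(); // device asserts, illegal accesses, ...
    if (launch_err != cudaSuccess || sync_err != cudaSuccess) {
        std::fprintf(stderr, "%s failed: %s / %s\n", what,
                     cudaGetErrorString(launch_err), cudaGetErrorString(sync_err));
        return -1;
    }
    return 0;
}

Initialize_Preconditioner could then end with return checked_sync("Diagonal_Scaling_Jacobi"); instead of the bare synchronize-and-compare.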
50a39972f6d2a0425580e0c598b9ef145d828404.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "cudakernel.h" #include "triangle.cpp" #include "tetrahedron.cpp" //__device__ static const float divider[] = {1.0, 0.5, 0.25}; __host__ hipError_t drawTriangleOnGPU(Triangle triangle, float invVolum, Canvas canvas){ Point2d topleft; Point2d bottomright; topleft.x = min(min(triangle.a.x, triangle.b.x), triangle.c.x); topleft.y = min(min(triangle.a.y, triangle.b.y), triangle.c.y); bottomright.x = max(max(triangle.a.x, triangle.b.x), triangle.c.x); bottomright.y = max(max(triangle.a.y, triangle.b.y), triangle.c.y); Point2d vt = topleft - canvas.topleft; Point2d vb = bottomright - canvas.topleft; int xind = (int) floor(vt.x / canvas.dx); int yind = (int) floor(vt.y / canvas.dx); int xend = (int) ceil(vb.x / canvas.dx); int yend = (int) ceil(vb.y / canvas.dx); dim3 block_size; block_size.x = 4; block_size.y = 4; dim3 grid_size; grid_size.x = (int)ceil((float)(xend - xind) / (float)block_size.x); grid_size.y = (int)ceil((float)(yend - yind) / (float)block_size.y); hipLaunchKernelGGL(( renderTriangle), dim3(grid_size), dim3(block_size), 0, 0, triangle, invVolum, canvas, xind, yind); return hipDeviceSynchronize(); } hipError_t Canvas::copyDeviceDataToHost(int zind){ hipError_t err = hipSuccess; for(int i = 0; i < numRenderTypes; i++){ hipError_t err1 = hipMemcpy(hostCanvasData[i] + zind * imagesize * imagesize, *(getDeviceCanvas(i)), imagesize * imagesize * sizeof(float), hipMemcpyDeviceToHost); if(err1 != hipSuccess){ err = err1; } } return err; } hipError_t Canvas::copyHostDataToDevice(int zind){ hipError_t err = hipSuccess; for(int i = 0; i < numRenderTypes; i++){ hipError_t err1 = hipMemcpy(*(getDeviceCanvas(i)), hostCanvasData[i] + zind * imagesize * imagesize, imagesize * imagesize * sizeof(float),hipMemcpyHostToDevice); if(err1 != hipSuccess){ err = err1; } } return err; } //return -1 no //return 0 yes //return 1 on the edge //return 2 on the vertex __device__ int isInTriangle(Triangle & triangle, Point2d &p, float &u, float &v){ if(p.x > triangle.a.x && p.x > triangle.b.x && p.x > triangle.c.x) return -1; if(p.x < triangle.a.x && p.x < triangle.b.x && p.x < triangle.c.x) return -1; if(p.y > triangle.a.y && p.y > triangle.b.y && p.y > triangle.c.y) return -1; if(p.y < triangle.a.y && p.y < triangle.b.y && p.y < triangle.c.y) return -1; if(p.x == triangle.a.x && p.y == triangle.a.y) return 2; if(p.x == triangle.b.x && p.y == triangle.b.y) return 2; if(p.x == triangle.c.x && p.y == triangle.c.y) return 2; // Compute vectors Point2d v0 = triangle.c - triangle.a; Point2d v1 = triangle.b - triangle.a; Point2d v2 = p - triangle.a; // Compute dot products float dot00 = v0.dot(v0); float dot01 = v0.dot(v1); float dot02 = v0.dot(v2); float dot11 = v1.dot(v1); float dot12 = v1.dot(v2); // Compute barycentric coordinates float volume = (dot00 * dot11 - dot01 * dot01); if(volume == 0) return 2; //excluded by the vetex test //float invDenom = 1.0 / volume; u = (dot11 * dot02 - dot01 * dot12) / volume; v = (dot00 * dot12 - dot01 * dot02) / volume; // Check if point is in triangle if( (u > 0) && (v > 0) && (u + v < 1)){ return 0; }else if((u == 0) || (v == 0) || (u + v) == 1){ return 1; }else{ return -1; } } __global__ void renderTriangle(Triangle triangle, float invVolum, Canvas canvas, int xind, int yind){ const float divider[] = {1.0, 0.5, 0.25}; int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * 
blockDim.y + threadIdx.y; int xxi = idx + xind; int yyi = idy + yind; if(xxi < 0 || xxi >= canvas.imagesize || yyi <0 || yyi >= canvas.imagesize){ return; } int ind = xxi + yyi * canvas.imagesize; Point2d p; p.x = xxi * canvas.dx + canvas.topleft.x; p.y = yyi * canvas.dx + canvas.topleft.y; float u, v; int inTriangle = isInTriangle(triangle, p, u, v); if(inTriangle != -1){ Point velocity = triangle.val1 * (1.0 - u - v) + triangle.val2 * u + triangle.val3 * v; float values[] = {invVolum, 1.0, velocity.x, velocity.y, velocity.z}; for(int i = 0; i < canvas.numRenderTypes; i++){ *(*(canvas.getDeviceCanvas(i)) + ind) += values[canvas.renderTypes(i)] * divider[inTriangle]; //*(*(canvas.getDeviceCanvas(i)) + ind) = idx + idy + xind + yind; } } }
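isInTriangle classifies each sampled pixel against the triangle through barycentric coordinates built from dot products, returning -1, 0, 1 or 2 for outside, inside, on an edge, or on a vertex (with the degenerate triangle folded into the vertex case). A host-side reference of the same classification, useful only for spot-checking the device logic (P2 is a stand-in for the project's Point2d, and the bounding-box early-outs of the device version are omitted):

// Host-side reference of the barycentric point-in-triangle test used by renderTriangle.
#include <cstdio>

struct P2 { float x, y; };
static float dot(P2 a, P2 b) { return a.x * b.x + a.y * b.y; }
static P2    sub(P2 a, P2 b) { return {a.x - b.x, a.y - b.y}; }

// Returns -1 outside, 0 inside, 1 on an edge, 2 on a vertex / degenerate triangle.
int classify(P2 a, P2 b, P2 c, P2 p, float& u, float& v)
{
    if ((p.x == a.x && p.y == a.y) || (p.x == b.x && p.y == b.y) || (p.x == c.x && p.y == c.y))
        return 2;
    P2 v0 = sub(c, a), v1 = sub(b, a), v2 = sub(p, a);
    float d00 = dot(v0, v0), d01 = dot(v0, v1), d02 = dot(v0, v2);
    float d11 = dot(v1, v1), d12 = dot(v1, v2);
    float denom = d00 * d11 - d01 * d01;
    if (denom == 0.0f) return 2;                    // degenerate triangle
    u = (d11 * d02 - d01 * d12) / denom;
    v = (d00 * d12 - d01 * d02) / denom;
    if (u > 0 && v > 0 && u + v < 1) return 0;
    if (u == 0 || v == 0 || u + v == 1) return 1;
    return -1;
}

int main()
{
    float u, v;
    P2 a{0, 0}, b{1, 0}, c{0, 1};
    std::printf("%d\n", classify(a, b, c, {0.25f, 0.25f}, u, v)); // 0: inside
    std::printf("%d\n", classify(a, b, c, {2.0f, 2.0f}, u, v));   // -1: outside
    return 0;
}

The returned class indexes the divider[] table in renderTriangle, so interior hits contribute with full weight, edge hits with half and vertex hits with a quarter.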
50a39972f6d2a0425580e0c598b9ef145d828404.cu
#include <stdio.h> #include <cuda.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "cudakernel.h" #include "triangle.cpp" #include "tetrahedron.cpp" //__device__ static const float divider[] = {1.0, 0.5, 0.25}; __host__ cudaError_t drawTriangleOnGPU(Triangle triangle, float invVolum, Canvas canvas){ Point2d topleft; Point2d bottomright; topleft.x = min(min(triangle.a.x, triangle.b.x), triangle.c.x); topleft.y = min(min(triangle.a.y, triangle.b.y), triangle.c.y); bottomright.x = max(max(triangle.a.x, triangle.b.x), triangle.c.x); bottomright.y = max(max(triangle.a.y, triangle.b.y), triangle.c.y); Point2d vt = topleft - canvas.topleft; Point2d vb = bottomright - canvas.topleft; int xind = (int) floor(vt.x / canvas.dx); int yind = (int) floor(vt.y / canvas.dx); int xend = (int) ceil(vb.x / canvas.dx); int yend = (int) ceil(vb.y / canvas.dx); dim3 block_size; block_size.x = 4; block_size.y = 4; dim3 grid_size; grid_size.x = (int)ceil((float)(xend - xind) / (float)block_size.x); grid_size.y = (int)ceil((float)(yend - yind) / (float)block_size.y); renderTriangle<<<grid_size, block_size>>>(triangle, invVolum, canvas, xind, yind); return cudaThreadSynchronize(); } cudaError_t Canvas::copyDeviceDataToHost(int zind){ cudaError_t err = cudaSuccess; for(int i = 0; i < numRenderTypes; i++){ cudaError_t err1 = cudaMemcpy(hostCanvasData[i] + zind * imagesize * imagesize, *(getDeviceCanvas(i)), imagesize * imagesize * sizeof(float), cudaMemcpyDeviceToHost); if(err1 != cudaSuccess){ err = err1; } } return err; } cudaError_t Canvas::copyHostDataToDevice(int zind){ cudaError_t err = cudaSuccess; for(int i = 0; i < numRenderTypes; i++){ cudaError_t err1 = cudaMemcpy(*(getDeviceCanvas(i)), hostCanvasData[i] + zind * imagesize * imagesize, imagesize * imagesize * sizeof(float),cudaMemcpyHostToDevice); if(err1 != cudaSuccess){ err = err1; } } return err; } //return -1 no //return 0 yes //return 1 on the edge //return 2 on the vertex __device__ int isInTriangle(Triangle & triangle, Point2d &p, float &u, float &v){ if(p.x > triangle.a.x && p.x > triangle.b.x && p.x > triangle.c.x) return -1; if(p.x < triangle.a.x && p.x < triangle.b.x && p.x < triangle.c.x) return -1; if(p.y > triangle.a.y && p.y > triangle.b.y && p.y > triangle.c.y) return -1; if(p.y < triangle.a.y && p.y < triangle.b.y && p.y < triangle.c.y) return -1; if(p.x == triangle.a.x && p.y == triangle.a.y) return 2; if(p.x == triangle.b.x && p.y == triangle.b.y) return 2; if(p.x == triangle.c.x && p.y == triangle.c.y) return 2; // Compute vectors Point2d v0 = triangle.c - triangle.a; Point2d v1 = triangle.b - triangle.a; Point2d v2 = p - triangle.a; // Compute dot products float dot00 = v0.dot(v0); float dot01 = v0.dot(v1); float dot02 = v0.dot(v2); float dot11 = v1.dot(v1); float dot12 = v1.dot(v2); // Compute barycentric coordinates float volume = (dot00 * dot11 - dot01 * dot01); if(volume == 0) return 2; //excluded by the vetex test //float invDenom = 1.0 / volume; u = (dot11 * dot02 - dot01 * dot12) / volume; v = (dot00 * dot12 - dot01 * dot02) / volume; // Check if point is in triangle if( (u > 0) && (v > 0) && (u + v < 1)){ return 0; }else if((u == 0) || (v == 0) || (u + v) == 1){ return 1; }else{ return -1; } } __global__ void renderTriangle(Triangle triangle, float invVolum, Canvas canvas, int xind, int yind){ const float divider[] = {1.0, 0.5, 0.25}; int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; int xxi = idx + xind; int yyi = idy + yind; if(xxi < 0 || xxi >= 
canvas.imagesize || yyi <0 || yyi >= canvas.imagesize){ return; } int ind = xxi + yyi * canvas.imagesize; Point2d p; p.x = xxi * canvas.dx + canvas.topleft.x; p.y = yyi * canvas.dx + canvas.topleft.y; float u, v; int inTriangle = isInTriangle(triangle, p, u, v); if(inTriangle != -1){ Point velocity = triangle.val1 * (1.0 - u - v) + triangle.val2 * u + triangle.val3 * v; float values[] = {invVolum, 1.0, velocity.x, velocity.y, velocity.z}; for(int i = 0; i < canvas.numRenderTypes; i++){ *(*(canvas.getDeviceCanvas(i)) + ind) += values[canvas.renderTypes(i)] * divider[inTriangle]; //*(*(canvas.getDeviceCanvas(i)) + ind) = idx + idy + xind + yind; } } }
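Two host-side details of drawTriangleOnGPU in this CUDA original are worth flagging: the 4x4 block (16 threads) leaves half of every 32-thread warp idle, and cudaThreadSynchronize() has long been deprecated in favour of cudaDeviceSynchronize() (the hipify output above already maps it to hipDeviceSynchronize). A hedged rewrite of just the launch section, reusing the variables computed earlier in the function:

// Sketch (assumed drop-in for the end of drawTriangleOnGPU): a warp-friendly
// block shape and the non-deprecated synchronization call.
dim3 block_size(16, 16);   // 256 threads: a multiple of the 32-thread warp size
dim3 grid_size((xend - xind + block_size.x - 1) / block_size.x,
               (yend - yind + block_size.y - 1) / block_size.y);
renderTriangle<<<grid_size, block_size>>>(triangle, invVolum, canvas, xind, yind);
return cudaDeviceSynchronize();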
d52ece9fbc0bbd71c4b9b90ee283f359f01e9fb7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <cmath> #include "sortcu.h" using namespace std; #define THREADS_PER_BLOCK 8 #include <stdlib.h> #include <math.h> #include "sortcu.h" #include <stdio.h> __global__ void Egpu_merge(int *ndata) { //invert elements of second sublist //printf("%d\n", blockDim.x); int swap_pos; //int temp; //int nstep=0; int shift=0; int scale=*ndata; for(int i=0;i<*ndata;i++) { scale=(scale+1)/2; for (int j=0;j<=(*ndata/2)/blockDim.x;j++){ if(threadIdx.x>=scale) shift=(threadIdx.x/scale)*scale+(j*blockDim.x); swap_pos=threadIdx.x+scale+shift; // printf(" MERGE at %d:%d %d %d \n",i,j,threadIdx.x+shift,swap_pos); } //need to have loops of threads till //if(data[threadIdx.x+shift]>=data[swap_pos]){ //printf("swap done: %d %d\n",data[threadIdx.x+shift],data[swap_pos]); //temp=data[threadIdx.x+shift]; //data[threadIdx.x+shift]=data[swap_pos]; //data[swap_pos]=temp; //} __syncthreads(); }//for } __global__ void gpu_sort_inner(int *data,int iter2, int iter, int ndata) { int k=(-1*((2*threadIdx.x)/(iter))); int shift=0; int scale=iter; int swap_pos; for(int i=0;i<iter+1;i++) { scale=(scale+1)/2; if(threadIdx.x>=scale) shift=(threadIdx.x/scale)*scale; swap_pos=threadIdx.x+scale+shift; if (k<0){ //printf("SWAP at %d>%d: %d %d UP k: %d \n",iter,i,threadIdx.x+shift,swap_pos, k); if(data[threadIdx.x+shift]<data[swap_pos]){ //printf("swap done: %d %d\n",data[threadIdx.x+shift],data[swap_pos]); /*temp=data[threadIdx.x+shift]; data[threadIdx.x+shift]=data[swap_pos]; data[swap_pos]=temp;*/ } }else{ //printf("SWAP at %d>%d: %d %d DOWN k: %d \n",iter,i,threadIdx.x+shift,swap_pos,k); if(data[threadIdx.x+shift]>data[swap_pos]){ //printf("swap done: %d %d\n",data[threadIdx.x+shift],data[swap_pos]); /*temp=data[threadIdx.x+shift]; data[threadIdx.x+shift]=data[swap_pos]; data[swap_pos]=temp;*/ } } //__syncthreads(); if(scale<2) break; }//for //bitonic merge: NOT REQUIRED /*int shift=0; int scale=*ndata; for(int i=0;i<nstep;i++) { scale=(scale+1)/2; if(threadIdx.x>=scale) shift=(threadIdx.x/scale)*scale; swap_pos=threadIdx.x+scale+shift; printf("SWAP at %d: %d %d \n",i,threadIdx.x+shift,swap_pos); if(data[threadIdx.x+shift]>=data[swap_pos]){ //printf("swap done: %d %d\n",data[threadIdx.x+shift],data[swap_pos]); temp=data[threadIdx.x+shift]; data[threadIdx.x+shift]=data[swap_pos]; data[swap_pos]=temp; } __syncthreads(); }//for //}*/ int index= threadIdx.x + blockDim.x * blockIdx.x; int direction = index^iter2; if ((direction)>index) { /* UP ARROW*/ if ((index&iter)==0) { if (data[index]>data[direction]) { int temp; temp=data[index]; data[index]=data[direction]; data[direction]=temp; } } /*DOWN ARROW*/ if ((index&iter)!=0) { if (data[index]<data[direction]) { int temp; temp=data[index]; data[index]=data[direction]; data[direction]=temp; } } } __syncthreads(); } __global__ void gpu_sort(int *data, int ndata) { //cout<<"GPU_SORT called\n"; int num_thread=512; int num_blocks=ndata/512; int swap_pos; //int temp; //int nstep=0; int k=-1; for (int iter=2;iter<=ndata;iter=iter*2){ for (int iter2=iter/2;iter2>0;iter2=iter2/2){ hipLaunchKernelGGL(( gpu_sort_inner), dim3(num_blocks), dim3(num_thread), 0, 0, data,iter2, iter, ndata); } } } void sort(int *data, int ndata){ int *d_data; //int *d_ndata; hipMalloc((void **)&d_data, sizeof(int)*ndata); //hipMalloc((void **)&d_ndata, sizeof(int)); hipMemcpy(d_data, data, sizeof(int)*ndata, hipMemcpyHostToDevice); //hipMemcpy(d_ndata, &ndata, sizeof(int), hipMemcpyHostToDevice); //call 
gpu_sort with input size of power 2 serialy: hipLaunchKernelGGL(( gpu_sort), dim3(1),dim3(1), 0, 0, d_data,ndata); //call gpu_sort //Egpu_merge<<<2,(ndata+1)/4>>>(d_ndata); hipMemcpy(data, d_data, sizeof(int)*ndata, hipMemcpyDeviceToHost); //for (int i=0; i<ndata; i++) //cout<<"%d\n", data[i]; }
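gpu_sort here is itself a __global__ kernel that launches gpu_sort_inner from the device, so the <<<1,1>>> parent thread exists only to sequence the bitonic stages and the build requires dynamic parallelism (relocatable device code and, on CUDA, compute capability 3.5 or newer). The same sequencing can be driven from the host, which also makes it easy to round the block count up for inputs smaller than 512 elements. A sketch, using gpu_sort_inner exactly as defined above and assuming ndata is a power of two (which the bitonic network requires):

// Sketch: driving the bitonic stages from the host instead of a <<<1,1>>> parent kernel.
void sort_host_driven(int* d_data, int ndata)
{
    const int threads = 512;
    const int blocks  = (ndata + threads - 1) / threads;     // round up so small inputs still get one block
    for (int iter = 2; iter <= ndata; iter *= 2) {            // length of the bitonic sequences
        for (int iter2 = iter / 2; iter2 > 0; iter2 /= 2) {   // compare-exchange distance
            gpu_sort_inner<<<blocks, threads>>>(d_data, iter2, iter, ndata);
        }
    }
    cudaDeviceSynchronize();
}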
d52ece9fbc0bbd71c4b9b90ee283f359f01e9fb7.cu
#include <iostream> #include <cmath> #include "sortcu.h" using namespace std; #define THREADS_PER_BLOCK 8 #include <stdlib.h> #include <math.h> #include "sortcu.h" #include <stdio.h> __global__ void Egpu_merge(int *ndata) { //invert elements of second sublist //printf("%d\n", blockDim.x); int swap_pos; //int temp; //int nstep=0; int shift=0; int scale=*ndata; for(int i=0;i<*ndata;i++) { scale=(scale+1)/2; for (int j=0;j<=(*ndata/2)/blockDim.x;j++){ if(threadIdx.x>=scale) shift=(threadIdx.x/scale)*scale+(j*blockDim.x); swap_pos=threadIdx.x+scale+shift; // printf(" MERGE at %d:%d %d %d \n",i,j,threadIdx.x+shift,swap_pos); } //need to have loops of threads till //if(data[threadIdx.x+shift]>=data[swap_pos]){ //printf("swap done: %d %d\n",data[threadIdx.x+shift],data[swap_pos]); //temp=data[threadIdx.x+shift]; //data[threadIdx.x+shift]=data[swap_pos]; //data[swap_pos]=temp; //} __syncthreads(); }//for } __global__ void gpu_sort_inner(int *data,int iter2, int iter, int ndata) { int k=(-1*((2*threadIdx.x)/(iter))); int shift=0; int scale=iter; int swap_pos; for(int i=0;i<iter+1;i++) { scale=(scale+1)/2; if(threadIdx.x>=scale) shift=(threadIdx.x/scale)*scale; swap_pos=threadIdx.x+scale+shift; if (k<0){ //printf("SWAP at %d>%d: %d %d UP k: %d \n",iter,i,threadIdx.x+shift,swap_pos, k); if(data[threadIdx.x+shift]<data[swap_pos]){ //printf("swap done: %d %d\n",data[threadIdx.x+shift],data[swap_pos]); /*temp=data[threadIdx.x+shift]; data[threadIdx.x+shift]=data[swap_pos]; data[swap_pos]=temp;*/ } }else{ //printf("SWAP at %d>%d: %d %d DOWN k: %d \n",iter,i,threadIdx.x+shift,swap_pos,k); if(data[threadIdx.x+shift]>data[swap_pos]){ //printf("swap done: %d %d\n",data[threadIdx.x+shift],data[swap_pos]); /*temp=data[threadIdx.x+shift]; data[threadIdx.x+shift]=data[swap_pos]; data[swap_pos]=temp;*/ } } //__syncthreads(); if(scale<2) break; }//for //bitonic merge: NOT REQUIRED /*int shift=0; int scale=*ndata; for(int i=0;i<nstep;i++) { scale=(scale+1)/2; if(threadIdx.x>=scale) shift=(threadIdx.x/scale)*scale; swap_pos=threadIdx.x+scale+shift; printf("SWAP at %d: %d %d \n",i,threadIdx.x+shift,swap_pos); if(data[threadIdx.x+shift]>=data[swap_pos]){ //printf("swap done: %d %d\n",data[threadIdx.x+shift],data[swap_pos]); temp=data[threadIdx.x+shift]; data[threadIdx.x+shift]=data[swap_pos]; data[swap_pos]=temp; } __syncthreads(); }//for //}*/ int index= threadIdx.x + blockDim.x * blockIdx.x; int direction = index^iter2; if ((direction)>index) { /* UP ARROW*/ if ((index&iter)==0) { if (data[index]>data[direction]) { int temp; temp=data[index]; data[index]=data[direction]; data[direction]=temp; } } /*DOWN ARROW*/ if ((index&iter)!=0) { if (data[index]<data[direction]) { int temp; temp=data[index]; data[index]=data[direction]; data[direction]=temp; } } } __syncthreads(); } __global__ void gpu_sort(int *data, int ndata) { //cout<<"GPU_SORT called\n"; int num_thread=512; int num_blocks=ndata/512; int swap_pos; //int temp; //int nstep=0; int k=-1; for (int iter=2;iter<=ndata;iter=iter*2){ for (int iter2=iter/2;iter2>0;iter2=iter2/2){ gpu_sort_inner<<<num_blocks, num_thread>>>(data,iter2, iter, ndata); } } } void sort(int *data, int ndata){ int *d_data; //int *d_ndata; cudaMalloc((void **)&d_data, sizeof(int)*ndata); //cudaMalloc((void **)&d_ndata, sizeof(int)); cudaMemcpy(d_data, data, sizeof(int)*ndata, cudaMemcpyHostToDevice); //cudaMemcpy(d_ndata, &ndata, sizeof(int), cudaMemcpyHostToDevice); //call gpu_sort with input size of power 2 serialy: gpu_sort<<<1,1>>>(d_data,ndata); //call gpu_sort 
//Egpu_merge<<<2,(ndata+1)/4>>>(d_ndata);
    cudaMemcpy(data, d_data, sizeof(int)*ndata, cudaMemcpyDeviceToHost);
    cudaFree(d_data);   // release the device buffer
    //for (int i=0; i<ndata; i++)
    //    cout << data[i] << "\n";
}
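A quick end-to-end check of the sort() entry point above (a test harness added for illustration; it assumes sortcu.h declares void sort(int*, int) as defined here, and that the input length is a power of two, which the bitonic network requires):

// Sketch: host-side test of the GPU bitonic sort with a power-of-two input size.
#include <algorithm>
#include <cstdio>
#include <cstdlib>
#include <vector>
#include "sortcu.h"

int main()
{
    const int n = 1024;                  // must be a power of two
    std::vector<int> data(n);
    for (int& v : data) v = std::rand() % 10000;

    sort(data.data(), n);                // the GPU sort defined above

    bool ok = std::is_sorted(data.begin(), data.end());
    std::printf("sorted: %s\n", ok ? "yes" : "NO");
    return ok ? 0 : 1;
}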
0164cb3cd56f855a3474674a4df19c0f43af0cf8.hip
// !!! This is a file automatically generated by hipify!!! // includes, cuda #include <cstdint> #include <climits> #include <hip/hip_runtime.h> #include <helper_cuda.h> #include <cudaDefs.h> #include <imageManager.h> #include "arrayUtils.cuh" #define BENCHMARK_NUM_REPS 100 // number of repetitions for benchmarking #define TPB_1D 32 // ThreadsPerBlock in one dimension #define TPB_2D 1024 // ThreadsPerBlock = 16*16 (2D block) #define TPB_REDUCTION 512 // ThreadsPerBlock (1D block) hipError_t error = hipSuccess; hipDeviceProp_t deviceProp = hipDeviceProp_t(); using DT = uint8_t; // Working data type struct alignas(8) ResultType { float fitness; uint32_t idx; ResultType& operator=(ResultType&&) = default; //Forcing a move assignment operator to be generated by the compiler __host__ __device__ volatile ResultType& operator=(volatile const ResultType& other) volatile { fitness = other.fitness; idx = other.idx; return *this; } }; struct Image { uint32_t width = 0; uint32_t height = 0; uint32_t pitch = 0; DT* ptr = nullptr; }; void prepareData(const char* imageFileName, Image& img) { FIBITMAP* tmpA = ImageManager::GenericLoader(imageFileName, 0); img.width = FreeImage_GetWidth(tmpA); img.height = FreeImage_GetHeight(tmpA); img.pitch = FreeImage_GetPitch(tmpA); // FREEIMAGE align row data ... You have to use pitch instead of width //Create a memory block using UNIFIED MEMORY to store original image. This is a redundant copy, however the data will be ready to use directly by GPU. uint8_t* tmpB = nullptr; size_t imageSize = static_cast<size_t>(img.pitch * img.height * FreeImage_GetBPP(tmpA)) >> 3; checkCudaErrors(hipMallocManaged(&tmpB, imageSize)); checkCudaErrors(hipMemcpy(tmpB, FreeImage_GetBits(tmpA), imageSize, hipMemcpyHostToDevice)); //checkHostMatrix(tmpB, img.pitch, img.height, img.width, "%d ", "Reference"); FreeImage_Unload(tmpA); //Create a memory block using UNIFIED MEMORY to store DT data and convert tmpB -> img.ptr checkCudaErrors(hipMallocManaged(&img.ptr, img.width * img.height * sizeof(DT))); dim3 block{ 256,1,1 }; dim3 grid{ getNumberOfParts(img.width * img.height, 256), 1, 1 }; hipLaunchKernelGGL(( arrayReshape<uint8_t, DT>) , dim3(grid), block>> > (tmpB, img.width, img.height, img.pitch, img.width, img.height, img.width*sizeof(DT), img.ptr); //From now, we have a new pitch of the final data. img.pitch = img.width * sizeof(DT); //Some synchronization must be called when using UNIFIED MEMORY in async. Previous kernel was called asynchronously!!! hipDeviceSynchronize(); //checkHostMatrix(img.ptr, img.width * sizeof(DT), img.height, img.width, "%0.2f ", "Reference"); } //Every THREAD of 2D block [16x16] computes one final fitness value for a single pixel of the reference image. One corner of the query image is "virtually" attached to this pixel position. //A SINGLE THREAD compares the query image with the given region of the reference image. 
__global__ void find(const DT* __restrict__ ref, const uint32_t rWidth, const uint32_t rHeight, const DT* __restrict__ query, const uint32_t qWidth, const uint32_t qHeight, ResultType* __restrict__ blockResults) { uint32_t tid = threadIdx.x + threadIdx.y * blockDim.x; uint32_t rx = blockIdx.x * blockDim.x + threadIdx.x; uint32_t ry = blockIdx.y * blockDim.y + threadIdx.y; const uint32_t rxOffset = gridDim.x * blockDim.x; const uint32_t ryOffset = gridDim.y * blockDim.y; uint32_t qx, qy; const DT* r = nullptr; const DT* q = nullptr; __shared__ ResultType sData[TPB_2D]; ResultType tmp; sData[tid] = { FLT_MAX, ry * rWidth + rx }; while (ry <= rHeight-qHeight) { rx = blockIdx.x * blockDim.x + threadIdx.x; while (rx <= rWidth - qWidth) //It is supposed that we want to compare the whole pattern. It means that the query image must be completely inside the reference one. { tmp = {0.0f, ry * rWidth + rx }; r = &ref[ry * rWidth + rx]; //Pointer to starting ROW position in the reference image. q = &query[0]; //Pointer to starting ROW position in the query image. for (qy=0; qy < qHeight; qy++) //Each thread will process the whole query image { for (qx = 0; qx < qWidth; qx++) //Each thread will process the whole query image { tmp.fitness += (r[qx] - q[qx]) * (r[qx] - q[qx]); //Cummulate the value } r += rWidth; //Move one row down in the reference image. q += qWidth; //Move one row down in the query image. } if (tmp.fitness < sData[tid].fitness) { sData[tid] = tmp; } rx+= rxOffset; //Move to another pixel that will be the starting position for the comparison. } //Move down ry+= ryOffset; } __syncthreads(); //The parallel reduction will start here, all WARPS has to finish previous instructions. for (uint32_t s = (TPB_2D >> 1); s > 32; s >>= 1) //This can be UNROLLED when the TPB is fixed for the application { if (tid < s) { if (sData[tid + s].fitness < sData[tid].fitness) { sData[tid] = sData[tid + s]; } } __syncthreads(); } if (tid < 32) //Only one warm is active here, no sync is needed. { volatile ResultType* vsData = sData; vsData[tid] = (vsData[tid].fitness < vsData[tid + 32].fitness) ? vsData[tid] : vsData[tid + 32]; vsData[tid] = (vsData[tid].fitness < vsData[tid + 16].fitness) ? vsData[tid] : vsData[tid + 16]; vsData[tid] = (vsData[tid].fitness < vsData[tid + 8].fitness) ? vsData[tid] : vsData[tid + 8]; vsData[tid] = (vsData[tid].fitness < vsData[tid + 4].fitness) ? vsData[tid] : vsData[tid + 4]; vsData[tid] = (vsData[tid].fitness < vsData[tid + 2].fitness) ? vsData[tid] : vsData[tid + 2]; vsData[tid] = (vsData[tid].fitness < vsData[tid + 1].fitness) ? 
vsData[tid] : vsData[tid + 1]; } if (tid == 0) //0-th thread stores the final BEST result for a given block { blockResults[blockIdx.x] = sData[0]; } } //One 1D block reduction __global__ void getBest(ResultType* data, const uint32_t length) { __shared__ ResultType sData[TPB_REDUCTION]; uint32_t tid = threadIdx.x; const uint32_t offset = blockDim.x; sData[tid] = { FLT_MAX , tid }; //Initial fill of the shared memory if (tid < length) { sData[tid] = data[tid]; } uint32_t nextId = tid + offset; ResultType* ptr = &data[nextId]; //Pointer to global mem; while (nextId < length) //Compare rest of data from the global memory { if (ptr->fitness < sData[tid].fitness) { sData[tid] = *ptr; } ptr += offset; nextId += offset; } __syncthreads(); //Start reduction from now for (uint32_t s = (TPB_REDUCTION >> 1); s > 32; s >>= 1) //This can be UNROLLED when the TPB is fixed for the application { if (tid < s) { if (sData[tid + s].fitness < sData[tid].fitness) { sData[tid] = sData[tid + s]; } } __syncthreads(); } if (tid < 32) //Only one warp is active here, no sync is needed. { volatile ResultType* vsData = sData; vsData[tid] = (vsData[tid].fitness < vsData[tid + 32].fitness) ? vsData[tid] : vsData[tid + 32]; vsData[tid] = (vsData[tid].fitness < vsData[tid + 16].fitness) ? vsData[tid] : vsData[tid + 16]; vsData[tid] = (vsData[tid].fitness < vsData[tid + 8].fitness) ? vsData[tid] : vsData[tid + 8]; vsData[tid] = (vsData[tid].fitness < vsData[tid + 4].fitness) ? vsData[tid] : vsData[tid + 4]; vsData[tid] = (vsData[tid].fitness < vsData[tid + 2].fitness) ? vsData[tid] : vsData[tid + 2]; vsData[tid] = (vsData[tid].fitness < vsData[tid + 1].fitness) ? vsData[tid] : vsData[tid + 1]; } if (tid == 0) //The zero thread saves the result into Global mem { data[0] = sData[0]; } } int main(int argc, char* argv[]) { initializeCUDA(deviceProp); Image ref; Image query; FreeImage_Initialise(); prepareData("./Data/reference.tif", ref); prepareData("./Data/query.tif", query); FreeImage_DeInitialise(); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); //How many block of the size of [16x16] will process the reference image? //Too much to manage. That's we use a 1D grid of [16x16] blocks that will move down the image. //This we need (((ref.width - query.width + 1) + 16 - 1)/16) blocks!!! uint32_t noBlocks = ((ref.width - query.width + 1) + TPB_1D - 1) / TPB_1D; ResultType* blockResults = nullptr; size_t blockResultsSize = static_cast<size_t>(noBlocks *sizeof(ResultType)); checkCudaErrors(hipMallocManaged(&blockResults, blockResultsSize)); checkCudaErrors(hipEventRecord(start, 0)); //1. Try to compute all possible matches. dim3 block{ TPB_1D , TPB_1D ,1 }; dim3 grid{ noBlocks, 1, 1 }; find, grid, block, 0, ref.ptr, ref.width, ref.height, query.ptr, query.width, query.height, blockResults); //2. 
Search for the best match block = { TPB_REDUCTION ,1,1 }; grid = { 1, 1, 1 }; hipLaunchKernelGGL(( getBest), dim3(grid), dim3(block), 0, 0, blockResults, noBlocks); hipDeviceSynchronize(); checkCudaErrors(hipEventRecord(stop, 0)); checkCudaErrors(hipEventSynchronize(stop)); float elapsedTime; checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop)); hipEventDestroy(start); hipEventDestroy(stop); printf("Best fitness value: %f\n", blockResults[0].fitness); printf("Winner index: %u\n", blockResults[0].idx); printf("Winner's LEFT-TOP CORNER X: %u\n", blockResults[0].idx % ref.width); printf("Winner's LEFT-TOP CORNER Y: %u\n", ref.height - (blockResults[0].idx / ref.width) - query.height); printf("Computation time: %f ms\n", elapsedTime); if (ref.ptr) hipFree(ref.ptr); if (query.ptr) hipFree(query.ptr); if (blockResults) hipFree(blockResults); }
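find evaluates, for every admissible placement of the query image inside the reference, the sum of squared differences (SSD) and keeps a per-block minimum in shared memory; getBest then reduces those per-block winners to a single ResultType. A brute-force CPU reference with the same fitness and index convention (it assumes the packed row-major layout produced by prepareData and is only practical for small images):

// Sketch: CPU reference for the SSD template-matching fitness minimized by find().
#include <cstdint>
#include <cfloat>

// ref and query are packed row-major arrays (width * height) of 8-bit pixels.
void best_match_cpu(const uint8_t* ref, uint32_t rW, uint32_t rH,
                    const uint8_t* query, uint32_t qW, uint32_t qH,
                    float& bestFitness, uint32_t& bestIdx)
{
    bestFitness = FLT_MAX;
    bestIdx = 0;
    for (uint32_t ry = 0; ry + qH <= rH; ++ry)
        for (uint32_t rx = 0; rx + qW <= rW; ++rx) {
            float fitness = 0.0f;
            for (uint32_t qy = 0; qy < qH; ++qy)
                for (uint32_t qx = 0; qx < qW; ++qx) {
                    float d = float(ref[(ry + qy) * rW + rx + qx]) - float(query[qy * qW + qx]);
                    fitness += d * d;
                }
            if (fitness < bestFitness) {
                bestFitness = fitness;
                bestIdx = ry * rW + rx;   // same winner-index convention as the kernel
            }
        }
}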
0164cb3cd56f855a3474674a4df19c0f43af0cf8.cu
// includes, cuda #include <cstdint> #include <climits> #include <cuda_runtime.h> #include <helper_cuda.h> #include <cudaDefs.h> #include <imageManager.h> #include "arrayUtils.cuh" #define BENCHMARK_NUM_REPS 100 // number of repetitions for benchmarking #define TPB_1D 32 // ThreadsPerBlock in one dimension #define TPB_2D 1024 // ThreadsPerBlock = 16*16 (2D block) #define TPB_REDUCTION 512 // ThreadsPerBlock (1D block) cudaError_t error = cudaSuccess; cudaDeviceProp deviceProp = cudaDeviceProp(); using DT = uint8_t; // Working data type struct alignas(8) ResultType { float fitness; uint32_t idx; ResultType& operator=(ResultType&&) = default; //Forcing a move assignment operator to be generated by the compiler __host__ __device__ volatile ResultType& operator=(volatile const ResultType& other) volatile { fitness = other.fitness; idx = other.idx; return *this; } }; struct Image { uint32_t width = 0; uint32_t height = 0; uint32_t pitch = 0; DT* ptr = nullptr; }; void prepareData(const char* imageFileName, Image& img) { FIBITMAP* tmpA = ImageManager::GenericLoader(imageFileName, 0); img.width = FreeImage_GetWidth(tmpA); img.height = FreeImage_GetHeight(tmpA); img.pitch = FreeImage_GetPitch(tmpA); // FREEIMAGE align row data ... You have to use pitch instead of width //Create a memory block using UNIFIED MEMORY to store original image. This is a redundant copy, however the data will be ready to use directly by GPU. uint8_t* tmpB = nullptr; size_t imageSize = static_cast<size_t>(img.pitch * img.height * FreeImage_GetBPP(tmpA)) >> 3; checkCudaErrors(cudaMallocManaged(&tmpB, imageSize)); checkCudaErrors(cudaMemcpy(tmpB, FreeImage_GetBits(tmpA), imageSize, cudaMemcpyHostToDevice)); //checkHostMatrix(tmpB, img.pitch, img.height, img.width, "%d ", "Reference"); FreeImage_Unload(tmpA); //Create a memory block using UNIFIED MEMORY to store DT data and convert tmpB -> img.ptr checkCudaErrors(cudaMallocManaged(&img.ptr, img.width * img.height * sizeof(DT))); dim3 block{ 256,1,1 }; dim3 grid{ getNumberOfParts(img.width * img.height, 256), 1, 1 }; arrayReshape<uint8_t, DT> <<<grid, block>> > (tmpB, img.width, img.height, img.pitch, img.width, img.height, img.width*sizeof(DT), img.ptr); //From now, we have a new pitch of the final data. img.pitch = img.width * sizeof(DT); //Some synchronization must be called when using UNIFIED MEMORY in async. Previous kernel was called asynchronously!!! cudaDeviceSynchronize(); //checkHostMatrix(img.ptr, img.width * sizeof(DT), img.height, img.width, "%0.2f ", "Reference"); } //Every THREAD of 2D block [16x16] computes one final fitness value for a single pixel of the reference image. One corner of the query image is "virtually" attached to this pixel position. //A SINGLE THREAD compares the query image with the given region of the reference image. 
__global__ void find(const DT* __restrict__ ref, const uint32_t rWidth, const uint32_t rHeight, const DT* __restrict__ query, const uint32_t qWidth, const uint32_t qHeight, ResultType* __restrict__ blockResults) { uint32_t tid = threadIdx.x + threadIdx.y * blockDim.x; uint32_t rx = blockIdx.x * blockDim.x + threadIdx.x; uint32_t ry = blockIdx.y * blockDim.y + threadIdx.y; const uint32_t rxOffset = gridDim.x * blockDim.x; const uint32_t ryOffset = gridDim.y * blockDim.y; uint32_t qx, qy; const DT* r = nullptr; const DT* q = nullptr; __shared__ ResultType sData[TPB_2D]; ResultType tmp; sData[tid] = { FLT_MAX, ry * rWidth + rx }; while (ry <= rHeight-qHeight) { rx = blockIdx.x * blockDim.x + threadIdx.x; while (rx <= rWidth - qWidth) //It is supposed that we want to compare the whole pattern. It means that the query image must be completely inside the reference one. { tmp = {0.0f, ry * rWidth + rx }; r = &ref[ry * rWidth + rx]; //Pointer to starting ROW position in the reference image. q = &query[0]; //Pointer to starting ROW position in the query image. for (qy=0; qy < qHeight; qy++) //Each thread will process the whole query image { for (qx = 0; qx < qWidth; qx++) //Each thread will process the whole query image { tmp.fitness += (r[qx] - q[qx]) * (r[qx] - q[qx]); //Cummulate the value } r += rWidth; //Move one row down in the reference image. q += qWidth; //Move one row down in the query image. } if (tmp.fitness < sData[tid].fitness) { sData[tid] = tmp; } rx+= rxOffset; //Move to another pixel that will be the starting position for the comparison. } //Move down ry+= ryOffset; } __syncthreads(); //The parallel reduction will start here, all WARPS has to finish previous instructions. for (uint32_t s = (TPB_2D >> 1); s > 32; s >>= 1) //This can be UNROLLED when the TPB is fixed for the application { if (tid < s) { if (sData[tid + s].fitness < sData[tid].fitness) { sData[tid] = sData[tid + s]; } } __syncthreads(); } if (tid < 32) //Only one warm is active here, no sync is needed. { volatile ResultType* vsData = sData; vsData[tid] = (vsData[tid].fitness < vsData[tid + 32].fitness) ? vsData[tid] : vsData[tid + 32]; vsData[tid] = (vsData[tid].fitness < vsData[tid + 16].fitness) ? vsData[tid] : vsData[tid + 16]; vsData[tid] = (vsData[tid].fitness < vsData[tid + 8].fitness) ? vsData[tid] : vsData[tid + 8]; vsData[tid] = (vsData[tid].fitness < vsData[tid + 4].fitness) ? vsData[tid] : vsData[tid + 4]; vsData[tid] = (vsData[tid].fitness < vsData[tid + 2].fitness) ? vsData[tid] : vsData[tid + 2]; vsData[tid] = (vsData[tid].fitness < vsData[tid + 1].fitness) ? 
vsData[tid] : vsData[tid + 1]; } if (tid == 0) //0-th thread stores the final BEST result for a given block { blockResults[blockIdx.x] = sData[0]; } } //One 1D block reduction __global__ void getBest(ResultType* data, const uint32_t length) { __shared__ ResultType sData[TPB_REDUCTION]; uint32_t tid = threadIdx.x; const uint32_t offset = blockDim.x; sData[tid] = { FLT_MAX , tid }; //Initial fill of the shared memory if (tid < length) { sData[tid] = data[tid]; } uint32_t nextId = tid + offset; ResultType* ptr = &data[nextId]; //Pointer to global mem; while (nextId < length) //Compare rest of data from the global memory { if (ptr->fitness < sData[tid].fitness) { sData[tid] = *ptr; } ptr += offset; nextId += offset; } __syncthreads(); //Start reduction from now for (uint32_t s = (TPB_REDUCTION >> 1); s > 32; s >>= 1) //This can be UNROLLED when the TPB is fixed for the application { if (tid < s) { if (sData[tid + s].fitness < sData[tid].fitness) { sData[tid] = sData[tid + s]; } } __syncthreads(); } if (tid < 32) //Only one warp is active here, no sync is needed. { volatile ResultType* vsData = sData; vsData[tid] = (vsData[tid].fitness < vsData[tid + 32].fitness) ? vsData[tid] : vsData[tid + 32]; vsData[tid] = (vsData[tid].fitness < vsData[tid + 16].fitness) ? vsData[tid] : vsData[tid + 16]; vsData[tid] = (vsData[tid].fitness < vsData[tid + 8].fitness) ? vsData[tid] : vsData[tid + 8]; vsData[tid] = (vsData[tid].fitness < vsData[tid + 4].fitness) ? vsData[tid] : vsData[tid + 4]; vsData[tid] = (vsData[tid].fitness < vsData[tid + 2].fitness) ? vsData[tid] : vsData[tid + 2]; vsData[tid] = (vsData[tid].fitness < vsData[tid + 1].fitness) ? vsData[tid] : vsData[tid + 1]; } if (tid == 0) //The zero thread saves the result into Global mem { data[0] = sData[0]; } } int main(int argc, char* argv[]) { initializeCUDA(deviceProp); Image ref; Image query; FreeImage_Initialise(); prepareData("./Data/reference.tif", ref); prepareData("./Data/query.tif", query); FreeImage_DeInitialise(); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); //How many block of the size of [16x16] will process the reference image? //Too much to manage. That's we use a 1D grid of [16x16] blocks that will move down the image. //This we need (((ref.width - query.width + 1) + 16 - 1)/16) blocks!!! uint32_t noBlocks = ((ref.width - query.width + 1) + TPB_1D - 1) / TPB_1D; ResultType* blockResults = nullptr; size_t blockResultsSize = static_cast<size_t>(noBlocks *sizeof(ResultType)); checkCudaErrors(cudaMallocManaged(&blockResults, blockResultsSize)); checkCudaErrors(cudaEventRecord(start, 0)); //1. Try to compute all possible matches. dim3 block{ TPB_1D , TPB_1D ,1 }; dim3 grid{ noBlocks, 1, 1 }; find<<<grid, block>>>(ref.ptr, ref.width, ref.height, query.ptr, query.width, query.height, blockResults); //2. 
Search for the best match block = { TPB_REDUCTION ,1,1 }; grid = { 1, 1, 1 }; getBest<<<grid, block>>>(blockResults, noBlocks); cudaDeviceSynchronize(); checkCudaErrors(cudaEventRecord(stop, 0)); checkCudaErrors(cudaEventSynchronize(stop)); float elapsedTime; checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop)); cudaEventDestroy(start); cudaEventDestroy(stop); printf("Best fitness value: %f\n", blockResults[0].fitness); printf("Winner index: %u\n", blockResults[0].idx); printf("Winner's LEFT-TOP CORNER X: %u\n", blockResults[0].idx % ref.width); printf("Winner's LEFT-TOP CORNER Y: %u\n", ref.height - (blockResults[0].idx / ref.width) - query.height); printf("Computation time: %f ms\n", elapsedTime); if (ref.ptr) cudaFree(ref.ptr); if (query.ptr) cudaFree(query.ptr); if (blockResults) cudaFree(blockResults); }
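The tid < 32 tails of find and getBest use the classic volatile shared-memory idiom, which assumes warp-synchronous execution; since Volta introduced independent thread scheduling, that assumption needs explicit __syncwarp() calls between steps to stay correct. A shuffle-based warp minimum sidesteps the issue entirely (a sketch built on the ResultType defined above, not part of the original code):

// Sketch: warp-level minimum of ResultType via shuffles instead of the volatile
// shared-memory idiom; safe under independent thread scheduling (CUDA 9+).
__device__ ResultType warpMin(ResultType v)
{
    for (int offset = 16; offset > 0; offset >>= 1) {
        float    f = __shfl_down_sync(0xffffffff, v.fitness, offset);
        uint32_t i = __shfl_down_sync(0xffffffff, v.idx,     offset);
        if (f < v.fitness) { v.fitness = f; v.idx = i; }
    }
    return v;   // lane 0 ends up holding the warp minimum
}

In find, the threads with tid < 32 would first fold sData[tid + 32] into sData[tid] (as the first volatile line already does) and then call warpMin, with thread 0 writing the block result.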
b2916dda1ff63b2346af0dff90f2c090e3c271fb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Version 20180514-01: Fixed bug in flaton initial conditions. #include <fstream> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <hipfft.h> #include <hipfftXt.h> #include <common/select_GPU.cu> #include <common/complex.cu> #include <1D/parameters.hpp> using namespace std; //////////////////////////// //Initialization Functions// //////////////////////////// __global__ void PreFFTInitPhi(float2 d_PhiC[X], int f) { const uint index = {blockIdx.x * blockDim.x + threadIdx.x}; const uint stride = {blockDim.x * gridDim.x}; hiprandState_t state; hiprand_init(index*(f+1) + 2*X*seed, 0, 0, &state); int kx; double qsq,r,theta; for (int x=index;x<X;x+= stride) { kx = (x+X/2-1)%X - X/2 + 1; qsq = 2*M_PI * 2*M_PI * (kx/(X*dx)) * (kx/(X*dx)); if (thermal_spectrum) { r = (1/sqrt(X*dx)) * ((hiprand(&state)%1000+0.5)/1000) / sqrt( exp( sqrt(qsq+1) ) - 1 ); // crude thermal spectrum } else { r = (1/sqrt(X*dx)) * ((hiprand(&state)%1000+0.5)/1000) / sqrt(qsq+1); // arbitrary non-thermal spectrum } theta = 2*M_PI * (hiprand(&state)%1000+0.5)/1000; // random phase d_PhiC[x].x = r*cos(theta); // Real part d_PhiC[x].y = r*sin(theta); // Imaginary part } } __global__ void PreFFTInitPhiDot(float2 d_PhiDotC[X], int f) { const uint index = {blockIdx.x * blockDim.x + threadIdx.x}; const uint stride = {blockDim.x * gridDim.x}; hiprandState_t state; hiprand_init(index*(f+1) + X + 2*X*seed, 0, 0, &state); int kx; double qsq,r,theta; for (int x=index;x<X;x+= stride) { kx = (x+X/2-1)%X - X/2 + 1; qsq = 2*M_PI * 2*M_PI * (kx/(X*dx)) * (kx/(X*dx)); if (thermal_spectrum) { r = (1/sqrt(X*dx)) * ((hiprand(&state)%1000+0.5)/1000) / sqrt( exp( sqrt(qsq+1) ) - 1 ); // crude thermal spectrum } else { r = (1/sqrt(X*dx)) * ((hiprand(&state)%1000+0.5)/1000) / sqrt(qsq+1); // arbitrary non-thermal spectrum } theta = 2*M_PI * (hiprand(&state)%1000+0.5)/1000; // random phase d_PhiDotC[x].x = r*cos(theta); // Real part d_PhiDotC[x].y = r*sin(theta); // Imaginary part } } __global__ void cpy(float2 d_PhiC[X], float2 d_PhiDotC[X], Complex<double> d_Phi[F][X], Complex<double> d_PhiDot[F][X], int f) { const uint index = {blockIdx.x * blockDim.x + threadIdx.x}; const uint stride = {blockDim.x * gridDim.x}; for (int x=index; x<X; x+=stride) { d_Phi[f][x].x = d_PhiC[x].x; d_Phi[f][x].y = d_PhiC[x].y; d_PhiDot[f][x].x = d_PhiDotC[x].x; d_PhiDot[f][x].y = d_PhiDotC[x].y; } } __global__ void PostFFTInit(Complex<double> d_Phi[F][X], Complex<double> d_PhiDot[F][X]) { const uint index = {blockIdx.x * blockDim.x + threadIdx.x}; const uint stride = {blockDim.x * gridDim.x}; // Initial conditions for (int x=index; x<X; x+=stride) { d_Phi[0][x] = (m0/phi0) * d_Phi[0][x]; d_PhiDot[0][x] = (m0/phi0) * d_PhiDot[0][x]; for (short int f=1; f<5; f++) { d_Phi[f][x] = (m0/l0) * d_Phi[f][x]; d_PhiDot[f][x] = (m0/l0) * d_PhiDot[f][x]; } d_Phi[3][x].x += cos((M_PI-argAnu)/2.0); // real l d_Phi[3][x].y += sin((M_PI-argAnu)/2.0); // imag l d_Phi[1][x].x = sqrt( modsq(d_Phi[2][x]) + modsq(d_Phi[3][x]) - 0.5*modsq(d_Phi[4][x]) + epsilon*epsilon ); // real h_u d_Phi[1][x].y = 0.0; // imag h_u } } void print(Complex<double> Phi[F][X], Complex<double> PhiDot[F][X], float ndt, ofstream &PhiFile, ofstream &PhiDotFile) { Complex<float> tmp[F]; for (uint32_t x=0; x<X; x++) { PhiFile.write( reinterpret_cast<const char*>(&ndt), sizeof(ndt) ); PhiFile.write( reinterpret_cast<const char*>(&x), sizeof(x) ); for (int f=0; f<F; f++) { tmp[f].x = Phi[f][x].x; tmp[f].y = 
Phi[f][x].y; } PhiFile.write( reinterpret_cast<const char*>(&tmp), sizeof(tmp) ); } for (uint32_t x=0; x<X; x++) { PhiDotFile.write( reinterpret_cast<const char*>(&ndt), sizeof(ndt) ); PhiDotFile.write( reinterpret_cast<const char*>(&x), sizeof(x) ); for (int f=0; f<F; f++) { tmp[f].x = PhiDot[f][x].x; tmp[f].y = PhiDot[f][x].y; } PhiDotFile.write( reinterpret_cast<const char*>(&tmp), sizeof(tmp) ); } } Complex<double> Phi[F][X]; Complex<double> PhiDot[F][X]; int main() { cout << "\n////////////////////////////////////////////////////////////////////////////////\n\n"; SelectDevice(); Complex<double> (*d_Phi)[X]; Complex<double> (*d_PhiDot)[X]; hipMalloc(&d_Phi, (F*X)*sizeof(Complex<double>)); hipMalloc(&d_PhiDot, (F*X)*sizeof(Complex<double>)); float2 (*d_PhiC); float2 (*d_PhiDotC); hipMalloc(&d_PhiC, (X)*sizeof(float2)); hipMalloc(&d_PhiDotC, (X)*sizeof(float2)); dim3 block = {1024}; dim3 grid = {(X+block.x-1)/block.x}; hipError_t __err; cout << "\n=============================================\n"; cout << "|| ||"; cout << "\n|| Initializing ||" << endl; cout << "|| ||"; __err = hipGetLastError(); if (__err != hipSuccess) { cout << "|| ||\n"; cout << "|| Failed to create variables ||\n"; cout << "|| " << setw(40) << left << hipGetErrorString(__err) << "||\n"; cout << "=============================================\n"; return -1; } ////////////////// //Initialization// ////////////////// // FFT initialitation cout << "\n|| Running inverse fast fourier transform ||\n"; cout << "|| ||\n"; hipfftHandle plan; hipfftPlan1d( &plan, X, HIPFFT_C2C, 1); for (short int f=0; f<F; f++) { // Pre FFT initialization hipLaunchKernelGGL(( PreFFTInitPhi), dim3(grid), dim3(block), 0, 0, d_PhiC, f); hipLaunchKernelGGL(( PreFFTInitPhiDot), dim3(grid), dim3(block), 0, 0, d_PhiDotC, f); hipDeviceSynchronize(); hipfftExecC2C( plan, (hipfftComplex *) d_PhiC, (hipfftComplex *) d_PhiC, HIPFFT_BACKWARD ); hipfftExecC2C( plan, (hipfftComplex *) d_PhiDotC, (hipfftComplex *) d_PhiDotC, HIPFFT_BACKWARD ); hipLaunchKernelGGL(( cpy), dim3(grid), dim3(block), 0, 0, d_PhiC, d_PhiDotC, d_Phi, d_PhiDot, f); hipDeviceSynchronize(); } hipfftDestroy(plan); hipFree(d_PhiC); hipFree(d_PhiDotC); // Post FFT initialization hipLaunchKernelGGL(( PostFFTInit), dim3(grid), dim3(block), 0, 0, d_Phi, d_PhiDot); hipDeviceSynchronize(); hipMemcpy(Phi, d_Phi, (F*X)*sizeof(Complex<double>), hipMemcpyDeviceToHost); hipMemcpy(PhiDot, d_PhiDot, (F*X)*sizeof(Complex<double>), hipMemcpyDeviceToHost); // Printing data ofstream PhiFile("initialPhi.bin", ios::binary); ofstream PhiDotFile("initialPhiDot.bin", ios::binary); print(Phi, PhiDot, 0, PhiFile, PhiDotFile); PhiFile.close(); PhiDotFile.close(); hipFree(d_Phi); hipFree(d_PhiDot); __err = hipGetLastError(); if (__err != hipSuccess) { cout << "|| ||\n"; cout << "|| Failed to create variables ||\n"; cout << "|| " << setw(40) << left << hipGetErrorString(__err) << "||\n"; cout << "=============================================\n"; return -1; } cout << "|| Completed Initializing ||\n"; cout << "=============================================\n"; cout << "\n\n////////////////////////////////////////////////////////////////////////////////\n" << endl; return 0; }
b2916dda1ff63b2346af0dff90f2c090e3c271fb.cu
// Version 20180514-01: Fixed bug in flaton initial conditions. #include <fstream> #include <curand.h> #include <curand_kernel.h> #include <cufft.h> #include <cufftXt.h> #include <common/select_GPU.cu> #include <common/complex.cu> #include <1D/parameters.hpp> using namespace std; //////////////////////////// //Initialization Functions// //////////////////////////// __global__ void PreFFTInitPhi(float2 d_PhiC[X], int f) { const uint index = {blockIdx.x * blockDim.x + threadIdx.x}; const uint stride = {blockDim.x * gridDim.x}; curandState_t state; curand_init(index*(f+1) + 2*X*seed, 0, 0, &state); int kx; double qsq,r,theta; for (int x=index;x<X;x+= stride) { kx = (x+X/2-1)%X - X/2 + 1; qsq = 2*M_PI * 2*M_PI * (kx/(X*dx)) * (kx/(X*dx)); if (thermal_spectrum) { r = (1/sqrt(X*dx)) * ((curand(&state)%1000+0.5)/1000) / sqrt( exp( sqrt(qsq+1) ) - 1 ); // crude thermal spectrum } else { r = (1/sqrt(X*dx)) * ((curand(&state)%1000+0.5)/1000) / sqrt(qsq+1); // arbitrary non-thermal spectrum } theta = 2*M_PI * (curand(&state)%1000+0.5)/1000; // random phase d_PhiC[x].x = r*cos(theta); // Real part d_PhiC[x].y = r*sin(theta); // Imaginary part } } __global__ void PreFFTInitPhiDot(float2 d_PhiDotC[X], int f) { const uint index = {blockIdx.x * blockDim.x + threadIdx.x}; const uint stride = {blockDim.x * gridDim.x}; curandState_t state; curand_init(index*(f+1) + X + 2*X*seed, 0, 0, &state); int kx; double qsq,r,theta; for (int x=index;x<X;x+= stride) { kx = (x+X/2-1)%X - X/2 + 1; qsq = 2*M_PI * 2*M_PI * (kx/(X*dx)) * (kx/(X*dx)); if (thermal_spectrum) { r = (1/sqrt(X*dx)) * ((curand(&state)%1000+0.5)/1000) / sqrt( exp( sqrt(qsq+1) ) - 1 ); // crude thermal spectrum } else { r = (1/sqrt(X*dx)) * ((curand(&state)%1000+0.5)/1000) / sqrt(qsq+1); // arbitrary non-thermal spectrum } theta = 2*M_PI * (curand(&state)%1000+0.5)/1000; // random phase d_PhiDotC[x].x = r*cos(theta); // Real part d_PhiDotC[x].y = r*sin(theta); // Imaginary part } } __global__ void cpy(float2 d_PhiC[X], float2 d_PhiDotC[X], Complex<double> d_Phi[F][X], Complex<double> d_PhiDot[F][X], int f) { const uint index = {blockIdx.x * blockDim.x + threadIdx.x}; const uint stride = {blockDim.x * gridDim.x}; for (int x=index; x<X; x+=stride) { d_Phi[f][x].x = d_PhiC[x].x; d_Phi[f][x].y = d_PhiC[x].y; d_PhiDot[f][x].x = d_PhiDotC[x].x; d_PhiDot[f][x].y = d_PhiDotC[x].y; } } __global__ void PostFFTInit(Complex<double> d_Phi[F][X], Complex<double> d_PhiDot[F][X]) { const uint index = {blockIdx.x * blockDim.x + threadIdx.x}; const uint stride = {blockDim.x * gridDim.x}; // Initial conditions for (int x=index; x<X; x+=stride) { d_Phi[0][x] = (m0/phi0) * d_Phi[0][x]; d_PhiDot[0][x] = (m0/phi0) * d_PhiDot[0][x]; for (short int f=1; f<5; f++) { d_Phi[f][x] = (m0/l0) * d_Phi[f][x]; d_PhiDot[f][x] = (m0/l0) * d_PhiDot[f][x]; } d_Phi[3][x].x += cos((M_PI-argAnu)/2.0); // real l d_Phi[3][x].y += sin((M_PI-argAnu)/2.0); // imag l d_Phi[1][x].x = sqrt( modsq(d_Phi[2][x]) + modsq(d_Phi[3][x]) - 0.5*modsq(d_Phi[4][x]) + epsilon*epsilon ); // real h_u d_Phi[1][x].y = 0.0; // imag h_u } } void print(Complex<double> Phi[F][X], Complex<double> PhiDot[F][X], float ndt, ofstream &PhiFile, ofstream &PhiDotFile) { Complex<float> tmp[F]; for (uint32_t x=0; x<X; x++) { PhiFile.write( reinterpret_cast<const char*>(&ndt), sizeof(ndt) ); PhiFile.write( reinterpret_cast<const char*>(&x), sizeof(x) ); for (int f=0; f<F; f++) { tmp[f].x = Phi[f][x].x; tmp[f].y = Phi[f][x].y; } PhiFile.write( reinterpret_cast<const char*>(&tmp), sizeof(tmp) ); } for (uint32_t x=0; x<X; x++) { 
PhiDotFile.write( reinterpret_cast<const char*>(&ndt), sizeof(ndt) ); PhiDotFile.write( reinterpret_cast<const char*>(&x), sizeof(x) ); for (int f=0; f<F; f++) { tmp[f].x = PhiDot[f][x].x; tmp[f].y = PhiDot[f][x].y; } PhiDotFile.write( reinterpret_cast<const char*>(&tmp), sizeof(tmp) ); } } Complex<double> Phi[F][X]; Complex<double> PhiDot[F][X]; int main() { cout << "\n////////////////////////////////////////////////////////////////////////////////\n\n"; SelectDevice(); Complex<double> (*d_Phi)[X]; Complex<double> (*d_PhiDot)[X]; cudaMalloc(&d_Phi, (F*X)*sizeof(Complex<double>)); cudaMalloc(&d_PhiDot, (F*X)*sizeof(Complex<double>)); float2 (*d_PhiC); float2 (*d_PhiDotC); cudaMalloc(&d_PhiC, (X)*sizeof(float2)); cudaMalloc(&d_PhiDotC, (X)*sizeof(float2)); dim3 block = {1024}; dim3 grid = {(X+block.x-1)/block.x}; cudaError_t __err; cout << "\n=============================================\n"; cout << "|| ||"; cout << "\n|| Initializing ||" << endl; cout << "|| ||"; __err = cudaGetLastError(); if (__err != cudaSuccess) { cout << "|| ||\n"; cout << "|| Failed to create variables ||\n"; cout << "|| " << setw(40) << left << cudaGetErrorString(__err) << "||\n"; cout << "=============================================\n"; return -1; } ////////////////// //Initialization// ////////////////// // FFT initialitation cout << "\n|| Running inverse fast fourier transform ||\n"; cout << "|| ||\n"; cufftHandle plan; cufftPlan1d( &plan, X, CUFFT_C2C, 1); for (short int f=0; f<F; f++) { // Pre FFT initialization PreFFTInitPhi<<<grid, block>>>(d_PhiC, f); PreFFTInitPhiDot<<<grid, block>>>(d_PhiDotC, f); cudaDeviceSynchronize(); cufftExecC2C( plan, (cufftComplex *) d_PhiC, (cufftComplex *) d_PhiC, CUFFT_INVERSE ); cufftExecC2C( plan, (cufftComplex *) d_PhiDotC, (cufftComplex *) d_PhiDotC, CUFFT_INVERSE ); cpy<<<grid, block>>>(d_PhiC, d_PhiDotC, d_Phi, d_PhiDot, f); cudaDeviceSynchronize(); } cufftDestroy(plan); cudaFree(d_PhiC); cudaFree(d_PhiDotC); // Post FFT initialization PostFFTInit<<<grid, block>>>( d_Phi, d_PhiDot); cudaDeviceSynchronize(); cudaMemcpy(Phi, d_Phi, (F*X)*sizeof(Complex<double>), cudaMemcpyDeviceToHost); cudaMemcpy(PhiDot, d_PhiDot, (F*X)*sizeof(Complex<double>), cudaMemcpyDeviceToHost); // Printing data ofstream PhiFile("initialPhi.bin", ios::binary); ofstream PhiDotFile("initialPhiDot.bin", ios::binary); print(Phi, PhiDot, 0, PhiFile, PhiDotFile); PhiFile.close(); PhiDotFile.close(); cudaFree(d_Phi); cudaFree(d_PhiDot); __err = cudaGetLastError(); if (__err != cudaSuccess) { cout << "|| ||\n"; cout << "|| Failed to create variables ||\n"; cout << "|| " << setw(40) << left << cudaGetErrorString(__err) << "||\n"; cout << "=============================================\n"; return -1; } cout << "|| Completed Initializing ||\n"; cout << "=============================================\n"; cout << "\n\n////////////////////////////////////////////////////////////////////////////////\n" << endl; return 0; }
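The pair above is dominated by the mechanical part of the hipify rewrite: triple-chevron launches become hipLaunchKernelGGL calls, cuRAND/cuFFT identifiers gain hiprand/hipfft prefixes, and CUFFT_INVERSE maps to HIPFFT_BACKWARD. A minimal sketch of that launch mapping, using a hypothetical scale kernel rather than anything taken from the files:

__global__ void scale(float2 *v, float s, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) { v[i].x *= s; v[i].y *= s; }   // scale both components in place
}

// CUDA launch syntax:
//   scale<<<grid, block>>>(d_v, 2.0f, n);
// Equivalent hipified launch (the extra arguments are shared-memory bytes and stream):
//   hipLaunchKernelGGL(scale, dim3(grid), dim3(block), 0, 0, d_v, 2.0f, n);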
a314bedbe5305a00853bd0abb77d3c9834f97517.hip
// !!! This is a file automatically generated by hipify!!! #include "match.h" #include "nan.h" #include <algorithm> #include <limits> #include <iostream> const int Match::OCCLUDED = std::numeric_limits<int>::max(); Match::Match(cv::Mat left, cv::Mat right, bool color) { originalHeightL = imGetYSize(left); int height = ::min(imGetYSize(left), imGetYSize(right)); imSizeL = Coord(imGetXSize(left), height); imSizeR = Coord(imGetXSize(right), height); imColorLeft = left; imColorRight = right; imColorLeftMin = imColorLeftMax = 0;//? imColorRightMin = imColorRightMax = 0; //? dispMin = dispMax = 0; d_left = cv::Mat(imSizeL.y, imSizeL.x, CV_32SC1); vars0 = cv::Mat(imSizeL.y, imSizeL.x, CV_32SC1); varsA = cv::Mat(imSizeL.y, imSizeL.x, CV_32SC1); if (d_left.cols == 0 || vars0.cols == 0 || varsA.cols == 0) { std::cerr << "Not enough memory!" << std::endl; exit(1); } } void Match::InitMatch(cv::Mat& left, cv::Mat& right) { originalHeightL = imGetYSize(left); int height = ::min(imGetYSize(left), imGetYSize(right)); imSizeL = Coord(imGetXSize(left), height); imSizeR = Coord(imGetXSize(right), height); imColorLeft = left; imColorRight = right; imColorLeftMin = imColorLeftMax = 0;//? imColorRightMin = imColorRightMax = 0; //? dispMin = dispMax = 0; d_left = cv::Mat(imSizeL.y, imSizeL.x, CV_32SC1); vars0 = cv::Mat(imSizeL.y, imSizeL.x, CV_32SC1); varsA = cv::Mat(imSizeL.y, imSizeL.x, CV_32SC1); if (d_left.cols == 0 || vars0.cols == 0 || varsA.cols == 0) { std::cerr << "Not enough memory!" << std::endl; exit(1); } } Match::~Match() { // imfree imColorLeftMin.release(); imColorLeftMax.release(); imColorRightMin.release(); imColorRightMax.release(); d_left.release(); vars0.release(); varsA.release(); } void Match::SetMehod(int m) { assert(m == 0 || m == 1 || m == 2); switch (m) { case 0: method = SAD; break; case 1: method = NCC; break; case 2: method = GRAPH; break; default: break; } } void Match::SaveXLeft(const char* filename) { Coord outSize(imSizeL.x, originalHeightL); cv::Mat out = cv::Mat(outSize.y, outSize.x, CV_8UC1); for (int w = 0; w < imSizeL.x; w++) { for (int h = 0; h < imSizeL.y; h++) { int d = d_left.at<int>(h, w); out.at<uchar>(h, w) = ((d == OCCLUDED) ? 0 : static_cast<uchar>(std::abs(d))); } } cv::imwrite(filename, out); out.release(); } cv::Mat Match::GetResultDisparity() { Coord outSize(imSizeL.x, originalHeightL); //this->out = cv::Mat(outSize.y, outSize.x, CV_8UC1); if (this->method == GRAPH) { for (int w = 0; w < imSizeL.x; w++) { for (int h = 0; h < imSizeL.y; h++) { int d = d_left.at<int>(h, w); out.at<uchar>(h, w) = ((d == OCCLUDED) ? 
0 : static_cast<uchar>(std::abs(d))); } } } else { } return out; } void Match::SaveScaledXLeft(const char*filename, bool flag) { } /// Set the disparity search range; initially the disparity map d_left is set to the maximum value void Match::SetDispRange(int dMin, int dMax) { dispMin = dMin; dispMax = dMax; if (!(dispMin <= dispMax)) { std::cerr << "Error: wrong disparity range!\n" << std::endl; exit(1); } for (int w = 0; w < imSizeL.x; w++) for (int h = 0; h < imSizeL.y; h++) d_left.at<int>(h, w) = OCCLUDED; } cv::Mat Match::PerformMatchAllMethods(QProgressBar* progressBar) { switch (this->method) { case SAD: RunSAD(); break; case NCC: RunNCC(); break; case GRAPH: KZ2(progressBar); break; default: KZ2(progressBar); break; } progressBar->setValue(100); return GetResultDisparity(); } void Match::RunLocalCUDA(bool useSAD) { int height = imColorLeft.rows, width = imColorLeft.cols; // host memory cv::Mat left, right; cv::cvtColor(imColorLeft, left, cv::COLOR_BGR2GRAY); cv::cvtColor(imColorRight, right, cv::COLOR_BGR2GRAY); // device memory uchar *d_left, *d_right, *d_out; hipMalloc((void**)&d_left, width*height); hipMalloc((void**)&d_right, width*height); hipMalloc((void**)&d_out, width*height); hipMemcpy(d_left, left.data, width*height, hipMemcpyHostToDevice); hipMemcpy(d_right, right.data, width*height, hipMemcpyHostToDevice); hipMemset(d_out, 0, width*height * sizeof(uchar)); // launch kernel dim3 block_size, grid_size; block_size = dim3(32, 32, 1); grid_size = dim3((width + block_size.x - 1) / block_size.x, (height + block_size.y - 1) / block_size.y, 1); hipEvent_t start_cuda, finish_cuda; hipEventCreate(&start_cuda, 0); hipEventCreate(&finish_cuda, 0); hipEventRecord(start_cuda, 0); int win_size = 6; if (useSAD) SADMatch << <grid_size, block_size >> > (d_left, d_right, d_out, 64, win_size, width, height); else NCCMatch << <grid_size, block_size >> > (d_left, d_right, d_out, 64, win_size, width, height); hipEventRecord(finish_cuda, 0); hipEventSynchronize(finish_cuda); hipEventElapsedTime(&time_ms, start_cuda, finish_cuda); // copy result back this->out.release(); this->out = cv::Mat(height, width, CV_8UC1); hipMemcpy((this->out).data, d_out, width*height * sizeof(uchar), hipMemcpyDeviceToHost); hipFree(d_left); hipFree(d_right); hipFree(d_out); hipEventDestroy(start_cuda); hipEventDestroy(finish_cuda); left.release(); right.release(); } void Match::RunSAD() { RunLocalCUDA(true); } void Match::RunNCC() { RunLocalCUDA(false); }
a314bedbe5305a00853bd0abb77d3c9834f97517.cu
#include "match.h" #include "nan.h" #include <algorithm> #include <limits> #include <iostream> const int Match::OCCLUDED = std::numeric_limits<int>::max(); Match::Match(cv::Mat left, cv::Mat right, bool color) { originalHeightL = imGetYSize(left); int height = std::min(imGetYSize(left), imGetYSize(right)); imSizeL = Coord(imGetXSize(left), height); imSizeR = Coord(imGetXSize(right), height); imColorLeft = left; imColorRight = right; imColorLeftMin = imColorLeftMax = 0;//? imColorRightMin = imColorRightMax = 0; //? dispMin = dispMax = 0; d_left = cv::Mat(imSizeL.y, imSizeL.x, CV_32SC1); vars0 = cv::Mat(imSizeL.y, imSizeL.x, CV_32SC1); varsA = cv::Mat(imSizeL.y, imSizeL.x, CV_32SC1); if (d_left.cols == 0 || vars0.cols == 0 || varsA.cols == 0) { std::cerr << "Not enough memory!" << std::endl; exit(1); } } void Match::InitMatch(cv::Mat& left, cv::Mat& right) { originalHeightL = imGetYSize(left); int height = std::min(imGetYSize(left), imGetYSize(right)); imSizeL = Coord(imGetXSize(left), height); imSizeR = Coord(imGetXSize(right), height); imColorLeft = left; imColorRight = right; imColorLeftMin = imColorLeftMax = 0;//? imColorRightMin = imColorRightMax = 0; //? dispMin = dispMax = 0; d_left = cv::Mat(imSizeL.y, imSizeL.x, CV_32SC1); vars0 = cv::Mat(imSizeL.y, imSizeL.x, CV_32SC1); varsA = cv::Mat(imSizeL.y, imSizeL.x, CV_32SC1); if (d_left.cols == 0 || vars0.cols == 0 || varsA.cols == 0) { std::cerr << "Not enough memory!" << std::endl; exit(1); } } Match::~Match() { // imfree imColorLeftMin.release(); imColorLeftMax.release(); imColorRightMin.release(); imColorRightMax.release(); d_left.release(); vars0.release(); varsA.release(); } void Match::SetMehod(int m) { assert(m == 0 || m == 1 || m == 2); switch (m) { case 0: method = SAD; break; case 1: method = NCC; break; case 2: method = GRAPH; break; default: break; } } void Match::SaveXLeft(const char* filename) { Coord outSize(imSizeL.x, originalHeightL); cv::Mat out = cv::Mat(outSize.y, outSize.x, CV_8UC1); for (int w = 0; w < imSizeL.x; w++) { for (int h = 0; h < imSizeL.y; h++) { int d = d_left.at<int>(h, w); out.at<uchar>(h, w) = ((d == OCCLUDED) ? 0 : static_cast<uchar>(std::abs(d))); } } cv::imwrite(filename, out); out.release(); } cv::Mat Match::GetResultDisparity() { Coord outSize(imSizeL.x, originalHeightL); //this->out = cv::Mat(outSize.y, outSize.x, CV_8UC1); if (this->method == GRAPH) { for (int w = 0; w < imSizeL.x; w++) { for (int h = 0; h < imSizeL.y; h++) { int d = d_left.at<int>(h, w); out.at<uchar>(h, w) = ((d == OCCLUDED) ? 
0 : static_cast<uchar>(std::abs(d))); } } } else { } return out; } void Match::SaveScaledXLeft(const char*filename, bool flag) { } /// Set the disparity search range; initially the disparity map d_left is set to the maximum value void Match::SetDispRange(int dMin, int dMax) { dispMin = dMin; dispMax = dMax; if (!(dispMin <= dispMax)) { std::cerr << "Error: wrong disparity range!\n" << std::endl; exit(1); } for (int w = 0; w < imSizeL.x; w++) for (int h = 0; h < imSizeL.y; h++) d_left.at<int>(h, w) = OCCLUDED; } cv::Mat Match::PerformMatchAllMethods(QProgressBar* progressBar) { switch (this->method) { case SAD: RunSAD(); break; case NCC: RunNCC(); break; case GRAPH: KZ2(progressBar); break; default: KZ2(progressBar); break; } progressBar->setValue(100); return GetResultDisparity(); } void Match::RunLocalCUDA(bool useSAD) { int height = imColorLeft.rows, width = imColorLeft.cols; // host memory cv::Mat left, right; cv::cvtColor(imColorLeft, left, cv::COLOR_BGR2GRAY); cv::cvtColor(imColorRight, right, cv::COLOR_BGR2GRAY); // device memory uchar *d_left, *d_right, *d_out; cudaMalloc((void**)&d_left, width*height); cudaMalloc((void**)&d_right, width*height); cudaMalloc((void**)&d_out, width*height); cudaMemcpy(d_left, left.data, width*height, cudaMemcpyHostToDevice); cudaMemcpy(d_right, right.data, width*height, cudaMemcpyHostToDevice); cudaMemset(d_out, 0, width*height * sizeof(uchar)); // launch kernel dim3 block_size, grid_size; block_size = dim3(32, 32, 1); grid_size = dim3((width + block_size.x - 1) / block_size.x, (height + block_size.y - 1) / block_size.y, 1); cudaEvent_t start_cuda, finish_cuda; cudaEventCreate(&start_cuda, 0); cudaEventCreate(&finish_cuda, 0); cudaEventRecord(start_cuda, 0); int win_size = 6; if (useSAD) SADMatch << <grid_size, block_size >> > (d_left, d_right, d_out, 64, win_size, width, height); else NCCMatch << <grid_size, block_size >> > (d_left, d_right, d_out, 64, win_size, width, height); cudaEventRecord(finish_cuda, 0); cudaEventSynchronize(finish_cuda); cudaEventElapsedTime(&time_ms, start_cuda, finish_cuda); // copy result back this->out.release(); this->out = cv::Mat(height, width, CV_8UC1); cudaMemcpy((this->out).data, d_out, width*height * sizeof(uchar), cudaMemcpyDeviceToHost); cudaFree(d_left); cudaFree(d_right); cudaFree(d_out); cudaEventDestroy(start_cuda); cudaEventDestroy(finish_cuda); left.release(); right.release(); } void Match::RunSAD() { RunLocalCUDA(true); } void Match::RunNCC() { RunLocalCUDA(false); }
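SADMatch and NCCMatch are declared elsewhere in this project, so only their launches appear above. Purely as an illustration of the shape of such a window-based kernel (a hypothetical sketch with the same parameter order, not the repository's implementation), a brute-force SAD disparity search looks like:

__global__ void SadDisparitySketch(const unsigned char *left, const unsigned char *right,
                                   unsigned char *out, int max_disp, int win,
                                   int width, int height) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width || y >= height) return;
    int best_d = 0, best_cost = 1 << 30;
    for (int d = 0; d < max_disp; ++d) {              // try every candidate disparity
        int cost = 0;
        for (int dy = -win; dy <= win; ++dy)
            for (int dx = -win; dx <= win; ++dx) {    // sum |L - R| over the window (clamped at borders)
                int lx = min(max(x + dx, 0), width - 1);
                int ly = min(max(y + dy, 0), height - 1);
                int rx = min(max(x + dx - d, 0), width - 1);
                cost += abs((int)left[ly * width + lx] - (int)right[ly * width + rx]);
            }
        if (cost < best_cost) { best_cost = cost; best_d = d; }
    }
    out[y * width + x] = (unsigned char)best_d;       // winner-take-all disparity
}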
b8f65c50cf03dd33288da6992c7ef3fc555d16b6.hip
// !!! This is a file automatically generated by hipify!!! #include <torch/types.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <vector> #include <stdint-gcc.h> #include "stdio.h" #define SIGN(val) ((0 < val) - (val < 0)) __global__ void __varfloat_cuda__(const float* in, const int N, float* out, int fraction_bits, float max_sat, float min_sat, bool round) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { auto value = in[i]; auto value_hex = *reinterpret_cast<const int32_t *>(&value); // Round to nearest even if (round) { int mant_lsb = 0x1u << (23 - fraction_bits); int trunc_msb = 0x1u << (23 - 1 - fraction_bits); int trunc_rest = trunc_msb - 1; if ((value_hex & trunc_msb) and (value_hex & (mant_lsb | trunc_rest))){ value_hex += mant_lsb; } } // Adjust fraction bits int mask = ((1u << (9 + fraction_bits)) - 1) << ((23 - fraction_bits)); value_hex &= mask; out[i] = *reinterpret_cast<float *>(&(value_hex)); // Verify exponent with-in allowed range auto vabs = fabs(out[i]); if (vabs > max_sat) out[i] = SIGN(value) * max_sat; if (vabs < min_sat) if (vabs > min_sat / 2) out[i] = min_sat; else out[i] = 0.; } } #define block_count 32 #define thread_per_block 1024 // Wrapper for ATen torch::Tensor fromFloat(torch::Tensor in, int exp_bits, int fraction_bits, bool round) { if (in.device().type() != torch::kCUDA) throw std::runtime_error("Invalid device. Only CUDA supported."); if (in.type().scalarType() != torch::kFloat32) throw std::runtime_error("Invalid dtype. Only FloatTensor supported."); int N = in.numel(); auto out = at::empty_like(in); int one_hex = 0x3f800000 | (((0x1u << fraction_bits) - 1) << (23 - fraction_bits)); auto one_truncated = *reinterpret_cast<float *>(&(one_hex)); int max_exp = ((1u << (exp_bits - 1)) + 127) << 23; auto max_sat = *reinterpret_cast<float *>(&(max_exp)) * one_truncated; auto min_exp = (-((1u << (exp_bits - 1)) - 1) + 127) << 23; auto min_sat = *reinterpret_cast<float *>(&(min_exp)); hipLaunchKernelGGL(( __varfloat_cuda__), dim3(block_count), dim3(thread_per_block), 0, 0, in.data<float>(), N, out.data<float>(), fraction_bits, max_sat, min_sat, round); return out; } torch::Tensor satBounds(int exp_bits, int fraction_bits) { auto out = torch::zeros(2); int one_hex = 0x3f800000 | (((0x1u << fraction_bits) - 1) << (23 - fraction_bits)); auto one_truncated = *reinterpret_cast<float *>(&(one_hex)); int max_exp = ((1u << (exp_bits - 1)) + 127) << 23; auto max_sat = *reinterpret_cast<float *>(&(max_exp)) * one_truncated; auto min_exp = (-((1u << (exp_bits - 1)) - 1) + 127) << 23; auto min_sat = *reinterpret_cast<float *>(&(min_exp)); out[0] = min_sat; out[1] = max_sat; return out; }
b8f65c50cf03dd33288da6992c7ef3fc555d16b6.cu
#include <torch/types.h> #include <cuda.h> #include <cuda_runtime.h> #include <vector> #include <stdint-gcc.h> #include "stdio.h" #define SIGN(val) ((0 < val) - (val < 0)) __global__ void __varfloat_cuda__(const float* in, const int N, float* out, int fraction_bits, float max_sat, float min_sat, bool round) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { auto value = in[i]; auto value_hex = *reinterpret_cast<const int32_t *>(&value); // Round to nearest even if (round) { int mant_lsb = 0x1u << (23 - fraction_bits); int trunc_msb = 0x1u << (23 - 1 - fraction_bits); int trunc_rest = trunc_msb - 1; if ((value_hex & trunc_msb) and (value_hex & (mant_lsb | trunc_rest))){ value_hex += mant_lsb; } } // Adjust fraction bits int mask = ((1u << (9 + fraction_bits)) - 1) << ((23 - fraction_bits)); value_hex &= mask; out[i] = *reinterpret_cast<float *>(&(value_hex)); // Verify exponent with-in allowed range auto vabs = fabs(out[i]); if (vabs > max_sat) out[i] = SIGN(value) * max_sat; if (vabs < min_sat) if (vabs > min_sat / 2) out[i] = min_sat; else out[i] = 0.; } } #define block_count 32 #define thread_per_block 1024 // Wrapper for ATen torch::Tensor fromFloat(torch::Tensor in, int exp_bits, int fraction_bits, bool round) { if (in.device().type() != torch::kCUDA) throw std::runtime_error("Invalid device. Only CUDA supported."); if (in.type().scalarType() != torch::kFloat32) throw std::runtime_error("Invalid dtype. Only FloatTensor supported."); int N = in.numel(); auto out = at::empty_like(in); int one_hex = 0x3f800000 | (((0x1u << fraction_bits) - 1) << (23 - fraction_bits)); auto one_truncated = *reinterpret_cast<float *>(&(one_hex)); int max_exp = ((1u << (exp_bits - 1)) + 127) << 23; auto max_sat = *reinterpret_cast<float *>(&(max_exp)) * one_truncated; auto min_exp = (-((1u << (exp_bits - 1)) - 1) + 127) << 23; auto min_sat = *reinterpret_cast<float *>(&(min_exp)); __varfloat_cuda__<<<block_count, thread_per_block>>>(in.data<float>(), N, out.data<float>(), fraction_bits, max_sat, min_sat, round); return out; } torch::Tensor satBounds(int exp_bits, int fraction_bits) { auto out = torch::zeros(2); int one_hex = 0x3f800000 | (((0x1u << fraction_bits) - 1) << (23 - fraction_bits)); auto one_truncated = *reinterpret_cast<float *>(&(one_hex)); int max_exp = ((1u << (exp_bits - 1)) + 127) << 23; auto max_sat = *reinterpret_cast<float *>(&(max_exp)) * one_truncated; auto min_exp = (-((1u << (exp_bits - 1)) - 1) + 127) << 23; auto min_sat = *reinterpret_cast<float *>(&(min_exp)); out[0] = min_sat; out[1] = max_sat; return out; }
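The rounding branch in __varfloat_cuda__ adds one kept-mantissa LSB only when the first discarded bit is set and either the rest of the discarded tail is non-zero or the kept LSB is already 1, i.e. the round-to-nearest-even mentioned in the comment. A host-side sketch of the same truncation and rounding bit manipulation, handy for checking single values (it assumes the standard 23-bit IEEE-754 single-precision mantissa, as the kernel does, and leaves out the exponent saturation step):

#include <cstdint>
#include <cstdio>
#include <cstring>

float truncate_mantissa(float value, int fraction_bits, bool round) {
    int32_t bits;
    std::memcpy(&bits, &value, sizeof(bits));                  // reinterpret without aliasing issues
    if (round) {
        int32_t mant_lsb   = 0x1 << (23 - fraction_bits);      // last kept mantissa bit
        int32_t trunc_msb  = 0x1 << (23 - 1 - fraction_bits);  // first discarded bit
        int32_t trunc_rest = trunc_msb - 1;                    // remaining discarded bits
        if ((bits & trunc_msb) && (bits & (mant_lsb | trunc_rest)))
            bits += mant_lsb;                                  // round up, ties to even
    }
    int32_t mask = (int32_t)(((1u << (9 + fraction_bits)) - 1) << (23 - fraction_bits));
    bits &= mask;                                              // keep sign, exponent, top fraction bits
    float out;
    std::memcpy(&out, &bits, sizeof(out));
    return out;
}

int main() { printf("%.7f\n", truncate_mantissa(1.2345678f, 4, true)); }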
5c90b92337740e4630d6138542595aa77e595bfb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // Copyright (c) 2018-2023 www.open3d.org // SPDX-License-Identifier: MIT // ---------------------------------------------------------------------------- // // Reference: // https://github.com/open-mmlab/OpenPCDet/blob/master/pcdet/ops/iou3d_nms/src/iou3d_nms_kernel.cu // // Reference: // https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/ops/iou3d/src/iou3d_kernel.cu // 3D IoU Calculation and Rotated NMS(modified from 2D NMS written by others) // Written by Shaoshuai Shi // All Rights Reserved 2019-2020. #include <thrust/device_vector.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/sequence.h> #include <thrust/sort.h> #include "open3d/ml/Helper.h" #include "open3d/ml/contrib/IoUImpl.h" #include "open3d/ml/contrib/Nms.h" #include "open3d/utility/Helper.h" namespace open3d { namespace ml { namespace contrib { template <typename T> static void SortIndices(T *values, int64_t *sort_indices, int64_t n, bool descending = false) { // Cast to thrust device pointer. thrust::device_ptr<T> values_dptr = thrust::device_pointer_cast(values); thrust::device_ptr<int64_t> sort_indices_dptr = thrust::device_pointer_cast(sort_indices); // Fill sort_indices with 0, 1, ..., n-1. thrust::sequence(sort_indices_dptr, sort_indices_dptr + n, 0); // Sort values and sort_indices together. if (descending) { thrust::stable_sort_by_key(values_dptr, values_dptr + n, sort_indices_dptr, thrust::greater<T>()); } else { thrust::stable_sort_by_key(values_dptr, values_dptr + n, sort_indices_dptr); } } __global__ void NmsKernel(const float *boxes, const int64_t *sort_indices, uint64_t *mask, const int n, const double nms_overlap_thresh, const int num_block_cols) { // Row-wise block index. const int block_row_idx = blockIdx.y; // Column-wise block index. const int block_col_idx = blockIdx.x; // Local block row size. const int row_size = fminf(n - block_row_idx * NMS_BLOCK_SIZE, NMS_BLOCK_SIZE); // Local block col size. const int col_size = fminf(n - block_col_idx * NMS_BLOCK_SIZE, NMS_BLOCK_SIZE); // Fill local block_boxes by fetching the global box memory. // block_boxes = boxes[NBS*block_col_idx : NBS*block_col_idx+col_size, :]. // // TODO: It is also possible to load the comparison target to the shared // memory as well. __shared__ float block_boxes[NMS_BLOCK_SIZE * 5]; if (threadIdx.x < col_size) { float *dst = block_boxes + threadIdx.x * 5; const int src_idx = NMS_BLOCK_SIZE * block_col_idx + threadIdx.x; const float *src = boxes + sort_indices[src_idx] * 5; dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; dst[4] = src[4]; } __syncthreads(); // Comparing src and dst. In one block, the following src and dst indices // are compared: // - src: BS * block_row_idx : BS * block_row_idx + row_size // - dst: BS * block_col_idx : BS * block_col_idx + col_size // // With all blocks, all src and dst indices are compared. // // Result: // mask[i, j] is a 64-bit integer where mask[i, j][k] (k counted from right) // is 1 iff box[i] overlaps with box[BS*j+k]. if (threadIdx.x < row_size) { // src_idx indices the global memory. const int src_idx = NMS_BLOCK_SIZE * block_row_idx + threadIdx.x; // dst_idx indices the shared memory. int dst_idx = block_row_idx == block_col_idx ? 
threadIdx.x + 1 : 0; uint64_t t = 0; while (dst_idx < col_size) { if (IoUBev2DWithMinAndMax(boxes + sort_indices[src_idx] * 5, block_boxes + dst_idx * 5) > nms_overlap_thresh) { t |= 1ULL << dst_idx; } dst_idx++; } mask[src_idx * num_block_cols + block_col_idx] = t; } } std::vector<int64_t> NmsCUDAKernel(const float *boxes, const float *scores, int n, double nms_overlap_thresh) { if (n == 0) { return {}; } // Cololum-wise number of blocks. const int num_block_cols = utility::DivUp(n, NMS_BLOCK_SIZE); // Compute sort indices. float *scores_copy = nullptr; OPEN3D_CUDA_CHECK(hipMalloc((void **)&scores_copy, n * sizeof(float))); OPEN3D_CUDA_CHECK(hipMemcpy(scores_copy, scores, n * sizeof(float), hipMemcpyDeviceToDevice)); int64_t *sort_indices = nullptr; OPEN3D_CUDA_CHECK(hipMalloc((void **)&sort_indices, n * sizeof(int64_t))); SortIndices(scores_copy, sort_indices, n, true); OPEN3D_CUDA_CHECK(hipFree(scores_copy)); // Allocate masks on device. uint64_t *mask_ptr = nullptr; OPEN3D_CUDA_CHECK(hipMalloc((void **)&mask_ptr, n * num_block_cols * sizeof(uint64_t))); // Launch kernel. dim3 blocks(utility::DivUp(n, NMS_BLOCK_SIZE), utility::DivUp(n, NMS_BLOCK_SIZE)); dim3 threads(NMS_BLOCK_SIZE); hipLaunchKernelGGL(( NmsKernel), dim3(blocks), dim3(threads), 0, 0, boxes, sort_indices, mask_ptr, n, nms_overlap_thresh, num_block_cols); // Copy cuda masks to cpu. std::vector<uint64_t> mask_vec(n * num_block_cols); uint64_t *mask = mask_vec.data(); OPEN3D_CUDA_CHECK(hipMemcpy(mask_vec.data(), mask_ptr, n * num_block_cols * sizeof(uint64_t), hipMemcpyDeviceToHost)); OPEN3D_CUDA_CHECK(hipFree(mask_ptr)); // Copy sort_indices to cpu. std::vector<int64_t> sort_indices_cpu(n); OPEN3D_CUDA_CHECK(hipMemcpy(sort_indices_cpu.data(), sort_indices, n * sizeof(int64_t), hipMemcpyDeviceToHost)); // Write to keep_indices in CPU. // remv_cpu has n bits in total. If the bit is 1, the corresponding // box will be removed. // TODO: This part can be implemented in CUDA. We use the original author's // implementation here. std::vector<uint64_t> remv_cpu(num_block_cols, 0); std::vector<int64_t> keep_indices; for (int i = 0; i < n; i++) { int block_col_idx = i / NMS_BLOCK_SIZE; int inner_block_col_idx = i % NMS_BLOCK_SIZE; // threadIdx.x // Querying the i-th bit in remv_cpu, counted from the right. // - remv_cpu[block_col_idx]: the block bitmap containing the query // - 1ULL << inner_block_col_idx: the one-hot bitmap to extract i if (!(remv_cpu[block_col_idx] & (1ULL << inner_block_col_idx))) { // Keep the i-th box. keep_indices.push_back(sort_indices_cpu[i]); // Any box that overlaps with the i-th box will be removed. uint64_t *p = mask + i * num_block_cols; for (int j = block_col_idx; j < num_block_cols; j++) { remv_cpu[j] |= p[j]; } } } OPEN3D_CUDA_CHECK(hipFree(sort_indices)); return keep_indices; } } // namespace contrib } // namespace ml } // namespace open3d
5c90b92337740e4630d6138542595aa77e595bfb.cu
// ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // Copyright (c) 2018-2023 www.open3d.org // SPDX-License-Identifier: MIT // ---------------------------------------------------------------------------- // // Reference: // https://github.com/open-mmlab/OpenPCDet/blob/master/pcdet/ops/iou3d_nms/src/iou3d_nms_kernel.cu // // Reference: // https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/ops/iou3d/src/iou3d_kernel.cu // 3D IoU Calculation and Rotated NMS(modified from 2D NMS written by others) // Written by Shaoshuai Shi // All Rights Reserved 2019-2020. #include <thrust/device_vector.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/sequence.h> #include <thrust/sort.h> #include "open3d/ml/Helper.h" #include "open3d/ml/contrib/IoUImpl.h" #include "open3d/ml/contrib/Nms.h" #include "open3d/utility/Helper.h" namespace open3d { namespace ml { namespace contrib { template <typename T> static void SortIndices(T *values, int64_t *sort_indices, int64_t n, bool descending = false) { // Cast to thrust device pointer. thrust::device_ptr<T> values_dptr = thrust::device_pointer_cast(values); thrust::device_ptr<int64_t> sort_indices_dptr = thrust::device_pointer_cast(sort_indices); // Fill sort_indices with 0, 1, ..., n-1. thrust::sequence(sort_indices_dptr, sort_indices_dptr + n, 0); // Sort values and sort_indices together. if (descending) { thrust::stable_sort_by_key(values_dptr, values_dptr + n, sort_indices_dptr, thrust::greater<T>()); } else { thrust::stable_sort_by_key(values_dptr, values_dptr + n, sort_indices_dptr); } } __global__ void NmsKernel(const float *boxes, const int64_t *sort_indices, uint64_t *mask, const int n, const double nms_overlap_thresh, const int num_block_cols) { // Row-wise block index. const int block_row_idx = blockIdx.y; // Column-wise block index. const int block_col_idx = blockIdx.x; // Local block row size. const int row_size = fminf(n - block_row_idx * NMS_BLOCK_SIZE, NMS_BLOCK_SIZE); // Local block col size. const int col_size = fminf(n - block_col_idx * NMS_BLOCK_SIZE, NMS_BLOCK_SIZE); // Fill local block_boxes by fetching the global box memory. // block_boxes = boxes[NBS*block_col_idx : NBS*block_col_idx+col_size, :]. // // TODO: It is also possible to load the comparison target to the shared // memory as well. __shared__ float block_boxes[NMS_BLOCK_SIZE * 5]; if (threadIdx.x < col_size) { float *dst = block_boxes + threadIdx.x * 5; const int src_idx = NMS_BLOCK_SIZE * block_col_idx + threadIdx.x; const float *src = boxes + sort_indices[src_idx] * 5; dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; dst[4] = src[4]; } __syncthreads(); // Comparing src and dst. In one block, the following src and dst indices // are compared: // - src: BS * block_row_idx : BS * block_row_idx + row_size // - dst: BS * block_col_idx : BS * block_col_idx + col_size // // With all blocks, all src and dst indices are compared. // // Result: // mask[i, j] is a 64-bit integer where mask[i, j][k] (k counted from right) // is 1 iff box[i] overlaps with box[BS*j+k]. if (threadIdx.x < row_size) { // src_idx indices the global memory. const int src_idx = NMS_BLOCK_SIZE * block_row_idx + threadIdx.x; // dst_idx indices the shared memory. int dst_idx = block_row_idx == block_col_idx ? 
threadIdx.x + 1 : 0; uint64_t t = 0; while (dst_idx < col_size) { if (IoUBev2DWithMinAndMax(boxes + sort_indices[src_idx] * 5, block_boxes + dst_idx * 5) > nms_overlap_thresh) { t |= 1ULL << dst_idx; } dst_idx++; } mask[src_idx * num_block_cols + block_col_idx] = t; } } std::vector<int64_t> NmsCUDAKernel(const float *boxes, const float *scores, int n, double nms_overlap_thresh) { if (n == 0) { return {}; } // Cololum-wise number of blocks. const int num_block_cols = utility::DivUp(n, NMS_BLOCK_SIZE); // Compute sort indices. float *scores_copy = nullptr; OPEN3D_CUDA_CHECK(cudaMalloc((void **)&scores_copy, n * sizeof(float))); OPEN3D_CUDA_CHECK(cudaMemcpy(scores_copy, scores, n * sizeof(float), cudaMemcpyDeviceToDevice)); int64_t *sort_indices = nullptr; OPEN3D_CUDA_CHECK(cudaMalloc((void **)&sort_indices, n * sizeof(int64_t))); SortIndices(scores_copy, sort_indices, n, true); OPEN3D_CUDA_CHECK(cudaFree(scores_copy)); // Allocate masks on device. uint64_t *mask_ptr = nullptr; OPEN3D_CUDA_CHECK(cudaMalloc((void **)&mask_ptr, n * num_block_cols * sizeof(uint64_t))); // Launch kernel. dim3 blocks(utility::DivUp(n, NMS_BLOCK_SIZE), utility::DivUp(n, NMS_BLOCK_SIZE)); dim3 threads(NMS_BLOCK_SIZE); NmsKernel<<<blocks, threads>>>(boxes, sort_indices, mask_ptr, n, nms_overlap_thresh, num_block_cols); // Copy cuda masks to cpu. std::vector<uint64_t> mask_vec(n * num_block_cols); uint64_t *mask = mask_vec.data(); OPEN3D_CUDA_CHECK(cudaMemcpy(mask_vec.data(), mask_ptr, n * num_block_cols * sizeof(uint64_t), cudaMemcpyDeviceToHost)); OPEN3D_CUDA_CHECK(cudaFree(mask_ptr)); // Copy sort_indices to cpu. std::vector<int64_t> sort_indices_cpu(n); OPEN3D_CUDA_CHECK(cudaMemcpy(sort_indices_cpu.data(), sort_indices, n * sizeof(int64_t), cudaMemcpyDeviceToHost)); // Write to keep_indices in CPU. // remv_cpu has n bits in total. If the bit is 1, the corresponding // box will be removed. // TODO: This part can be implemented in CUDA. We use the original author's // implementation here. std::vector<uint64_t> remv_cpu(num_block_cols, 0); std::vector<int64_t> keep_indices; for (int i = 0; i < n; i++) { int block_col_idx = i / NMS_BLOCK_SIZE; int inner_block_col_idx = i % NMS_BLOCK_SIZE; // threadIdx.x // Querying the i-th bit in remv_cpu, counted from the right. // - remv_cpu[block_col_idx]: the block bitmap containing the query // - 1ULL << inner_block_col_idx: the one-hot bitmap to extract i if (!(remv_cpu[block_col_idx] & (1ULL << inner_block_col_idx))) { // Keep the i-th box. keep_indices.push_back(sort_indices_cpu[i]); // Any box that overlaps with the i-th box will be removed. uint64_t *p = mask + i * num_block_cols; for (int j = block_col_idx; j < num_block_cols; j++) { remv_cpu[j] |= p[j]; } } } OPEN3D_CUDA_CHECK(cudaFree(sort_indices)); return keep_indices; } } // namespace contrib } // namespace ml } // namespace open3d
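NmsKernel writes, for each box i (taken in descending-score order), num_block_cols 64-bit words in which bit k of word j means "box i overlaps box NMS_BLOCK_SIZE*j + k above the threshold"; the host loop then ORs those rows into remv_cpu to suppress neighbours greedily. A small host-side helper that just queries that bitmap layout (a sketch based only on the layout described in the kernel comments):

#include <cstdint>
#include <vector>

// True if the kernel marked sorted-order box j as overlapping sorted-order box i.
// mask is the n x num_block_cols array copied back from the device; BS is NMS_BLOCK_SIZE.
inline bool Overlaps(const std::vector<uint64_t> &mask, int num_block_cols, int BS, int i, int j) {
    const uint64_t word = mask[(size_t)i * num_block_cols + j / BS];
    return (word >> (j % BS)) & 1ULL;
}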
50b82815c6439357d1c62703ec20b4004736e604.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #define SIZE 1024 /* must use .cu otherwise .c and .cpp will send to host compiler and global would have issues */ __global__ void VectorAdd(int *a, int *b, int *c, int n) { int i = threadIdx.x; // no loop for (i = 0; i < n; ++i) if (i < n) c[i] = a[i] + b[i]; } int main(int argc, char *argv[]) { int noOfRun; if (argc > 1) { noOfRun = atoi(argv[1]); printf("\nargv[1] in intger=%d\n\n", noOfRun); } // use SIZE here instead of noofRun int *a, *b, *c; a = (int *)malloc(SIZE * sizeof(int)); b = (int *)malloc(SIZE * sizeof(int)); c = (int *)malloc(SIZE * sizeof(int)); int *d_a, *d_b, *d_c; hipMalloc(&d_a, SIZE * sizeof(int)); hipMalloc(&d_b, SIZE * sizeof(int)); hipMalloc(&d_c, SIZE * sizeof(int)); for (int i = 0; i < SIZE; ++i) { a[i] = i; b[i] = i + 1; c[i] = 0; } hipMemcpy(d_a, a, SIZE * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_b, b, SIZE * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_c, c, SIZE * sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( VectorAdd), dim3(1), dim3(SIZE), 0, 0, d_a, d_b, d_c, SIZE); hipMemcpy(a, d_a, SIZE * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(b, d_b, SIZE * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(c, d_c, SIZE * sizeof(int), hipMemcpyDeviceToHost); for (int i = 0; i < 10; ++i) printf("host = %d: a[%d] + b[%d] = %d + %d = c[%d] = %d\n", i, i, i, a[i], b[i], i, c[i]); /* you cannot directly address the gpu memory !!! for (int i = 0; i < 10; ++i) printf("device = %d: d_a[%d] + d_b[%d] = %d + %d = d_c[%d] = %d\n", i, i, i, d_a[i], d_b[i], i, d_c[i]); */ free(a); free(b); free(c); hipFree(d_a); hipFree(d_b); hipFree(d_c); // hipProfilerStop(); and _syncthreads(); and device level close ???? return 0; } /* #include <stdio.h> #include <stdlib.h> #define SIZE 1024 void VectorAdd(int *a, int *b, int *c, int n) { int i; for (i = 0; i < n; ++i) c[i] = a[i] + b[i]; } int main(int argc, char *argv[]) { int noOfRun; if (argc > 1) { noOfRun = atoi(argv[1]); printf("\nargv[1] in intger=%d\n\n", noOfRun); } // use SIZE here instead of noofRun int *a, *b, *c; a = (int *)malloc(SIZE * sizeof(int)); b = (int *)malloc(SIZE * sizeof(int)); c = (int *)malloc(SIZE * sizeof(int)); for (int i = 0; i < SIZE; ++i) { a[i] = i; b[i] = i + 1; c[i] = 0; } VectorAdd(a, b, c, SIZE); for (int i = 0; i < 10; ++i) printf("%d: a[%d] + b[%d] = %d + %d = c[%d] = %d\n", i, i, i, a[i], b[i], i, c[i]); free(a); free(b); free(c); return 0; } */ /* #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } int main() { const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; // Add vectors in parallel. hipError_t cudaStatus = addWithCuda(c, a, b, arraySize); if (cudaStatus != hipSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. 
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. addKernel<<<1, size>>>(dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(dev_c); hipFree(dev_a); hipFree(dev_b); return cudaStatus; } */
50b82815c6439357d1c62703ec20b4004736e604.cu
#include <stdio.h> #include <stdlib.h> #define SIZE 1024 /* must use .cu otherwise .c and .cpp will send to host compiler and global would have issues */ __global__ void VectorAdd(int *a, int *b, int *c, int n) { int i = threadIdx.x; // no loop for (i = 0; i < n; ++i) if (i < n) c[i] = a[i] + b[i]; } int main(int argc, char *argv[]) { int noOfRun; if (argc > 1) { noOfRun = atoi(argv[1]); printf("\nargv[1] in intger=%d\n\n", noOfRun); } // use SIZE here instead of noofRun int *a, *b, *c; a = (int *)malloc(SIZE * sizeof(int)); b = (int *)malloc(SIZE * sizeof(int)); c = (int *)malloc(SIZE * sizeof(int)); int *d_a, *d_b, *d_c; cudaMalloc(&d_a, SIZE * sizeof(int)); cudaMalloc(&d_b, SIZE * sizeof(int)); cudaMalloc(&d_c, SIZE * sizeof(int)); for (int i = 0; i < SIZE; ++i) { a[i] = i; b[i] = i + 1; c[i] = 0; } cudaMemcpy(d_a, a, SIZE * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, SIZE * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_c, c, SIZE * sizeof(int), cudaMemcpyHostToDevice); VectorAdd<<<1, SIZE>>>(d_a, d_b, d_c, SIZE); cudaMemcpy(a, d_a, SIZE * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(b, d_b, SIZE * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(c, d_c, SIZE * sizeof(int), cudaMemcpyDeviceToHost); for (int i = 0; i < 10; ++i) printf("host = %d: a[%d] + b[%d] = %d + %d = c[%d] = %d\n", i, i, i, a[i], b[i], i, c[i]); /* you cannot directly address the gpu memory !!! for (int i = 0; i < 10; ++i) printf("device = %d: d_a[%d] + d_b[%d] = %d + %d = d_c[%d] = %d\n", i, i, i, d_a[i], d_b[i], i, d_c[i]); */ free(a); free(b); free(c); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); // cudaProfilerStop(); and _syncthreads(); and device level close ???? return 0; } /* #include <stdio.h> #include <stdlib.h> #define SIZE 1024 void VectorAdd(int *a, int *b, int *c, int n) { int i; for (i = 0; i < n; ++i) c[i] = a[i] + b[i]; } int main(int argc, char *argv[]) { int noOfRun; if (argc > 1) { noOfRun = atoi(argv[1]); printf("\nargv[1] in intger=%d\n\n", noOfRun); } // use SIZE here instead of noofRun int *a, *b, *c; a = (int *)malloc(SIZE * sizeof(int)); b = (int *)malloc(SIZE * sizeof(int)); c = (int *)malloc(SIZE * sizeof(int)); for (int i = 0; i < SIZE; ++i) { a[i] = i; b[i] = i + 1; c[i] = 0; } VectorAdd(a, b, c, SIZE); for (int i = 0; i < 10; ++i) printf("%d: a[%d] + b[%d] = %d + %d = c[%d] = %d\n", i, i, i, a[i], b[i], i, c[i]); free(a); free(b); free(c); return 0; } */ /* #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } int main() { const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; // Add vectors in parallel. cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. 
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. addKernel<<<1, size>>>(dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(dev_c); cudaFree(dev_a); cudaFree(dev_b); return cudaStatus; } */
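One quirk worth flagging in the pair above: despite the "// no loop" comment, VectorAdd keeps the full for loop, so every one of the SIZE threads rewrites the entire output array. The usual form is one element per thread or a grid-stride loop, e.g. (a sketch, not the file's code):

__global__ void VectorAddFixed(const int *a, const int *b, int *c, int n) {
    // grid-stride loop: also correct when n exceeds gridDim.x * blockDim.x
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
        c[i] = a[i] + b[i];
}
// launch: VectorAddFixed<<<(SIZE + 255) / 256, 256>>>(d_a, d_b, d_c, SIZE);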
37ea565d6e5f6a4cad19a8ccd875ea84bedc20ef.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> __global__ void square(float* d_out, float* d_in){ int idx = threadIdx.x; float f = d_in[idx]; d_out[idx] = f*f; } int main(int argc, char** argv){ const int ARRAY_SIZE = 64; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float); float h_in[ARRAY_SIZE]; for(int i=0;i<ARRAY_SIZE;i++){ h_in[i] = float(i); } float h_out[ARRAY_SIZE]; float* d_in; float* d_out; hipMalloc((void **) &d_in, ARRAY_BYTES); hipMalloc((void **) &d_out, ARRAY_BYTES); hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice); hipLaunchKernelGGL(( square), dim3(1),dim3(ARRAY_SIZE), 0, 0, d_out, d_in); hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost); for(int i=0;i<ARRAY_SIZE;i++){ printf("%f\n", h_out[i]); } hipFree(d_in); hipFree(d_out); return 0; }
37ea565d6e5f6a4cad19a8ccd875ea84bedc20ef.cu
#include<stdio.h> __global__ void square(float* d_out, float* d_in){ int idx = threadIdx.x; float f = d_in[idx]; d_out[idx] = f*f; } int main(int argc, char** argv){ const int ARRAY_SIZE = 64; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float); float h_in[ARRAY_SIZE]; for(int i=0;i<ARRAY_SIZE;i++){ h_in[i] = float(i); } float h_out[ARRAY_SIZE]; float* d_in; float* d_out; cudaMalloc((void **) &d_in, ARRAY_BYTES); cudaMalloc((void **) &d_out, ARRAY_BYTES); cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice); square<<<1,ARRAY_SIZE>>>(d_out, d_in); cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost); for(int i=0;i<ARRAY_SIZE;i++){ printf("%f\n", h_out[i]); } cudaFree(d_in); cudaFree(d_out); return 0; }
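The square example checks none of its runtime calls; when experimenting with small samples like this one, a wrapper macro in the style of the CUDA_CALL macro in the pfb_inverse entry further down keeps failures visible (a sketch using only the standard cudaError_t API):

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CHECK_CUDA(call) do {                                        \
        cudaError_t err_ = (call);                                   \
        if (err_ != cudaSuccess) {                                   \
            fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,       \
                    cudaGetErrorString(err_));                       \
            exit(EXIT_FAILURE);                                      \
        }                                                            \
    } while (0)

// usage: CHECK_CUDA(cudaMalloc((void **)&d_in, ARRAY_BYTES));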
a27c8a5146716694cfcfb9f5c1085a2d9eb61a0a.hip
// !!! This is a file automatically generated by hipify!!! #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THH/generic/THHTensorMathPointwise.hip" #else #include <ATen/MemoryOverlap.h> #include <ATen/NamedTensorUtils.h> #if !defined(THC_REAL_IS_BOOL) static void propagate_names_if_named_tensor_enabled(THCTensor* result, THCTensor* src) { at::namedinference::propagate_names(result, src); } #define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) \ struct Tensor_##NAME##_##REAL##_Op { \ __device__ __forceinline__ void operator()(scalar_t* out, scalar_t* in) const { \ *out = CFUNC(*in); \ } \ \ __device__ __forceinline__ void operator()(scalar_t* v) const { \ *v = CFUNC(*v); \ } \ }; \ \ void THCTensor_(NAME)(THCState* state, THCTensor* self_, THCTensor* src) { \ THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); \ at::assert_no_internal_overlap(self_); \ if (self_ == src) { \ if (!THC_pointwiseApply1<scalar_t>(state, self_, Tensor_##NAME##_##REAL##_Op())) { \ THArgCheck(false, 2, CUTORCH_DIM_WARNING); \ } \ } else { \ THCTensor_(resizeAs)(state, self_, src); \ \ if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, Tensor_##NAME##_##REAL##_Op())) { \ THArgCheck(false, 2, CUTORCH_DIM_WARNING); \ } \ } \ \ THCudaCheck(hipGetLastError()); \ propagate_names_if_named_tensor_enabled(self_, src); \ } #define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(NAME, CFUNC, REAL) \ IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sqrt, THCNumerics<scalar_t>::sqrt, Real) #endif #undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_ #undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC void THCTensor_(crossKernel)(THCState *state, THCTensor *self, THCTensor *x, THCTensor *y, int dimension) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, x, y)); int64_t sx = THCTensor_(stride)(state, x, dimension); int64_t sy = THCTensor_(stride)(state, y, dimension); int64_t so = THCTensor_(stride)(state, self, dimension); THCTensor *nx = THCTensor_(newNarrow)(state, x, dimension, 0, 1); THCTensor *ny = THCTensor_(newNarrow)(state, y, dimension, 0, 1); THCTensor *nself = THCTensor_(newNarrow)(state, self, dimension, 0, 1); if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, nself, nx, ny, TensorCrossOp<scalar_t>(sx, sy, so))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCTensor_(free)(state, nx); THCTensor_(free)(state, ny); THCTensor_(free)(state, nself); } namespace { c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl> retainTensorImpl(THCTensor* self) { c10::raw::intrusive_ptr::incref(self); return c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl>::reclaim(self); } } void THCTensor_(cmul)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { auto out = at::Tensor(retainTensorImpl(self_)); at::mul_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2))); } void THCTensor_(cfmod)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorCFmodOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, 
TensorCFmodOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } #endif #endif
a27c8a5146716694cfcfb9f5c1085a2d9eb61a0a.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THC/generic/THCTensorMathPointwise.cu" #else #include <ATen/MemoryOverlap.h> #include <ATen/NamedTensorUtils.h> #if !defined(THC_REAL_IS_BOOL) static void propagate_names_if_named_tensor_enabled(THCTensor* result, THCTensor* src) { at::namedinference::propagate_names(result, src); } #define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) \ struct Tensor_##NAME##_##REAL##_Op { \ __device__ __forceinline__ void operator()(scalar_t* out, scalar_t* in) const { \ *out = CFUNC(*in); \ } \ \ __device__ __forceinline__ void operator()(scalar_t* v) const { \ *v = CFUNC(*v); \ } \ }; \ \ void THCTensor_(NAME)(THCState* state, THCTensor* self_, THCTensor* src) { \ THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); \ at::assert_no_internal_overlap(self_); \ if (self_ == src) { \ if (!THC_pointwiseApply1<scalar_t>(state, self_, Tensor_##NAME##_##REAL##_Op())) { \ THArgCheck(false, 2, CUTORCH_DIM_WARNING); \ } \ } else { \ THCTensor_(resizeAs)(state, self_, src); \ \ if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, Tensor_##NAME##_##REAL##_Op())) { \ THArgCheck(false, 2, CUTORCH_DIM_WARNING); \ } \ } \ \ THCudaCheck(cudaGetLastError()); \ propagate_names_if_named_tensor_enabled(self_, src); \ } #define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(NAME, CFUNC, REAL) \ IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sqrt, THCNumerics<scalar_t>::sqrt, Real) #endif #undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_ #undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC void THCTensor_(crossKernel)(THCState *state, THCTensor *self, THCTensor *x, THCTensor *y, int dimension) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, x, y)); int64_t sx = THCTensor_(stride)(state, x, dimension); int64_t sy = THCTensor_(stride)(state, y, dimension); int64_t so = THCTensor_(stride)(state, self, dimension); THCTensor *nx = THCTensor_(newNarrow)(state, x, dimension, 0, 1); THCTensor *ny = THCTensor_(newNarrow)(state, y, dimension, 0, 1); THCTensor *nself = THCTensor_(newNarrow)(state, self, dimension, 0, 1); if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, nself, nx, ny, TensorCrossOp<scalar_t>(sx, sy, so))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCTensor_(free)(state, nx); THCTensor_(free)(state, ny); THCTensor_(free)(state, nself); } namespace { c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl> retainTensorImpl(THCTensor* self) { c10::raw::intrusive_ptr::incref(self); return c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl>::reclaim(self); } } void THCTensor_(cmul)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { auto out = at::Tensor(retainTensorImpl(self_)); at::mul_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2))); } void THCTensor_(cfmod)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorCFmodOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorCFmodOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } 
#endif #endif
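For readers unfamiliar with the THC pointwise-apply machinery above, here is a minimal, self-contained sketch of the elementwise pattern that IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(sqrt, ...) expands to conceptually. It is an illustration only, written against plain float buffers instead of THCTensor; the kernel and launcher names are hypothetical and not part of the original file.

#include <stdio.h>
#include <math.h>
#include <cuda_runtime.h>

// Hypothetical stand-in for THC_pointwiseApply1 with a sqrt functor:
// each thread applies the unary op to one element in place.
__global__ void pointwise_sqrt_inplace(float *data, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    data[i] = sqrtf(data[i]);
  }
}

int main() {
  const int n = 8;
  float h[n] = {1.f, 4.f, 9.f, 16.f, 25.f, 36.f, 49.f, 64.f};
  float *d = NULL;
  cudaMalloc(&d, n * sizeof(float));
  cudaMemcpy(d, h, n * sizeof(float), cudaMemcpyHostToDevice);

  const int block = 256;
  const int grid = (n + block - 1) / block;
  pointwise_sqrt_inplace<<<grid, block>>>(d, n);
  cudaMemcpy(h, d, n * sizeof(float), cudaMemcpyDeviceToHost);

  for (int i = 0; i < n; ++i) printf("%g ", h[i]);  // expected: 1 2 3 4 5 6 7 8
  printf("\n");
  cudaFree(d);
  return 0;
}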
29e77f19d37b1676832ca59e3182fb762c7c5642.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************* * Copyright (c) 2014, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. * The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ #if defined (WITH_GRAPHICS) #include <interopManager.hpp> #include <Array.hpp> #include <plot3.hpp> #include <err_cuda.hpp> #include <debug_cuda.hpp> #include <join.hpp> #include <reduce.hpp> #include <reorder.hpp> using af::dim4; namespace cuda { template<typename T> void copy_plot3(const Array<T> &P, fg::Plot3* plot3) { const T *d_P = P.get(); InteropManager& intrpMngr = InteropManager::getInstance(); cudaGraphicsResource *cudaVBOResource = intrpMngr.getBufferResource(plot3); // Map resource. Copy data to VBO. Unmap resource. size_t num_bytes = plot3->size(); T* d_vbo = NULL; hipGraphicsMapResources(1, &cudaVBOResource, 0); hipGraphicsResourceGetMappedPointer((void **)&d_vbo, &num_bytes, cudaVBOResource); hipMemcpyAsync(d_vbo, d_P, num_bytes, hipMemcpyDeviceToDevice, cuda::getStream(cuda::getActiveDeviceId())); hipGraphicsUnmapResources(1, &cudaVBOResource, 0); CheckGL("After cuda resource copy"); POST_LAUNCH_CHECK(); } #define INSTANTIATE(T) \ template void copy_plot3<T>(const Array<T> &P, fg::Plot3* plot3); INSTANTIATE(float) INSTANTIATE(double) INSTANTIATE(int) INSTANTIATE(uint) INSTANTIATE(short) INSTANTIATE(ushort) INSTANTIATE(uchar) } #endif // WITH_GRAPHICS
29e77f19d37b1676832ca59e3182fb762c7c5642.cu
/******************************************************* * Copyright (c) 2014, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. * The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ #if defined (WITH_GRAPHICS) #include <interopManager.hpp> #include <Array.hpp> #include <plot3.hpp> #include <err_cuda.hpp> #include <debug_cuda.hpp> #include <join.hpp> #include <reduce.hpp> #include <reorder.hpp> using af::dim4; namespace cuda { template<typename T> void copy_plot3(const Array<T> &P, fg::Plot3* plot3) { const T *d_P = P.get(); InteropManager& intrpMngr = InteropManager::getInstance(); cudaGraphicsResource *cudaVBOResource = intrpMngr.getBufferResource(plot3); // Map resource. Copy data to VBO. Unmap resource. size_t num_bytes = plot3->size(); T* d_vbo = NULL; cudaGraphicsMapResources(1, &cudaVBOResource, 0); cudaGraphicsResourceGetMappedPointer((void **)&d_vbo, &num_bytes, cudaVBOResource); cudaMemcpyAsync(d_vbo, d_P, num_bytes, cudaMemcpyDeviceToDevice, cuda::getStream(cuda::getActiveDeviceId())); cudaGraphicsUnmapResources(1, &cudaVBOResource, 0); CheckGL("After cuda resource copy"); POST_LAUNCH_CHECK(); } #define INSTANTIATE(T) \ template void copy_plot3<T>(const Array<T> &P, fg::Plot3* plot3); INSTANTIATE(float) INSTANTIATE(double) INSTANTIATE(int) INSTANTIATE(uint) INSTANTIATE(short) INSTANTIATE(ushort) INSTANTIATE(uchar) } #endif // WITH_GRAPHICS
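Both copy_plot3 variants above follow the standard graphics-interop sequence: map the registered resource, query its device pointer, copy, unmap. The sketch below isolates that sequence on the CUDA side; it assumes the VBO has already been registered elsewhere (for example with cudaGraphicsGLRegisterBuffer) and that a valid stream is supplied, so it is a usage pattern rather than a complete program, and the helper name is hypothetical.

#include <cuda_runtime.h>
#include <cuda_gl_interop.h>

// Copy num_bytes from a device buffer d_src into an already-registered
// graphics resource (e.g. an OpenGL VBO). Mirrors the map/copy/unmap
// pattern used by copy_plot3 above; error handling is omitted for brevity.
inline cudaError_t copy_into_registered_vbo(cudaGraphicsResource_t resource,
                                            const void *d_src,
                                            size_t num_bytes,
                                            cudaStream_t stream) {
  void *d_vbo = NULL;
  size_t mapped_bytes = 0;
  cudaGraphicsMapResources(1, &resource, stream);
  cudaGraphicsResourceGetMappedPointer(&d_vbo, &mapped_bytes, resource);
  cudaMemcpyAsync(d_vbo, d_src, num_bytes, cudaMemcpyDeviceToDevice, stream);
  return cudaGraphicsUnmapResources(1, &resource, stream);
}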
72d4ba877a940afad8142db25e462e198c223280.hip
// !!! This is a file automatically generated by hipify!!! /* Inverse PFB following Richard Shaw's original python/LAPACK routine: https://github.com/jrs65/pfb-inverse Beware: This implementation runs ~4x slower than the python version on hamster! @author Katherine Rosenfeld @date 8/2015 To compile: nvcc pfb_inverse.cu -o pfb_inverse.out -lcublas -lcurand -lcufft -llapack */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <hip/hip_runtime.h> #include <rocblas.h> #include <hiprand/hiprand.h> #include <hipfft.h> #define BENG_CHANNELS_ 16384 #define BENG_SNAPSHOTS 128 #define PI 3.14159265359 extern "C" { void dpbtrf_(char* uplo, int *n, int* kd, double* ab, int* ldab, int* info); } #define CUDA_CALL(x) do { if((x)!=hipSuccess) { \ printf("Error at %s:%d: %s\n",__FILE__,__LINE__,hipGetErrorString(x));\ return EXIT_FAILURE;}} while(0) #define CURAND_CALL(x) do { if((x)!=HIPRAND_STATUS_SUCCESS) { \ printf("Error at %s:%d\n",__FILE__,__LINE__);\ return EXIT_FAILURE;}} while(0) #define CUFFT_CALL(x) do { if((x)!=HIPFFT_SUCCESS) { \ printf("Error at %s:%d\n",__FILE__,__LINE__);\ return EXIT_FAILURE;}} while(0) #define CUBLAS_CALL(x) do { if((x)!=HIPBLAS_STATUS_SUCCESS) { \ printf("Error at %s:%d\n",__FILE__,__LINE__);\ return EXIT_FAILURE;}} while(0) __host__ __device__ float hamming(int n, int m){ return 0.54 - 0.46*cos(2.*PI*n/(m-1.)); } // decimation kernel __global__ void decimate(hipfftComplex *in, hipfftComplex *out, int M, int N){ int tid = blockIdx.x*blockDim.x + threadIdx.x; for (int i=tid; i<N; i+= gridDim.x*blockDim.x){ if (i % M == 0) { out[i / M] = in[i]; } } } // multiple kernel __global__ void multiply(float *a, float b, int N){ int tid = blockIdx.x*blockDim.x + threadIdx.x; for (int i=tid; i<N; i+=gridDim.x*blockDim.x){ a[i] *= b; } } // cross multiply kernel __global__ void cross_multiply(hipfftComplex *S_0x1, hipfftComplex *X0, hipfftComplex *X1, int N){ // returns S_0x1 = X0 * conj(X1) int tid = blockIdx.x*blockDim.x + threadIdx.x; for (int i = tid; i < N; i += blockDim.x*gridDim.x){ S_0x1[i].x = X0[i].x*X1[i].x + X0[i].y*X1[i].y; S_0x1[i].y = X0[i].y*X1[i].x - X0[i].x*X1[i].y; } } // compute mean along column [m x n, row major format] __global__ void col_mean(hipfftComplex *in, int m, int n){ int cid = blockIdx.x*blockDim.x + threadIdx.x; // stride along column id for (int i = cid; i < n; i += gridDim.x*blockDim.x){ float avg_re = 0; float avg_im = 0; for (int j = 0 ; j < m; j++){ avg_re += in[i + j*n].x; avg_im += in[i + j*n].y; } //in[i] = make_cuComplex(avg_re / m, avg_im / m); in[i].x = avg_re/m; in[i].y = avg_im/m; } } // apply window function __global__ void window(float *in, float *out, int N){ int tid = blockIdx.x*blockDim.x + threadIdx.x; for (int i=tid; i<N; i+= gridDim.x*blockDim.x){ out[i] = in[i]*hamming(i,N); } } float corr_FXt(float *d_x0, float *d_x1, int num_samples){ int idx,window_size = 32768*64; hipfftHandle plan,iplan; hipblasHandle_t handle; int batch = num_samples / window_size; hipfftComplex *d_S,*d_X0, *d_X1; dim3 blocks(64,1,1),threads(256,1,1); float *d_s; float s0x0_max, s1x1_max, corr_coeff; printf("%s : batch = %d \n",__FUNCTION__, batch); // allocate device arrays CUDA_CALL( hipMalloc((void **) &d_X0, (window_size/2+1)*batch*sizeof(hipfftComplex)) ); CUDA_CALL( hipMalloc((void **) &d_X1, (window_size/2+1)*batch*sizeof(hipfftComplex)) ); CUDA_CALL( hipMalloc((void **) &d_S, (window_size/2+1)*batch*sizeof(hipfftComplex)) ); CUDA_CALL( hipMalloc((void **) &d_s, window_size*sizeof(float)) ); // create FFT plans and cuBLAS handle 
CUFFT_CALL( hipfftPlanMany(&plan, 1, &window_size, NULL,1,0,NULL,1,0,HIPFFT_R2C,batch) ); CUFFT_CALL( hipfftPlanMany(&iplan, 1, &window_size, NULL,1,0,NULL,1,0,HIPFFT_C2R,1) ); CUBLAS_CALL( hipblasCreate(&handle) ); // execute R2C FFT CUFFT_CALL( hipfftExecR2C(plan, d_x0, d_X0) ); CUFFT_CALL( hipfftExecR2C(plan, d_x1, d_X1) ); // auto-corr X0, X0 hipLaunchKernelGGL(( cross_multiply), dim3(blocks),dim3(threads), 0, 0, d_S,d_X0,d_X0,batch*(window_size/2+1)); hipLaunchKernelGGL(( col_mean), dim3(blocks),dim3(threads), 0, 0, d_S,batch,window_size/2+1); CUFFT_CALL( hipfftExecC2R(iplan, d_S, d_s) ); CUBLAS_CALL( hipblasIsamax(handle, window_size, d_s, 1, &idx) ); CUDA_CALL( hipMemcpy( &s0x0_max, d_s + (idx-1), 1*sizeof(float), hipMemcpyDeviceToHost) ); // auto-corr X1, X1 hipLaunchKernelGGL(( cross_multiply), dim3(blocks),dim3(threads), 0, 0, d_S,d_X1,d_X1,batch*(window_size/2+1)); hipLaunchKernelGGL(( col_mean), dim3(blocks),dim3(threads), 0, 0, d_S,batch,window_size/2+1); CUFFT_CALL( hipfftExecC2R(iplan, d_S, d_s) ); CUBLAS_CALL( hipblasIsamax(handle, window_size, d_s, 1, &idx) ); CUDA_CALL( hipMemcpy( &s1x1_max, d_s + (idx-1), 1*sizeof(float), hipMemcpyDeviceToHost) ); // cross-corr X0, X1 hipLaunchKernelGGL(( cross_multiply), dim3(blocks),dim3(threads), 0, 0, d_S,d_X0,d_X1,batch*(window_size/2+1)); hipLaunchKernelGGL(( col_mean), dim3(blocks),dim3(threads), 0, 0, d_S,batch,window_size/2+1); CUFFT_CALL( hipfftExecC2R(iplan, d_S, d_s) ); CUBLAS_CALL( hipblasIsamax(handle, window_size, d_s, 1, &idx) ); CUDA_CALL( hipMemcpy( &corr_coeff, d_s + (idx-1), 1*sizeof(float), hipMemcpyDeviceToHost) ); printf("corr coeff: %0.4f %d \n",corr_coeff/sqrt(s1x1_max*s0x0_max), idx); // clean up CUFFT_CALL( hipfftDestroy(plan) ); CUFFT_CALL( hipfftDestroy(iplan) ); CUDA_CALL( hipFree(d_X0) ); CUDA_CALL( hipFree(d_X1) ); CUDA_CALL( hipFree(d_S) ); CUDA_CALL( hipFree(d_s) ); CUBLAS_CALL( hipblasDestroy(handle) ); return corr_coeff/sqrt(s1x1_max*s0x0_max); } int PPT(int nblock, int lblock, int ntap, float *d_uPPT, float *d_band_P){ // http://www.physics.orst.edu/~rubin/nacphy/lapack/routines/spbtrf.html double *ab, *coeff_P, *coeff_PPT; float *uPPT, *band_P; char uplo = 'U'; // store upper triangle int n = nblock, kd = ntap-1, ldab = ntap, info; int ntsblock = nblock + ntap - 1; // allocate memory coeff_P = (double *) malloc(ntap*lblock*sizeof(double)); coeff_PPT = (double *) malloc(lblock*ntap*sizeof(double)); ab = (double*) malloc(ntap*nblock*sizeof(double)); uPPT = (float*) malloc(lblock*ntap*nblock*sizeof(float)); band_P = (float*) malloc(lblock*ntap*ntsblock*sizeof(float)); // generate window function for (int i=0; i<ntap*lblock; i++){ coeff_P[i] = hamming(i,ntap*lblock); } for (int i=0; i<lblock*ntap; i++) coeff_PPT[i] = 0.; // initialize array for (int k=0; k < ntap; k++){ for (int j=0; j < lblock; j++){ for (int i=0; i < ntap - k; i++){ coeff_PPT[k*lblock + j] += coeff_P[(i+k)*lblock + j] * coeff_P[i*lblock + j]; } } } // compute Cholesky factorization of each coeff_PPT submatrix // remember that lapack has column major format for (int i=0; i<lblock; i++){ // band_PPT for (int j=0; j<ntap; j++){ for (int k=0; k<nblock; k++){ ab[k*ntap + j] = coeff_PPT[(ntap-1-j)*lblock + i]; } } dpbtrf_(&uplo, &n, &kd, ab, &ldab, &info); if (info != 0){ printf("pbtrf error :%d\n",info); } for (int j=0; j<ntap; j++){ for (int k=0; k<nblock; k++){ uPPT[i*ntap*nblock + k*ntap + j] = (float) ab[k*ntap + j]; // cuBLAS also has column major format } } } // fill host arrays for (int k=0; k<lblock; k++){ for (int j=0; j<ntap; 
j++){ for (int i=0; i<ntsblock; i++){ band_P[k*ntap*ntsblock + i*ntap + j] = (float) coeff_P[(ntap-1-j)*lblock + k]; } } } // load to device CUDA_CALL( hipMemcpy(d_uPPT, uPPT, lblock*ntap*nblock*sizeof(float), hipMemcpyHostToDevice) ); CUDA_CALL( hipMemcpy(d_band_P, band_P, ntsblock*lblock*ntap*sizeof(float), hipMemcpyHostToDevice) ); free(uPPT); free(ab); free(coeff_P); free(coeff_PPT); free(band_P); return 1; } // generate pfb spectrum (doesn't actually do the polyphase bit...) int pfb(float *d_t, int num_samples, int num_tap, int num_freq, hipfftComplex *d_s){ int lblock = 2 * (num_freq - 1); int nblock = num_samples / lblock - (num_tap - 1); float *d_tt; hipfftComplex *d_ft; hipfftHandle plan; // create FFT plan int batch = 1; int fft_size = lblock*num_tap; CUDA_CALL( hipMalloc((void **) &d_ft, (fft_size/2+1)*sizeof(hipfftComplex)) ); CUDA_CALL( hipMalloc((void **) &d_tt, fft_size*sizeof(hipfftComplex)) ); CUFFT_CALL( hipfftPlanMany(&plan, 1, &fft_size,NULL,1,0,NULL,1,0,HIPFFT_R2C,batch) ); dim3 blocks(64,1,1); dim3 threads(512,1,1); // iterate over blocks (no batches yet) for (int i=0; i < nblock; i++){ // window hipLaunchKernelGGL(( window), dim3(blocks),dim3(threads), 0, 0, d_t + i*lblock, d_tt, fft_size); CUDA_CALL(hipGetLastError()); // execute rFFT CUFFT_CALL( hipfftExecR2C(plan, d_tt, d_ft) ); // decimate hipLaunchKernelGGL(( decimate), dim3(blocks),dim3(threads), 0, 0, d_ft,d_s+i*num_freq,num_tap,fft_size/2+1); CUDA_CALL(hipGetLastError()); } CUDA_CALL( hipFree(d_ft) ); CUDA_CALL( hipFree(d_tt) ); CUFFT_CALL( hipfftDestroy(plan) ); return 1; } /* d_s is complex PFB timestream [num_snapshots, num_freqs] */ int inverse_pfb(hipfftComplex *d_s, int num_samples, int num_tap, int num_freq, float *d_rts){ hipfftHandle plan; hipblasHandle_t handle; hipblasStatus_t err; float *d_pts, *d_foo, *d_uPPT, *d_band_P; hipEvent_t tic,toc; float elapsedTime; // pull out the number of blocks and their length int lblock = 2 * (num_freq - 1); int nblock = num_samples / lblock - (num_tap - 1); int ntsblock = nblock + num_tap - 1; float beta = 0.0,alpha = 1.0; // create CUDA events for timing hipEventCreate(&tic); hipEventCreate(&toc); // create cuBLAS context CUBLAS_CALL( hipblasCreate(&handle) ); // generate and load coeff_P and Cholesky factorized PPT matrix to device CUDA_CALL( hipMalloc((void **) &d_uPPT, nblock*lblock*num_tap*sizeof(float)) ); CUDA_CALL( hipMalloc((void **) &d_band_P, ntsblock*lblock*num_tap*sizeof(float)) ); PPT(nblock, lblock, num_tap, d_uPPT, d_band_P); hipEventRecord(tic); // generate pseudo timestream CUDA_CALL( hipMalloc((void **) &d_pts, nblock*lblock*sizeof(float)) ); CUDA_CALL( hipMalloc((void **) &d_foo, ntsblock*lblock*sizeof(float)) ); CUFFT_CALL( hipfftPlanMany(&plan, 1, &lblock, NULL, 1, 0, NULL, 1, 0, HIPFFT_C2R, nblock) ); CUFFT_CALL( hipfftExecC2R(plan, d_s, d_foo) ); // calculate correlation using pseudo timestream float corr_coeff = corr_FXt(d_rts,d_foo, num_samples); // transpose the nblock x lblock spectrum to lblock x nblock // cufft assumes row major jormat, cublas assumes collumn major format // http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-geam err = hipblasSgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, nblock, lblock, &alpha, d_foo, lblock, &beta, NULL, nblock, d_pts, nblock); if (err != HIPBLAS_STATUS_SUCCESS){ printf("Error at %s:%s:%d\n",__FILE__,__FUNCTION__,__LINE__); } // multiple pseudo-timestream by 1./lblock (to rescale inverse FFT) dim3 blocks(64,1,1); dim3 threads(512,1,1); hipLaunchKernelGGL(( multiply), dim3(blocks),dim3(threads), 0, 0, 
d_pts,1./lblock,lblock*nblock); // probably want to batch this or use streams for (int i = 0; i < lblock; i++){ // solve for intermediate vector // http://docs.nvidia.com/cuda/cublas/index.html#cublas-lt-t-gt-tbsv err = hipblasStbsv(handle,HIPBLAS_FILL_MODE_UPPER, HIPBLAS_OP_T,HIPBLAS_DIAG_NON_UNIT, nblock,num_tap-1, d_uPPT+i*nblock*num_tap,num_tap, d_pts+i*nblock,1); if (err != HIPBLAS_STATUS_SUCCESS){ printf("Error at %s:%s:%d\n",__FILE__,__FUNCTION__,__LINE__); } err = hipblasStbsv(handle,HIPBLAS_FILL_MODE_UPPER, HIPBLAS_OP_N,HIPBLAS_DIAG_NON_UNIT, nblock,num_tap-1, d_uPPT+i*nblock*num_tap,num_tap, d_pts+i*nblock,1); if (err != HIPBLAS_STATUS_SUCCESS){ printf("Error at %s:%s:%d\n",__FILE__,__FUNCTION__,__LINE__); } // project back onto time-stream // http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gbmv err = hipblasSgbmv(handle,HIPBLAS_OP_T, nblock, ntsblock, 0, num_tap-1, &alpha,d_band_P+i*num_tap*ntsblock,num_tap, d_pts+i*nblock, 1, &beta, d_foo+i*ntsblock, 1 ); //&beta, d_rts+i*ntsblock, 1 if (err != HIPBLAS_STATUS_SUCCESS){ printf("Error at %s:%s:%d\n",__FILE__,__FUNCTION__,__LINE__); } } // now transpose lblock x ntsblock to ntsblock x lblock // but remember that cublas is column major err = hipblasSgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, lblock, ntsblock, &alpha, d_foo, ntsblock, &beta, NULL, lblock, d_rts, lblock); if (err != HIPBLAS_STATUS_SUCCESS){ printf("Error at %s:%s:%d\n",__FILE__,__FUNCTION__,__LINE__); } hipEventRecord(toc); hipEventSynchronize(toc); hipEventElapsedTime(&elapsedTime,tic,toc); printf("inverse-pfb (gpu only): %f\n",elapsedTime); CUDA_CALL( hipEventDestroy(tic) ); CUDA_CALL( hipEventDestroy(toc) ); CUDA_CALL( hipFree(d_pts) ); CUDA_CALL( hipFree(d_foo) ); CUDA_CALL( hipFree(d_uPPT) ); CUDA_CALL( hipFree(d_band_P) ); CUBLAS_CALL( hipblasDestroy(handle) ); return 1; } int main(int argc, char* argv[]){ int num_beng_frames = 2; int num_tap = 4, num_freq = BENG_CHANNELS_ + 1; float elapsedTime; float *d_ts, *d_rts, *ts, *rts; hipfftComplex *d_s; hipEvent_t tic, toc; hiprandGenerator_t gen; int num_samples = 2*BENG_CHANNELS_*(BENG_SNAPSHOTS*num_beng_frames + num_tap - 1); int lblock = 2 * (num_freq - 1); int nblock = num_samples / lblock - (num_tap - 1); printf("num_samples=%d\n",num_samples); printf("num_freqs=%d\n",num_freq); printf("lblock=%d\n",lblock); printf("nblock=%d\n",nblock); // create events CUDA_CALL( hipEventCreate(&tic) ); CUDA_CALL( hipEventCreate(&toc) ); // allocate device memory CUDA_CALL( hipMalloc((void **) &d_ts, num_samples*sizeof(float)) ); CUDA_CALL( hipMalloc((void **) &d_s, nblock*num_freq*sizeof(hipfftComplex)) ); // generate data CURAND_CALL( hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT) ); CUDA_CALL( hipEventRecord(tic) ); CURAND_CALL(hiprandGenerateNormal(gen, d_ts, num_samples, 0., 1.) 
); CUDA_CALL( hipEventRecord(toc) ); CUDA_CALL( hipEventSynchronize(toc) ); CUDA_CALL( hipEventElapsedTime(&elapsedTime,tic,toc) ); fprintf(stdout, "generating %d random numbers took %f ms\n",num_samples,elapsedTime); // pfb CUDA_CALL( hipEventRecord(tic) ); pfb(d_ts, num_samples, num_tap, num_freq, d_s); CUDA_CALL( hipEventRecord(toc) ); CUDA_CALL( hipEventSynchronize(toc) ); CUDA_CALL( hipEventElapsedTime(&elapsedTime,tic,toc) ); fprintf(stdout, "pfb took %f ms\n",elapsedTime); // inverse pfb CUDA_CALL( hipMalloc((void **) &d_rts, num_samples*sizeof(float)) ); CUDA_CALL( hipEventRecord(tic) ); inverse_pfb(d_s, num_samples, num_tap, num_freq, d_rts); CUDA_CALL( hipEventRecord(toc) ); CUDA_CALL( hipEventSynchronize(toc) ); CUDA_CALL( hipEventElapsedTime(&elapsedTime,tic,toc) ); fprintf(stdout, "inverse-pfb took %f ms\n",elapsedTime); // compute the correlation coefficient here: CUDA_CALL( hipEventRecord(tic) ); float corr_coeff = corr_FXt(d_rts,d_ts, num_samples); CUDA_CALL( hipEventRecord(toc) ); CUDA_CALL( hipEventSynchronize(toc) ); CUDA_CALL( hipEventElapsedTime(&elapsedTime,tic,toc) ); fprintf(stdout, "FXcorr took %f ms\n",elapsedTime); #if 0 // write time streams to file ts = (float*) malloc(num_samples*sizeof(float)); rts = (float*) malloc(num_samples*sizeof(float)); CUDA_CALL( hipMemcpy(ts, d_ts, num_samples*sizeof(float), hipMemcpyDeviceToHost) ); CUDA_CALL( hipMemcpy(rts, d_rts, num_samples*sizeof(float), hipMemcpyDeviceToHost) ); FILE *pFile; pFile = fopen("ts.txt","w"); for (int i=0; i < num_samples; i++){ fprintf(pFile,"%e %e\n",ts[i], rts[i]); } fclose(pFile); free(ts); free(rts); #endif // clean up CURAND_CALL( hiprandDestroyGenerator(gen) ); CUDA_CALL( hipEventDestroy(tic) ); CUDA_CALL( hipEventDestroy(toc) ); CUDA_CALL( hipFree(d_ts) ); CUDA_CALL( hipFree(d_s) ); CUDA_CALL( hipFree(d_rts) ); fprintf(stdout,"done!\n"); }
72d4ba877a940afad8142db25e462e198c223280.cu
/* Inverse PFB following Richard Shaw's original python/LAPACK routine: https://github.com/jrs65/pfb-inverse Beware: This implementation runs ~4x slower than the python version on hamster! @author Katherine Rosenfeld @date 8/2015 To compile: nvcc pfb_inverse.cu -o pfb_inverse.out -lcublas -lcurand -lcufft -llapack */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <cuda_runtime.h> #include <cublas_v2.h> #include <curand.h> #include <cufft.h> #define BENG_CHANNELS_ 16384 #define BENG_SNAPSHOTS 128 #define PI 3.14159265359 extern "C" { void dpbtrf_(char* uplo, int *n, int* kd, double* ab, int* ldab, int* info); } #define CUDA_CALL(x) do { if((x)!=cudaSuccess) { \ printf("Error at %s:%d: %s\n",__FILE__,__LINE__,cudaGetErrorString(x));\ return EXIT_FAILURE;}} while(0) #define CURAND_CALL(x) do { if((x)!=CURAND_STATUS_SUCCESS) { \ printf("Error at %s:%d\n",__FILE__,__LINE__);\ return EXIT_FAILURE;}} while(0) #define CUFFT_CALL(x) do { if((x)!=CUFFT_SUCCESS) { \ printf("Error at %s:%d\n",__FILE__,__LINE__);\ return EXIT_FAILURE;}} while(0) #define CUBLAS_CALL(x) do { if((x)!=CUBLAS_STATUS_SUCCESS) { \ printf("Error at %s:%d\n",__FILE__,__LINE__);\ return EXIT_FAILURE;}} while(0) __host__ __device__ float hamming(int n, int m){ return 0.54 - 0.46*cos(2.*PI*n/(m-1.)); } // decimation kernel __global__ void decimate(cufftComplex *in, cufftComplex *out, int M, int N){ int tid = blockIdx.x*blockDim.x + threadIdx.x; for (int i=tid; i<N; i+= gridDim.x*blockDim.x){ if (i % M == 0) { out[i / M] = in[i]; } } } // multiple kernel __global__ void multiply(float *a, float b, int N){ int tid = blockIdx.x*blockDim.x + threadIdx.x; for (int i=tid; i<N; i+=gridDim.x*blockDim.x){ a[i] *= b; } } // cross multiply kernel __global__ void cross_multiply(cufftComplex *S_0x1, cufftComplex *X0, cufftComplex *X1, int N){ // returns S_0x1 = X0 * conj(X1) int tid = blockIdx.x*blockDim.x + threadIdx.x; for (int i = tid; i < N; i += blockDim.x*gridDim.x){ S_0x1[i].x = X0[i].x*X1[i].x + X0[i].y*X1[i].y; S_0x1[i].y = X0[i].y*X1[i].x - X0[i].x*X1[i].y; } } // compute mean along column [m x n, row major format] __global__ void col_mean(cufftComplex *in, int m, int n){ int cid = blockIdx.x*blockDim.x + threadIdx.x; // stride along column id for (int i = cid; i < n; i += gridDim.x*blockDim.x){ float avg_re = 0; float avg_im = 0; for (int j = 0 ; j < m; j++){ avg_re += in[i + j*n].x; avg_im += in[i + j*n].y; } //in[i] = make_cuComplex(avg_re / m, avg_im / m); in[i].x = avg_re/m; in[i].y = avg_im/m; } } // apply window function __global__ void window(float *in, float *out, int N){ int tid = blockIdx.x*blockDim.x + threadIdx.x; for (int i=tid; i<N; i+= gridDim.x*blockDim.x){ out[i] = in[i]*hamming(i,N); } } float corr_FXt(float *d_x0, float *d_x1, int num_samples){ int idx,window_size = 32768*64; cufftHandle plan,iplan; cublasHandle_t handle; int batch = num_samples / window_size; cufftComplex *d_S,*d_X0, *d_X1; dim3 blocks(64,1,1),threads(256,1,1); float *d_s; float s0x0_max, s1x1_max, corr_coeff; printf("%s : batch = %d \n",__FUNCTION__, batch); // allocate device arrays CUDA_CALL( cudaMalloc((void **) &d_X0, (window_size/2+1)*batch*sizeof(cufftComplex)) ); CUDA_CALL( cudaMalloc((void **) &d_X1, (window_size/2+1)*batch*sizeof(cufftComplex)) ); CUDA_CALL( cudaMalloc((void **) &d_S, (window_size/2+1)*batch*sizeof(cufftComplex)) ); CUDA_CALL( cudaMalloc((void **) &d_s, window_size*sizeof(float)) ); // create FFT plans and cuBLAS handle CUFFT_CALL( cufftPlanMany(&plan, 1, &window_size, 
NULL,1,0,NULL,1,0,CUFFT_R2C,batch) ); CUFFT_CALL( cufftPlanMany(&iplan, 1, &window_size, NULL,1,0,NULL,1,0,CUFFT_C2R,1) ); CUBLAS_CALL( cublasCreate(&handle) ); // execute R2C FFT CUFFT_CALL( cufftExecR2C(plan, d_x0, d_X0) ); CUFFT_CALL( cufftExecR2C(plan, d_x1, d_X1) ); // auto-corr X0, X0 cross_multiply<<<blocks,threads>>>(d_S,d_X0,d_X0,batch*(window_size/2+1)); col_mean<<<blocks,threads>>>(d_S,batch,window_size/2+1); CUFFT_CALL( cufftExecC2R(iplan, d_S, d_s) ); CUBLAS_CALL( cublasIsamax(handle, window_size, d_s, 1, &idx) ); CUDA_CALL( cudaMemcpy( &s0x0_max, d_s + (idx-1), 1*sizeof(float), cudaMemcpyDeviceToHost) ); // auto-corr X1, X1 cross_multiply<<<blocks,threads>>>(d_S,d_X1,d_X1,batch*(window_size/2+1)); col_mean<<<blocks,threads>>>(d_S,batch,window_size/2+1); CUFFT_CALL( cufftExecC2R(iplan, d_S, d_s) ); CUBLAS_CALL( cublasIsamax(handle, window_size, d_s, 1, &idx) ); CUDA_CALL( cudaMemcpy( &s1x1_max, d_s + (idx-1), 1*sizeof(float), cudaMemcpyDeviceToHost) ); // cross-corr X0, X1 cross_multiply<<<blocks,threads>>>(d_S,d_X0,d_X1,batch*(window_size/2+1)); col_mean<<<blocks,threads>>>(d_S,batch,window_size/2+1); CUFFT_CALL( cufftExecC2R(iplan, d_S, d_s) ); CUBLAS_CALL( cublasIsamax(handle, window_size, d_s, 1, &idx) ); CUDA_CALL( cudaMemcpy( &corr_coeff, d_s + (idx-1), 1*sizeof(float), cudaMemcpyDeviceToHost) ); printf("corr coeff: %0.4f %d \n",corr_coeff/sqrt(s1x1_max*s0x0_max), idx); // clean up CUFFT_CALL( cufftDestroy(plan) ); CUFFT_CALL( cufftDestroy(iplan) ); CUDA_CALL( cudaFree(d_X0) ); CUDA_CALL( cudaFree(d_X1) ); CUDA_CALL( cudaFree(d_S) ); CUDA_CALL( cudaFree(d_s) ); CUBLAS_CALL( cublasDestroy(handle) ); return corr_coeff/sqrt(s1x1_max*s0x0_max); } int PPT(int nblock, int lblock, int ntap, float *d_uPPT, float *d_band_P){ // http://www.physics.orst.edu/~rubin/nacphy/lapack/routines/spbtrf.html double *ab, *coeff_P, *coeff_PPT; float *uPPT, *band_P; char uplo = 'U'; // store upper triangle int n = nblock, kd = ntap-1, ldab = ntap, info; int ntsblock = nblock + ntap - 1; // allocate memory coeff_P = (double *) malloc(ntap*lblock*sizeof(double)); coeff_PPT = (double *) malloc(lblock*ntap*sizeof(double)); ab = (double*) malloc(ntap*nblock*sizeof(double)); uPPT = (float*) malloc(lblock*ntap*nblock*sizeof(float)); band_P = (float*) malloc(lblock*ntap*ntsblock*sizeof(float)); // generate window function for (int i=0; i<ntap*lblock; i++){ coeff_P[i] = hamming(i,ntap*lblock); } for (int i=0; i<lblock*ntap; i++) coeff_PPT[i] = 0.; // initialize array for (int k=0; k < ntap; k++){ for (int j=0; j < lblock; j++){ for (int i=0; i < ntap - k; i++){ coeff_PPT[k*lblock + j] += coeff_P[(i+k)*lblock + j] * coeff_P[i*lblock + j]; } } } // compute Cholesky factorization of each coeff_PPT submatrix // remember that lapack has column major format for (int i=0; i<lblock; i++){ // band_PPT for (int j=0; j<ntap; j++){ for (int k=0; k<nblock; k++){ ab[k*ntap + j] = coeff_PPT[(ntap-1-j)*lblock + i]; } } dpbtrf_(&uplo, &n, &kd, ab, &ldab, &info); if (info != 0){ printf("pbtrf error :%d\n",info); } for (int j=0; j<ntap; j++){ for (int k=0; k<nblock; k++){ uPPT[i*ntap*nblock + k*ntap + j] = (float) ab[k*ntap + j]; // cuBLAS also has column major format } } } // fill host arrays for (int k=0; k<lblock; k++){ for (int j=0; j<ntap; j++){ for (int i=0; i<ntsblock; i++){ band_P[k*ntap*ntsblock + i*ntap + j] = (float) coeff_P[(ntap-1-j)*lblock + k]; } } } // load to device CUDA_CALL( cudaMemcpy(d_uPPT, uPPT, lblock*ntap*nblock*sizeof(float), cudaMemcpyHostToDevice) ); CUDA_CALL( cudaMemcpy(d_band_P, band_P, 
ntsblock*lblock*ntap*sizeof(float), cudaMemcpyHostToDevice) ); free(uPPT); free(ab); free(coeff_P); free(coeff_PPT); free(band_P); return 1; } // generate pfb spectrum (doesn't actually do the polyphase bit...) int pfb(float *d_t, int num_samples, int num_tap, int num_freq, cufftComplex *d_s){ int lblock = 2 * (num_freq - 1); int nblock = num_samples / lblock - (num_tap - 1); float *d_tt; cufftComplex *d_ft; cufftHandle plan; // create FFT plan int batch = 1; int fft_size = lblock*num_tap; CUDA_CALL( cudaMalloc((void **) &d_ft, (fft_size/2+1)*sizeof(cufftComplex)) ); CUDA_CALL( cudaMalloc((void **) &d_tt, fft_size*sizeof(cufftComplex)) ); CUFFT_CALL( cufftPlanMany(&plan, 1, &fft_size,NULL,1,0,NULL,1,0,CUFFT_R2C,batch) ); dim3 blocks(64,1,1); dim3 threads(512,1,1); // iterate over blocks (no batches yet) for (int i=0; i < nblock; i++){ // window window<<<blocks,threads>>>(d_t + i*lblock, d_tt, fft_size); CUDA_CALL(cudaGetLastError()); // execute rFFT CUFFT_CALL( cufftExecR2C(plan, d_tt, d_ft) ); // decimate decimate<<<blocks,threads>>>(d_ft,d_s+i*num_freq,num_tap,fft_size/2+1); CUDA_CALL(cudaGetLastError()); } CUDA_CALL( cudaFree(d_ft) ); CUDA_CALL( cudaFree(d_tt) ); CUFFT_CALL( cufftDestroy(plan) ); return 1; } /* d_s is complex PFB timestream [num_snapshots, num_freqs] */ int inverse_pfb(cufftComplex *d_s, int num_samples, int num_tap, int num_freq, float *d_rts){ cufftHandle plan; cublasHandle_t handle; cublasStatus_t err; float *d_pts, *d_foo, *d_uPPT, *d_band_P; cudaEvent_t tic,toc; float elapsedTime; // pull out the number of blocks and their length int lblock = 2 * (num_freq - 1); int nblock = num_samples / lblock - (num_tap - 1); int ntsblock = nblock + num_tap - 1; float beta = 0.0,alpha = 1.0; // create CUDA events for timing cudaEventCreate(&tic); cudaEventCreate(&toc); // create cuBLAS context CUBLAS_CALL( cublasCreate(&handle) ); // generate and load coeff_P and Cholesky factorized PPT matrix to device CUDA_CALL( cudaMalloc((void **) &d_uPPT, nblock*lblock*num_tap*sizeof(float)) ); CUDA_CALL( cudaMalloc((void **) &d_band_P, ntsblock*lblock*num_tap*sizeof(float)) ); PPT(nblock, lblock, num_tap, d_uPPT, d_band_P); cudaEventRecord(tic); // generate pseudo timestream CUDA_CALL( cudaMalloc((void **) &d_pts, nblock*lblock*sizeof(float)) ); CUDA_CALL( cudaMalloc((void **) &d_foo, ntsblock*lblock*sizeof(float)) ); CUFFT_CALL( cufftPlanMany(&plan, 1, &lblock, NULL, 1, 0, NULL, 1, 0, CUFFT_C2R, nblock) ); CUFFT_CALL( cufftExecC2R(plan, d_s, d_foo) ); // calculate correlation using pseudo timestream float corr_coeff = corr_FXt(d_rts,d_foo, num_samples); // transpose the nblock x lblock spectrum to lblock x nblock // cufft assumes row major jormat, cublas assumes collumn major format // http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-geam err = cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_N, nblock, lblock, &alpha, d_foo, lblock, &beta, NULL, nblock, d_pts, nblock); if (err != CUBLAS_STATUS_SUCCESS){ printf("Error at %s:%s:%d\n",__FILE__,__FUNCTION__,__LINE__); } // multiple pseudo-timestream by 1./lblock (to rescale inverse FFT) dim3 blocks(64,1,1); dim3 threads(512,1,1); multiply<<<blocks,threads>>>(d_pts,1./lblock,lblock*nblock); // probably want to batch this or use streams for (int i = 0; i < lblock; i++){ // solve for intermediate vector // http://docs.nvidia.com/cuda/cublas/index.html#cublas-lt-t-gt-tbsv err = cublasStbsv(handle,CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_T,CUBLAS_DIAG_NON_UNIT, nblock,num_tap-1, d_uPPT+i*nblock*num_tap,num_tap, d_pts+i*nblock,1); if (err != 
CUBLAS_STATUS_SUCCESS){ printf("Error at %s:%s:%d\n",__FILE__,__FUNCTION__,__LINE__); } err = cublasStbsv(handle,CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_N,CUBLAS_DIAG_NON_UNIT, nblock,num_tap-1, d_uPPT+i*nblock*num_tap,num_tap, d_pts+i*nblock,1); if (err != CUBLAS_STATUS_SUCCESS){ printf("Error at %s:%s:%d\n",__FILE__,__FUNCTION__,__LINE__); } // project back onto time-stream // http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gbmv err = cublasSgbmv(handle,CUBLAS_OP_T, nblock, ntsblock, 0, num_tap-1, &alpha,d_band_P+i*num_tap*ntsblock,num_tap, d_pts+i*nblock, 1, &beta, d_foo+i*ntsblock, 1 ); //&beta, d_rts+i*ntsblock, 1 if (err != CUBLAS_STATUS_SUCCESS){ printf("Error at %s:%s:%d\n",__FILE__,__FUNCTION__,__LINE__); } } // now transpose lblock x ntsblock to ntsblock x lblock // but remember that cublas is column major err = cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_N, lblock, ntsblock, &alpha, d_foo, ntsblock, &beta, NULL, lblock, d_rts, lblock); if (err != CUBLAS_STATUS_SUCCESS){ printf("Error at %s:%s:%d\n",__FILE__,__FUNCTION__,__LINE__); } cudaEventRecord(toc); cudaEventSynchronize(toc); cudaEventElapsedTime(&elapsedTime,tic,toc); printf("inverse-pfb (gpu only): %f\n",elapsedTime); CUDA_CALL( cudaEventDestroy(tic) ); CUDA_CALL( cudaEventDestroy(toc) ); CUDA_CALL( cudaFree(d_pts) ); CUDA_CALL( cudaFree(d_foo) ); CUDA_CALL( cudaFree(d_uPPT) ); CUDA_CALL( cudaFree(d_band_P) ); CUBLAS_CALL( cublasDestroy(handle) ); return 1; } int main(int argc, char* argv[]){ int num_beng_frames = 2; int num_tap = 4, num_freq = BENG_CHANNELS_ + 1; float elapsedTime; float *d_ts, *d_rts, *ts, *rts; cufftComplex *d_s; cudaEvent_t tic, toc; curandGenerator_t gen; int num_samples = 2*BENG_CHANNELS_*(BENG_SNAPSHOTS*num_beng_frames + num_tap - 1); int lblock = 2 * (num_freq - 1); int nblock = num_samples / lblock - (num_tap - 1); printf("num_samples=%d\n",num_samples); printf("num_freqs=%d\n",num_freq); printf("lblock=%d\n",lblock); printf("nblock=%d\n",nblock); // create events CUDA_CALL( cudaEventCreate(&tic) ); CUDA_CALL( cudaEventCreate(&toc) ); // allocate device memory CUDA_CALL( cudaMalloc((void **) &d_ts, num_samples*sizeof(float)) ); CUDA_CALL( cudaMalloc((void **) &d_s, nblock*num_freq*sizeof(cufftComplex)) ); // generate data CURAND_CALL( curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT) ); CUDA_CALL( cudaEventRecord(tic) ); CURAND_CALL(curandGenerateNormal(gen, d_ts, num_samples, 0., 1.) 
); CUDA_CALL( cudaEventRecord(toc) ); CUDA_CALL( cudaEventSynchronize(toc) ); CUDA_CALL( cudaEventElapsedTime(&elapsedTime,tic,toc) ); fprintf(stdout, "generating %d random numbers took %f ms\n",num_samples,elapsedTime); // pfb CUDA_CALL( cudaEventRecord(tic) ); pfb(d_ts, num_samples, num_tap, num_freq, d_s); CUDA_CALL( cudaEventRecord(toc) ); CUDA_CALL( cudaEventSynchronize(toc) ); CUDA_CALL( cudaEventElapsedTime(&elapsedTime,tic,toc) ); fprintf(stdout, "pfb took %f ms\n",elapsedTime); // inverse pfb CUDA_CALL( cudaMalloc((void **) &d_rts, num_samples*sizeof(float)) ); CUDA_CALL( cudaEventRecord(tic) ); inverse_pfb(d_s, num_samples, num_tap, num_freq, d_rts); CUDA_CALL( cudaEventRecord(toc) ); CUDA_CALL( cudaEventSynchronize(toc) ); CUDA_CALL( cudaEventElapsedTime(&elapsedTime,tic,toc) ); fprintf(stdout, "inverse-pfb took %f ms\n",elapsedTime); // compute the correlation coefficient here: CUDA_CALL( cudaEventRecord(tic) ); float corr_coeff = corr_FXt(d_rts,d_ts, num_samples); CUDA_CALL( cudaEventRecord(toc) ); CUDA_CALL( cudaEventSynchronize(toc) ); CUDA_CALL( cudaEventElapsedTime(&elapsedTime,tic,toc) ); fprintf(stdout, "FXcorr took %f ms\n",elapsedTime); #if 0 // write time streams to file ts = (float*) malloc(num_samples*sizeof(float)); rts = (float*) malloc(num_samples*sizeof(float)); CUDA_CALL( cudaMemcpy(ts, d_ts, num_samples*sizeof(float), cudaMemcpyDeviceToHost) ); CUDA_CALL( cudaMemcpy(rts, d_rts, num_samples*sizeof(float), cudaMemcpyDeviceToHost) ); FILE *pFile; pFile = fopen("ts.txt","w"); for (int i=0; i < num_samples; i++){ fprintf(pFile,"%e %e\n",ts[i], rts[i]); } fclose(pFile); free(ts); free(rts); #endif // clean up CURAND_CALL( curandDestroyGenerator(gen) ); CUDA_CALL( cudaEventDestroy(tic) ); CUDA_CALL( cudaEventDestroy(toc) ); CUDA_CALL( cudaFree(d_ts) ); CUDA_CALL( cudaFree(d_s) ); CUDA_CALL( cudaFree(d_rts) ); fprintf(stdout,"done!\n"); }
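Both versions of corr_FXt above lean on batched real-to-complex FFTs. As a much smaller standalone reference for that part of the cuFFT API, the sketch below runs a single 1-D R2C transform on a cosine and prints the magnitude of each output bin (the peak should land at bin 4). Variable names and sizes are illustrative only; compile with -lcufft.

#include <stdio.h>
#include <math.h>
#include <cuda_runtime.h>
#include <cufft.h>

int main() {
  const int N = 32;                       // transform length
  float h_in[N];
  for (int i = 0; i < N; ++i) h_in[i] = cosf(2.f * 3.14159265f * 4 * i / N);

  float *d_in = NULL;
  cufftComplex *d_out = NULL;
  cudaMalloc(&d_in, N * sizeof(float));
  cudaMalloc(&d_out, (N / 2 + 1) * sizeof(cufftComplex));
  cudaMemcpy(d_in, h_in, N * sizeof(float), cudaMemcpyHostToDevice);

  cufftHandle plan;
  cufftPlan1d(&plan, N, CUFFT_R2C, 1);    // single (non-batched) R2C plan
  cufftExecR2C(plan, d_in, d_out);

  cufftComplex h_out[N / 2 + 1];
  cudaMemcpy(h_out, d_out, (N / 2 + 1) * sizeof(cufftComplex), cudaMemcpyDeviceToHost);
  for (int k = 0; k <= N / 2; ++k)
    printf("bin %2d : %g\n", k, sqrtf(h_out[k].x * h_out[k].x + h_out[k].y * h_out[k].y));

  cufftDestroy(plan);
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}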
ac24816bcf28b61a8f014e7d4798e35489b86bba.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void one_vector_int(int *vec, const int n) { unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x; if ( xIndex < n ) vec[xIndex]=1; }
ac24816bcf28b61a8f014e7d4798e35489b86bba.cu
#include "includes.h" __global__ void one_vector_int(int *vec, const int n) { unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x; if ( xIndex < n ) vec[xIndex]=1; }
d780d2660d95ebc872c04963c87464b1c9605f11.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void _bcnn_forward_softmax_layer_kernel(int n, int batch, float *input, float *output) { int i; float sum = 0; float largest = -INFINITY; int b = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (b >= batch) { return; } for (i = 0; i < n; ++i) { float val = input[i+b*n]; largest = (val>largest) ? val : largest; } for (i = 0; i < n; ++i) { sum += exp(input[i+b*n]-largest); } sum = (sum != 0) ? largest+log(sum) : largest-100; for (i = 0; i < n; ++i) { output[i+b*n] = exp(input[i+b*n]-sum); } }
d780d2660d95ebc872c04963c87464b1c9605f11.cu
#include "includes.h" __global__ void _bcnn_forward_softmax_layer_kernel(int n, int batch, float *input, float *output) { int i; float sum = 0; float largest = -INFINITY; int b = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (b >= batch) { return; } for (i = 0; i < n; ++i) { int val = input[i+b*n]; largest = (val>largest) ? val : largest; } for (i = 0; i < n; ++i) { sum += exp(input[i+b*n]-largest); } sum = (sum != 0) ? largest+log(sum) : largest-100; for (i = 0; i < n; ++i) { output[i+b*n] = exp(input[i+b*n]-sum); } }
edc64d3d5c71dd5dc1deb38b83e4b5f643700a54.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2020, Vijay Thakkar ([email protected]). **************************************************************************************************/ ////////////////////////////////////////////////////////////////////// // THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY // ////////////////////////////////////////////////////////////////////// #include "benchmark/benchmark.h" #include "cuasr/gemm/device/default_srgemm_configuration.h" #include "cuasr/gemm/device/srgemm.h" #include "cuasr/functional.h" #include "harness.h" //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 8 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_8x32x8_8x32x1_2x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = 
Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_16x32x8_16x32x1_4x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_16x64x8_16x64x1_4x8_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, 
cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x32x8_32x32x1_8x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x64x8_32x64x1_8x8_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x64x8_32x64x1_8x8_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 1 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x32x8_64x32x1_8x8_8x4_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename 
cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x32x8_64x32x1_8x8_8x4_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 8 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_8x32x8_8x16x1_2x2_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 8 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; using WarpShape = 
cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_8x64x8_8x32x1_2x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_16x32x8_16x16x1_4x2_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = 
cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_16x64x8_16x32x1_4x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_16x128x8_16x64x1_4x8_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 2 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void 
BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x32x8_32x16x1_4x4_8x4_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x64x8_32x32x1_8x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / 
Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 32 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x128x8_32x64x1_8x8_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x128x8_32x64x1_8x8_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x64x8_64x32x1_8x8_8x4_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x64x8_64x32x1_8x8_8x4_1x2) ->RangeMultiplier(2)->Range(256, 
4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 1 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x32x8_16x32x1_4x4_4x8_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 1 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, 
benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x32x8_32x32x1_8x4_4x8_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 1 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x64x8_32x64x1_8x8_4x8_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x64x8_32x64x1_8x8_4x8_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 1 // Threadblock: 128 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_128x32x8_64x32x1_8x8_8x4_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { 
benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_128x32x8_64x32x1_8x8_8x4_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_16x32x8_8x16x1_2x2_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_16x32x8_8x16x1_2x2_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_16x64x8_8x32x1_2x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; 
// setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_16x64x8_8x32x1_2x4_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x32x8_16x16x1_4x2_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x32x8_16x16x1_4x2_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x64x8_16x32x1_4x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // 
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x64x8_16x32x1_4x4_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 32 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x128x8_16x64x1_4x8_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x128x8_16x64x1_4x8_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 2 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x32x8_32x16x1_4x4_8x4_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, 
cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x32x8_32x16x1_4x4_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x64x8_32x32x1_8x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x64x8_32x32x1_8x4_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 64 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x128x8_32x64x1_8x8_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = 
Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x128x8_32x64x1_8x8_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 2 // Threadblock: 128 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_128x32x8_64x16x1_8x4_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 2 // Threadblock: 128 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_128x64x8_64x32x1_8x8_8x4_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, 
precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_128x64x8_64x32x1_8x8_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 16 x 64 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_16x64x16_8x16x1_2x2_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 16 x 128 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using 
InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_16x128x16_8x32x1_2x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x32x8_16x8x1_2x2_8x4_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x64x8_16x16x1_4x2_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = 
cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x64x8_16x16x1_4x2_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 32 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x128x8_16x32x1_4x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x128x8_16x32x1_4x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 32 x 256 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x256x8_16x64x1_4x8_4x8_2x4(benchmark::State 
&state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x256x8_16x64x1_4x8_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x64x8_32x16x1_4x4_8x4_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x64x8_32x16x1_4x4_8x4_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 64 x 128 x 8 #if 
defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x128x8_32x32x1_8x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x128x8_32x32x1_8x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 64 x 256 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x256x8_32x64x1_8x8_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x256x8_32x64x1_8x8_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif 
//////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 128 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_128x128x8_64x32x1_8x8_8x4_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_128x128x8_64x32x1_8x8_8x4_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x32x8_8x16x1_2x2_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, 
benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x32x8_8x16x1_2x2_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x32x8_16x16x1_4x2_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { 
benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x64x8_16x32x1_4x4_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 2 // Threadblock: 128 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_128x32x8_32x16x1_4x4_8x4_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 128 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_128x64x8_32x32x1_8x4_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_128x64x8_32x32x1_8x4_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 128 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_128x128x8_32x64x1_8x8_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_128x128x8_32x64x1_8x8_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 2 // Threadblock: 256 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_256x32x8_64x16x1_8x4_8x4_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<256, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // 
precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_256x32x8_64x16x1_8x4_8x4_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 2 // Threadblock: 256 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_256x64x8_64x32x1_8x8_8x4_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_256x64x8_64x32x1_8x8_8x4_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 32 x 64 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using 
Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x64x16_8x16x1_2x2_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 32 x 128 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x128x16_8x32x1_2x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 64 x 32 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>; using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, 
SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x32x16_16x8x1_2x2_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x64x8_16x16x1_4x2_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x128x8_16x32x1_4x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename 
cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x128x8_16x32x1_4x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 256 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x256x8_16x64x1_4x8_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x256x8_16x64x1_4x8_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 128 x 32 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 
16>; using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_128x32x16_32x8x1_4x2_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 128 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_128x64x8_32x16x1_4x4_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_128x64x8_32x16x1_4x4_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 128 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_128x128x8_32x32x1_8x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = 
      float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>;
  using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;

  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::RowMajor,                            //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_128x128x8_32x32x1_8x4_4x8_4x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 256 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_256x64x8_64x16x1_8x4_8x4_4x4(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>;
  using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;

  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::RowMajor,                            //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_256x64x8_64x16x1_8x4_8x4_4x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif
edc64d3d5c71dd5dc1deb38b83e4b5f643700a54.cu
/***************************************************************************************************
 * Copyright (c) 2020, Vijay Thakkar ([email protected]).
 **************************************************************************************************/
//////////////////////////////////////////////////////////////////////
// THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY //
//////////////////////////////////////////////////////////////////////
#include "benchmark/benchmark.h"

#include "cuasr/gemm/device/default_srgemm_configuration.h"
#include "cuasr/gemm/device/srgemm.h"
#include "cuasr/functional.h"

#include "harness.h"

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
  using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;

  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::RowMajor,                            //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    cudaDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_8x32x8_8x32x1_2x4_4x8_1x1)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
  using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;

  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::RowMajor,                            //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    cudaDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_16x32x8_16x32x1_4x4_4x8_1x1)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
  using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;

  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::RowMajor,                            //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    cudaDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_16x64x8_16x64x1_4x8_4x8_1x1)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
  using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;

  using AddOp = Config::AdditionOp;
  using MultOp
= Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x32x8_32x32x1_8x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x64x8_32x64x1_8x8_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x64x8_32x64x1_8x8_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 1 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x32x8_64x32x1_8x8_8x4_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, 
precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x32x8_64x32x1_8x8_8x4_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 8 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_8x32x8_8x16x1_2x2_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 8 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = 
cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_8x64x8_8x32x1_2x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_16x32x8_16x16x1_4x2_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using 
ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_16x64x8_16x32x1_4x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_16x128x8_16x64x1_4x8_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 2 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State &state) { const auto N = 
static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x32x8_32x16x1_4x4_8x4_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x64x8_32x32x1_8x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 32 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and 
(CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x128x8_32x64x1_8x8_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x128x8_32x64x1_8x8_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x64x8_64x32x1_8x8_8x4_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x64x8_64x32x1_8x8_8x4_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // 
Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 1 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x32x8_16x32x1_4x4_4x8_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 1 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } 
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x32x8_32x32x1_8x4_4x8_2x1)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Every remaining benchmark in this file instantiates the same body and only
// varies its name suffix, threadblock tile and warp tile.  The helper macro
// below expands to that common body: the Srgemm instantiation, the BenchHarness
// set up on an N x N x N problem, the timed loop, the 2 * N^3 Flop/s counter,
// and the BENCHMARK() registration over the 256..4096 problem-size range.
#define CUASR_BENCH_SM50_MINMAX_SSRGEMM_TN_N(SUFFIX, TB_M, TB_N, TB_K, W_M, W_N, W_K)  \
  static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_##SUFFIX(                    \
      benchmark::State &state) {                                                       \
    const auto N = static_cast<int>(state.range(0));                                   \
    using precision = float;                                                           \
    using OpClass   = cutlass::arch::OpClassSimt;                                      \
    using SmArch    = cutlass::arch::Sm50;                                             \
    using ThreadblockShape = cutlass::gemm::GemmShape<TB_M, TB_N, TB_K>;               \
    using WarpShape        = cutlass::gemm::GemmShape<W_M, W_N, W_K>;                  \
    using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;                        \
    using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<         \
        precision, precision, precision, precision, OpClass,                           \
        cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;                  \
    using AddOp            = Config::AdditionOp;                                       \
    using MultOp           = Config::MultiplicationOp;                                  \
    using EpilogueOutputOp = Config::EpilogueOutputOp;                                  \
    using Srgemm = cuasr::gemm::device::Srgemm<                                        \
        AddOp, MultOp,                                                                 \
        precision, cutlass::layout::RowMajor,                                          \
        precision, cutlass::layout::ColumnMajor,                                       \
        precision, cutlass::layout::ColumnMajor,                                       \
        precision, OpClass, SmArch,                                                    \
        ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,               \
        cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;              \
    /* setup bench harness */                                                          \
    cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });                     \
    /* benchmark loop */                                                               \
    for (auto _ : state) {                                                             \
      benchmark::DoNotOptimize(bench.run());                                           \
      cudaDeviceSynchronize();                                                         \
    }                                                                                  \
    double flops_per_itr = 2.0 * N * N * N;                                            \
    state.counters["Flop/s"] = benchmark::Counter(                                     \
        flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);                 \
  }                                                                                    \
  BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_##SUFFIX)                      \
      ->RangeMultiplier(2)->Range(256, 4096);

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8 | Threads / Warp: 4 x 8 | Warps / Block: 2 x 1 | Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
CUASR_BENCH_SM50_MINMAX_SSRGEMM_TN_N(64x64x8_32x64x1_8x8_4x8_2x1, 64, 64, 8, 32, 64, 8)
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8 | Threads / Warp: 8 x 4 | Warps / Block: 2 x 1 | Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
CUASR_BENCH_SM50_MINMAX_SSRGEMM_TN_N(128x32x8_64x32x1_8x8_8x4_2x1, 128, 32, 8, 64, 32, 8)
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2 | Threads / Warp: 4 x 8 | Warps / Block: 2 x 2 | Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
CUASR_BENCH_SM50_MINMAX_SSRGEMM_TN_N(16x32x8_8x16x1_2x2_4x8_2x2, 16, 32, 8, 8, 16, 8)
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4 | Threads / Warp: 4 x 8 | Warps / Block: 2 x 2 | Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
CUASR_BENCH_SM50_MINMAX_SSRGEMM_TN_N(16x64x8_8x32x1_2x4_4x8_2x2, 16, 64, 8, 8, 32, 8)
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2 | Threads / Warp: 4 x 8 | Warps / Block: 2 x 2 | Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
CUASR_BENCH_SM50_MINMAX_SSRGEMM_TN_N(32x32x8_16x16x1_4x2_4x8_2x2, 32, 32, 8, 16, 16, 8)
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4 | Threads / Warp: 4 x 8 | Warps / Block: 2 x 2 | Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
CUASR_BENCH_SM50_MINMAX_SSRGEMM_TN_N(32x64x8_16x32x1_4x4_4x8_2x2, 32, 64, 8, 16, 32, 8)
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8 | Threads / Warp: 4 x 8 | Warps / Block: 2 x 2 | Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
CUASR_BENCH_SM50_MINMAX_SSRGEMM_TN_N(32x128x8_16x64x1_4x8_4x8_2x2, 32, 128, 8, 16, 64, 8)
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4 | Threads / Warp: 8 x 4 | Warps / Block: 2 x 2 | Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
CUASR_BENCH_SM50_MINMAX_SSRGEMM_TN_N(64x32x8_32x16x1_4x4_8x4_2x2, 64, 32, 8, 32, 16, 8)
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4 | Threads / Warp: 4 x 8 | Warps / Block: 2 x 2 | Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
CUASR_BENCH_SM50_MINMAX_SSRGEMM_TN_N(64x64x8_32x32x1_8x4_4x8_2x2, 64, 64, 8, 32, 32, 8)
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8 | Threads / Warp: 4 x 8 | Warps / Block: 2 x 2 | Threadblock: 64 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
CUASR_BENCH_SM50_MINMAX_SSRGEMM_TN_N(64x128x8_32x64x1_8x8_4x8_2x2, 64, 128, 8, 32, 64, 8)
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4 | Threads / Warp: 8 x 4 | Warps / Block: 2 x 2 | Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
CUASR_BENCH_SM50_MINMAX_SSRGEMM_TN_N(128x32x8_64x16x1_8x4_8x4_2x2, 128, 32, 8, 64, 16, 8)
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8 | Threads / Warp: 8 x 4 | Warps / Block: 2 x 2 | Threadblock: 128 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
CUASR_BENCH_SM50_MINMAX_SSRGEMM_TN_N(128x64x8_64x32x1_8x8_8x4_2x2, 128, 64, 8, 64, 32, 8)
#endif
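////////////////////////////////////////////////////////////////////////////////
// Note on the naming scheme shared by every benchmark in this file: the suffix
//   <Mtb>x<Ntb>x<Ktb>_<Mw>x<Nw>x1_<elements/thread>_<threads/warp>_<warps/block>
// encodes, in order, the threadblock tile, the warp tile, the per-thread
// element tile, the thread arrangement inside a warp, and the warp arrangement
// inside a threadblock.  The pieces multiply back together; for example, in
// 64x128x8_32x64x1_8x8_4x8_2x2 an 8 x 8 element tile times a 4 x 8 thread
// arrangement gives the 32 x 64 warp tile, and 2 x 2 warps per block gives the
// 64 x 128 threadblock tile (the K extent, 8 here, is shared by the
// threadblock and warp GemmShapes).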
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2 | Threads / Warp: 4 x 8 | Warps / Block: 2 x 4 | Threadblock: 16 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
CUASR_BENCH_SM50_MINMAX_SSRGEMM_TN_N(16x64x16_8x16x1_2x2_4x8_2x4, 16, 64, 16, 8, 16, 16)
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4 | Threads / Warp: 4 x 8 | Warps / Block: 2 x 4 | Threadblock: 16 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
CUASR_BENCH_SM50_MINMAX_SSRGEMM_TN_N(16x128x16_8x32x1_2x4_4x8_2x4, 16, 128, 16, 8, 32, 16)
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2 | Threads / Warp: 8 x 4 | Warps / Block: 2 x 4 | Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
CUASR_BENCH_SM50_MINMAX_SSRGEMM_TN_N(32x32x8_16x8x1_2x2_8x4_2x4, 32, 32, 8, 16, 8, 8)
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2 | Threads / Warp: 4 x 8 | Warps / Block: 2 x 4 | Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
CUASR_BENCH_SM50_MINMAX_SSRGEMM_TN_N(32x64x8_16x16x1_4x2_4x8_2x4, 32, 64, 8, 16, 16, 8)
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4 | Threads / Warp: 4 x 8 | Warps / Block: 2 x 4 | Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
CUASR_BENCH_SM50_MINMAX_SSRGEMM_TN_N(32x128x8_16x32x1_4x4_4x8_2x4, 32, 128, 8, 16, 32, 8)
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8 | Threads / Warp: 4 x 8 | Warps / Block: 2 x 4 | Threadblock: 32 x 256 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
CUASR_BENCH_SM50_MINMAX_SSRGEMM_TN_N(32x256x8_16x64x1_4x8_4x8_2x4, 32, 256, 8, 16, 64, 8)
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4 | Threads / Warp: 8 x 4 | Warps / Block: 2 x 4 | Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
CUASR_BENCH_SM50_MINMAX_SSRGEMM_TN_N(64x64x8_32x16x1_4x4_8x4_2x4, 64, 64, 8, 32, 16, 8)
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4 | Threads / Warp: 4 x 8 | Warps / Block: 2 x 4 | Threadblock: 64 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
CUASR_BENCH_SM50_MINMAX_SSRGEMM_TN_N(64x128x8_32x32x1_8x4_4x8_2x4, 64, 128, 8, 32, 32, 8)
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8 | Threads / Warp: 4 x 8 | Warps / Block: 2 x 4 | Threadblock: 64 x 256 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
CUASR_BENCH_SM50_MINMAX_SSRGEMM_TN_N(64x256x8_32x64x1_8x8_4x8_2x4, 64, 256, 8, 32, 64, 8)
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8 | Threads / Warp: 8 x 4 | Warps / Block: 2 x 4 | Threadblock: 128 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
CUASR_BENCH_SM50_MINMAX_SSRGEMM_TN_N(128x128x8_64x32x1_8x8_8x4_2x4, 128, 128, 8, 64, 32, 8)
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2 | Threads / Warp: 4 x 8 | Warps / Block: 4 x 2 | Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
CUASR_BENCH_SM50_MINMAX_SSRGEMM_TN_N(32x32x8_8x16x1_2x2_4x8_4x2, 32, 32, 8, 8, 16, 8)
#endif
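////////////////////////////////////////////////////////////////////////////////
// Note on the reported counter: each benchmark publishes flops_per_itr =
// 2 * N^3, the usual GEMM operation count, with one maximum (the semiring
// multiplication here) and one minimum (the semiring addition) per
// inner-product term.  benchmark::Counter::kIsIterationInvariantRate makes
// Google Benchmark multiply that per-iteration value by the iteration count
// and divide by elapsed time, so the "Flop/s" column is an
// operations-per-second rate.  For the smallest problem in the range,
// N = 256, that is 2 * 256^3 ~ 3.4e7 semiring operations per launch.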
->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x32x8_16x16x1_4x2_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, 
benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x64x8_16x32x1_4x4_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 2 // Threadblock: 128 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_128x32x8_32x16x1_4x4_8x4_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 128 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_128x64x8_32x32x1_8x4_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { 
benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_128x64x8_32x32x1_8x4_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 128 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_128x128x8_32x64x1_8x8_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_128x128x8_32x64x1_8x8_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 2 // Threadblock: 256 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_256x32x8_64x16x1_8x4_8x4_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<256, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_256x32x8_64x16x1_8x4_8x4_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 2 // Threadblock: 256 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_256x64x8_64x32x1_8x8_8x4_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_256x64x8_64x32x1_8x8_8x4_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 32 x 64 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // 
precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x64x16_8x16x1_2x2_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 32 x 128 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_32x128x16_8x32x1_2x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 64 x 32 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>; using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using 
Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x32x16_16x8x1_2x2_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x64x8_16x16x1_4x2_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x128x8_16x32x1_4x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, 
SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x128x8_16x32x1_4x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 256 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x256x8_16x64x1_4x8_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_64x256x8_16x64x1_4x8_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 128 x 32 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>; using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename 
cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_128x32x16_32x8x1_4x2_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 128 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_128x64x8_32x16x1_4x4_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_128x64x8_32x16x1_4x4_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 128 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_128x128x8_32x32x1_8x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 
128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_128x128x8_32x32x1_8x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 256 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tn_n_256x64x8_64x16x1_8x4_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tn_n_256x64x8_64x16x1_8x4_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif
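// ----------------------------------------------------------------------------
// [editorial note -- hedged sketch, not generated cuASR code] Every benchmark
// above repeats the same google/benchmark pattern: read N from ->Range(),
// launch the SRGEMM once per iteration, synchronize the device so kernel time
// (not just launch time) is measured, and report an iteration-invariant
// Flop/s counter of 2*N^3. The stand-in run_srgemm_once() below is
// hypothetical; in the generated file that call is
// cuasr::bench::device::BenchHarness<Srgemm>::run().
// ----------------------------------------------------------------------------
#include <benchmark/benchmark.h>
#include <cuda_runtime.h>

static cudaError_t run_srgemm_once(int /*N*/) {
  // stand-in for BenchHarness<Srgemm>::run(); returns an error code so the
  // result can be fed to DoNotOptimize, as the real benchmarks do
  return cudaSuccess;
}

static void BM_flops_counter_pattern(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));  // problem size from ->Range(256, 4096)
  for (auto _ : state) {
    benchmark::DoNotOptimize(run_srgemm_once(N));  // enqueue the GPU work
    cudaDeviceSynchronize();                       // wait so the iteration times the kernel
  }
  // 2*N^3 flop per iteration; kIsIterationInvariantRate scales by iteration
  // count and divides by wall time, yielding Flop/s in the reported counters.
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_flops_counter_pattern)->RangeMultiplier(2)->Range(256, 4096);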
6c0567def82f877c3127bb151971797de60abc0e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <Eigen/Dense> #include "DataFormats/CaloRecHit/interface/MultifitComputations.h" // needed to compile with USER_CXXFLAGS="-DCOMPUTE_TDC_TIME" #include "DataFormats/HcalRecHit/interface/HcalSpecialTimes.h" #include "FWCore/Utilities/interface/CMSUnrollLoop.h" // TODO reuse some of the HCAL constats from //#include "RecoLocalCalo/HcalRecAlgos/interface/HcalConstants.h" #include "SimpleAlgoGPU.h" #include "KernelHelpers.h" #ifdef HCAL_MAHI_GPUDEBUG #define DETID_TO_DEBUG 1125647428 #endif namespace hcal { namespace mahi { // TODO: provide constants from configuration // from RecoLocalCalo/HcalRecProducers/python/HBHEMahiParameters_cfi.py constexpr int nMaxItersMin = 50; constexpr int nMaxItersNNLS = 500; constexpr double nnlsThresh = 1e-11; constexpr float deltaChi2Threashold = 1e-3; // from RecoLocalCalo/HcalRecProducers/src/HBHEPhase1Reconstructor.cc __forceinline__ __device__ float get_raw_charge(double const charge, double const pedestal, float const* shrChargeMinusPedestal, float const* parLin1Values, float const* parLin2Values, float const* parLin3Values, int32_t const nsamplesForCompute, int32_t const soi, int const sipmQTSShift, int const sipmQNTStoSum, int const sipmType, float const fcByPE, bool const isqie11) { float rawCharge; if (!isqie11) rawCharge = charge; else { auto const parLin1 = parLin1Values[sipmType - 1]; auto const parLin2 = parLin2Values[sipmType - 1]; auto const parLin3 = parLin3Values[sipmType - 1]; int const first = ::max(soi + sipmQTSShift, 0); int const last = ::min(soi + sipmQNTStoSum, nsamplesForCompute); float sipmq = 0.0f; for (auto ts = first; ts < last; ts++) sipmq += shrChargeMinusPedestal[threadIdx.y * nsamplesForCompute + ts]; auto const effectivePixelsFired = sipmq / fcByPE; auto const factor = hcal::reconstruction::compute_reco_correction_factor(parLin1, parLin2, parLin3, effectivePixelsFired); rawCharge = (charge - pedestal) * factor + pedestal; #ifdef HCAL_MAHI_GPUDEBUG printf("first = %d last = %d sipmQ = %f factor = %f rawCharge = %f\n", first, last, sipmq, factor, rawCharge); #endif } return rawCharge; } // Assume: same number of samples for HB and HE // TODO: add/validate restrict (will increase #registers in use by the kernel) __global__ void kernel_prep1d_sameNumberOfSamples(float* amplitudes, float* noiseTerms, float* electronicNoiseTerms, float* outputEnergy, float* outputChi2, uint16_t const* dataf01HE, uint16_t const* dataf5HB, uint16_t const* dataf3HB, uint32_t const* idsf01HE, uint32_t const* idsf5HB, uint32_t const* idsf3HB, uint32_t const stridef01HE, uint32_t const stridef5HB, uint32_t const stridef3HB, uint32_t const nchannelsf01HE, uint32_t const nchannelsf5HB, uint8_t const* npresamplesf5HB, int8_t* soiSamples, float* method0Energy, float* method0Time, uint32_t* outputdid, uint32_t const nchannels, uint32_t const* qualityStatus, uint32_t const* recoParam1Values, uint32_t const* recoParam2Values, float const* qieCoderOffsets, float const* qieCoderSlopes, int const* qieTypes, float const* pedestalWidths, float const* effectivePedestalWidths, float const* pedestals, float const* effectivePedestals, bool const useEffectivePedestals, int const* sipmTypeValues, float const* fcByPEValues, float const* parLin1Values, float const* parLin2Values, float const* parLin3Values, float const* gainValues, float const* respCorrectionValues, int const maxDepthHB, int const maxDepthHE, int const maxPhiHE, int const firstHBRing, int const lastHBRing, int 
const firstHERing, int const lastHERing, int const nEtaHB, int const nEtaHE, int const sipmQTSShift, int const sipmQNTStoSum, int const firstSampleShift, uint32_t const offsetForHashes, float const ts4Thresh, int const startingSample) { // indices + runtime constants auto const sample = threadIdx.x + startingSample; auto const sampleWithinWindow = threadIdx.x; int32_t const nsamplesForCompute = blockDim.x; auto const lch = threadIdx.y; auto const gch = lch + blockDim.y * blockIdx.x; auto const nchannels_per_block = blockDim.y; auto const linearThPerBlock = threadIdx.x + threadIdx.y * blockDim.x; // remove if (gch >= nchannels) return; // initialize all output buffers if (sampleWithinWindow == 0) { outputdid[gch] = 0; method0Energy[gch] = 0; method0Time[gch] = 0; outputEnergy[gch] = 0; outputChi2[gch] = 0; soiSamples[gch] = -1; } #ifdef HCAL_MAHI_GPUDEBUG #ifdef HCAL_MAHI_GPUDEBUG_SINGLECHANNEL if (gch > 0) return; #endif #endif // configure shared mem extern __shared__ char smem[]; float* shrEnergyM0PerTS = reinterpret_cast<float*>(smem); float* shrChargeMinusPedestal = shrEnergyM0PerTS + nsamplesForCompute * nchannels_per_block; float* shrMethod0EnergyAccum = shrChargeMinusPedestal + nsamplesForCompute * nchannels_per_block; float* shrEnergyM0TotalAccum = shrMethod0EnergyAccum + nchannels_per_block; unsigned long long int* shrMethod0EnergySamplePair = reinterpret_cast<unsigned long long int*>(shrEnergyM0TotalAccum + nchannels_per_block); if (sampleWithinWindow == 0) { shrMethod0EnergyAccum[lch] = 0; shrMethod0EnergySamplePair[lch] = __float_as_uint(std::numeric_limits<float>::min()); shrEnergyM0TotalAccum[lch] = 0; } // offset output auto* amplitudesForChannel = amplitudes + nsamplesForCompute * gch; auto* noiseTermsForChannel = noiseTerms + nsamplesForCompute * gch; auto* electronicNoiseTermsForChannel = electronicNoiseTerms + nsamplesForCompute * gch; auto const nchannelsf015 = nchannelsf01HE + nchannelsf5HB; // get event input quantities auto const stride = gch < nchannelsf01HE ? stridef01HE : (gch < nchannelsf015 ? stridef5HB : stridef3HB); auto const nsamples = gch < nchannelsf01HE ? compute_nsamples<Flavor1>(stride) : (gch < nchannelsf015 ? compute_nsamples<Flavor5>(stride) : compute_nsamples<Flavor3>(stride)); #ifdef HCAL_MAHI_GPUDEBUG assert(nsamples == nsamplesForCompute || nsamples - startingSample == nsamplesForCompute); #endif auto const id = gch < nchannelsf01HE ? idsf01HE[gch] : (gch < nchannelsf015 ? idsf5HB[gch - nchannelsf01HE] : idsf3HB[gch - nchannelsf015]); auto const did = HcalDetId{id}; auto const adc = gch < nchannelsf01HE ? adc_for_sample<Flavor1>(dataf01HE + stride * gch, sample) : (gch < nchannelsf015 ? adc_for_sample<Flavor5>(dataf5HB + stride * (gch - nchannelsf01HE), sample) : adc_for_sample<Flavor3>(dataf3HB + stride * (gch - nchannelsf015), sample)); auto const capid = gch < nchannelsf01HE ? capid_for_sample<Flavor1>(dataf01HE + stride * gch, sample) : (gch < nchannelsf015 ? capid_for_sample<Flavor5>(dataf5HB + stride * (gch - nchannelsf01HE), sample) : capid_for_sample<Flavor3>(dataf3HB + stride * (gch - nchannelsf015), sample)); #ifdef HCAL_MAHI_GPUDEBUG #ifdef HCAL_MAHI_GPUDEBUG_FILTERDETID if (id != DETID_TO_DEBUG) return; #endif #endif // compute hash for this did auto const hashedId = did.subdetId() == HcalBarrel ? 
hcal::reconstruction::did2linearIndexHB(id, maxDepthHB, firstHBRing, lastHBRing, nEtaHB) : hcal::reconstruction::did2linearIndexHE(id, maxDepthHE, maxPhiHE, firstHERing, lastHERing, nEtaHE) + offsetForHashes; // conditions based on the hash // FIXME: remove hardcoded values auto const qieType = qieTypes[hashedId] > 0 ? 1 : 0; // 2 types at this point auto const* qieOffsets = qieCoderOffsets + hashedId * HcalQIECodersGPU::numValuesPerChannel; auto const* qieSlopes = qieCoderSlopes + hashedId * HcalQIECodersGPU::numValuesPerChannel; auto const* pedestalsForChannel = pedestals + hashedId * 4; auto const* pedestalWidthsForChannel = useEffectivePedestals && (gch < nchannelsf01HE || gch >= nchannelsf015) ? effectivePedestalWidths + hashedId * 4 : pedestalWidths + hashedId * 4; auto const* gains = gainValues + hashedId * 4; auto const gain = gains[capid]; auto const gain0 = gains[0]; auto const respCorrection = respCorrectionValues[hashedId]; auto const pedestal = pedestalsForChannel[capid]; auto const pedestalWidth = pedestalWidthsForChannel[capid]; // if needed, only use effective pedestals for f01 auto const pedestalToUseForMethod0 = useEffectivePedestals && (gch < nchannelsf01HE || gch >= nchannelsf015) ? effectivePedestals[hashedId * 4 + capid] : pedestal; auto const sipmType = sipmTypeValues[hashedId]; auto const fcByPE = fcByPEValues[hashedId]; auto const recoParam1 = recoParam1Values[hashedId]; auto const recoParam2 = recoParam2Values[hashedId]; #ifdef HCAL_MAHI_GPUDEBUG printf("qieType = %d qieOffset0 = %f qieOffset1 = %f qieSlope0 = %f qieSlope1 = %f\n", qieType, qieOffsets[0], qieOffsets[1], qieSlopes[0], qieSlopes[1]); #endif // compute charge auto const charge = hcal::reconstruction::compute_coder_charge(qieType, adc, capid, qieOffsets, qieSlopes); shrChargeMinusPedestal[linearThPerBlock] = charge - pedestal; if (gch < nchannelsf01HE) { // NOTE: assume that soi is high only for a single guy! // which must be the case. cpu version does not check for that // if that is not the case, we will see that with cuda mmecheck auto const soibit = soibit_for_sample<Flavor1>(dataf01HE + stride * gch, sample); if (soibit == 1) soiSamples[gch] = sampleWithinWindow; } else if (gch >= nchannelsf015) { auto const soibit = soibit_for_sample<Flavor3>(dataf3HB + stride * (gch - nchannelsf015), sample); if (soibit == 1) soiSamples[gch] = sampleWithinWindow; } __syncthreads(); int32_t const soi = gch < nchannelsf01HE ? soiSamples[gch] : (gch < nchannelsf015 ? npresamplesf5HB[gch - nchannelsf01HE] : soiSamples[gch]); bool badSOI = (soi < 0 or soi >= nsamplesForCompute); if (badSOI and sampleWithinWindow == 0) { #ifdef GPU_DEBUG printf("Found HBHE channel %d with invalid SOI %d\n", gch, soi); #endif // mark the channel as bad outputChi2[gch] = -9999.f; } //int32_t const soi = gch >= nchannelsf01HE // ? npresamplesf5HB[gch - nchannelsf01HE] // : soiSamples[gch]; // this is here just to make things uniform... 
if (gch >= nchannelsf01HE && gch < nchannelsf015 && sampleWithinWindow == 0) soiSamples[gch] = npresamplesf5HB[gch - nchannelsf01HE]; // // compute various quantities (raw charge and tdc stuff) // NOTE: this branch will be divergent only for a single warp that // sits on the boundary when flavor 01 channels end and flavor 5 start // float const rawCharge = get_raw_charge(charge, pedestal, shrChargeMinusPedestal, parLin1Values, parLin2Values, parLin3Values, nsamplesForCompute, soi, sipmQTSShift, sipmQNTStoSum, sipmType, fcByPE, gch < nchannelsf01HE || gch >= nchannelsf015); auto const dfc = hcal::reconstruction::compute_diff_charge_gain( qieType, adc, capid, qieOffsets, qieSlopes, gch < nchannelsf01HE || gch >= nchannelsf015); #ifdef COMPUTE_TDC_TIME float tdcTime; if (gch >= nchannelsf01HE && gch < nchannelsf015) { tdcTime = HcalSpecialTimes::UNKNOWN_T_NOTDC; } else { if (gch < nchannelsf01HE) tdcTime = HcalSpecialTimes::getTDCTime(tdc_for_sample<Flavor1>(dataf01HE + stride * gch, sample)); else if (gch >= nchannelsf015) tdcTime = HcalSpecialTimes::getTDCTime(tdc_for_sample<Flavor3>(dataf3HB + stride * (gch - nchannelsf015), sample)); } #endif // COMPUTE_TDC_TIME // compute method 0 quantities // TODO: need to apply containment // TODO: need to apply time slew // TODO: for < run 3, apply HBM legacy energy correction auto const nsamplesToAdd = recoParam1 < 10 ? recoParam2 : (recoParam1 >> 14) & 0xF; auto const startSampleTmp = soi + firstSampleShift; auto const startSample = startSampleTmp < 0 ? 0 : startSampleTmp; auto const endSample = startSample + nsamplesToAdd < nsamplesForCompute ? startSample + nsamplesToAdd : nsamplesForCompute; // NOTE: gain is a small number < 10^-3, multiply it last auto const energym0_per_ts = gain * ((rawCharge - pedestalToUseForMethod0) * respCorrection); auto const energym0_per_ts_gain0 = gain0 * ((rawCharge - pedestalToUseForMethod0) * respCorrection); // store to shared mem shrEnergyM0PerTS[lch * nsamplesForCompute + sampleWithinWindow] = energym0_per_ts; atomicAdd(&shrEnergyM0TotalAccum[lch], energym0_per_ts_gain0); #ifdef HCAL_MAHI_GPUDEBUG printf( "id = %u sample = %d gch = %d hashedId = %u adc = %u capid = %u\n" " charge = %f rawCharge = %f dfc = %f pedestal = %f\n" " gain = %f respCorrection = %f energym0_per_ts = %f\n", id, sample, gch, hashedId, adc, capid, charge, rawCharge, dfc, pedestalToUseForMethod0, gain, respCorrection, energym0_per_ts); printf( "startSample = %d endSample = %d param1 = %u param2 = %u\n", startSample, endSample, recoParam1, recoParam2); #endif if (sampleWithinWindow >= startSample && sampleWithinWindow < endSample) { atomicAdd(&shrMethod0EnergyAccum[lch], energym0_per_ts); // pack sample, energy as 64 bit value unsigned long long int old = shrMethod0EnergySamplePair[lch], assumed; unsigned long long int val = (static_cast<unsigned long long int>(sampleWithinWindow) << 32) + __float_as_uint(energym0_per_ts); do { assumed = old; // decode energy, sample values //int const current_sample = (assumed >> 32) & 0xffffffff; float const current_energy = __uint_as_float(assumed & 0xffffffff); if (energym0_per_ts > current_energy) old = atomicCAS(&shrMethod0EnergySamplePair[lch], assumed, val); else break; } while (assumed != old); } __syncthreads(); // NOTE: must take soi, as values for that thread are used... 
// NOTE: does not run if soi is bad, because it does not match any sampleWithinWindow if (sampleWithinWindow == soi) { auto const method0_energy = shrMethod0EnergyAccum[lch]; auto const val = shrMethod0EnergySamplePair[lch]; int const max_sample = (val >> 32) & 0xffffffff; float const max_energy = __uint_as_float(val & 0xffffffff); float const max_energy_1 = max_sample < nsamplesForCompute - 1 ? shrEnergyM0PerTS[lch * nsamplesForCompute + max_sample + 1] : 0.f; float const position = nsamplesToAdd < nsamplesForCompute ? max_sample - soi : max_sample; auto const sum = max_energy + max_energy_1; // FIXME: for full comparison with cpu method 0 timing, // need to correct by slew // requires an accumulator -> more shared mem -> omit here unless // really needed float const time = max_energy > 0.f && max_energy_1 > 0.f ? 25.f * (position + max_energy_1 / sum) : 25.f * position; // store method0 quantities to global mem outputdid[gch] = id; method0Energy[gch] = method0_energy; method0Time[gch] = time; #ifdef HCAL_MAHI_GPUDEBUG printf("tsTOT = %f tstrig = %f ts4Thresh = %f\n", shrEnergyM0TotalAccum[lch], energym0_per_ts_gain0, ts4Thresh); #endif // Channel quality check // https://github.com/cms-sw/cmssw/blob/master/RecoLocalCalo/HcalRecAlgos/plugins/HcalChannelPropertiesEP.cc#L107-L109 // https://github.com/cms-sw/cmssw/blob/6d2f66057131baacc2fcbdd203588c41c885b42c/CondCore/HcalPlugins/plugins/HcalChannelQuality_PayloadInspector.cc#L30 // const bool taggedBadByDb = severity.dropChannel(digistatus->getValue()); // do not run MAHI if taggedBadByDb = true auto const digiStatus_ = qualityStatus[hashedId]; const bool taggedBadByDb = (digiStatus_ / 32770); if (taggedBadByDb) outputChi2[gch] = -9999.f; // check as in cpu version if mahi is not needed // FIXME: KNOWN ISSUE: observed a problem when rawCharge and pedestal // are basically equal and generate -0.00000... // needs to be treated properly if (!(shrEnergyM0TotalAccum[lch] > 0 && energym0_per_ts_gain0 > ts4Thresh)) { // do not need to run mahi minimization //outputEnergy[gch] = 0; energy already inited to 0 outputChi2[gch] = -9999.f; } #ifdef HCAL_MAHI_GPUDEBUG printf("method0_energy = %f max_sample = %d max_energy = %f time = %f\n", method0_energy, max_sample, max_energy, time); #endif } // // preparations for mahi fit // auto const amplitude = rawCharge - pedestalToUseForMethod0; auto const noiseADC = (1. / std::sqrt(12)) * dfc; auto const noisePhotoSq = amplitude > pedestalWidth ? (amplitude * fcByPE) : 0.f; auto const noiseTerm = noiseADC * noiseADC + noisePhotoSq + pedestalWidth * pedestalWidth; #ifdef HCAL_MAHI_GPUDEBUG printf( "charge(%d) = %f pedestal(%d) = %f dfc(%d) = %f pedestalWidth(%d) = %f noiseADC(%d) = %f noisPhoto(%d) = " "%f\n", sample, rawCharge, sample, pedestalToUseForMethod0, sample, dfc, sample, pedestalWidth, sample, noiseADC, sample, noisePhotoSq); #endif // store to global memory amplitudesForChannel[sampleWithinWindow] = amplitude; noiseTermsForChannel[sampleWithinWindow] = noiseTerm; electronicNoiseTermsForChannel[sampleWithinWindow] = pedestalWidth; } // TODO: need to add an array of offsets for pulses (a la activeBXs...) 
// Assume for now 8 pulses __global__ void kernel_prep_pulseMatrices_sameNumberOfSamples(float* pulseMatrices, float* pulseMatricesM, float* pulseMatricesP, int const* pulseOffsets, float const* amplitudes, uint32_t const* idsf01HE, uint32_t const* idsf5HB, uint32_t const* idsf3HB, uint32_t const nchannelsf01HE, uint32_t const nchannelsf5HB, uint32_t const nchannelsTotal, int8_t const* soiSamples, uint32_t const* recoPulseShapeIds, float const* acc25nsVecValues, float const* diff25nsItvlVecValues, float const* accVarLenIdxMinusOneVecValues, float const* diffVarItvlIdxMinusOneVecValues, float const* accVarLenIdxZeroVecValues, float const* diffVarItvlIdxZeroVecValues, float const meanTime, float const timeSigmaSiPM, float const timeSigmaHPD, int const maxDepthHB, int const maxDepthHE, int const maxPhiHE, int const firstHBRing, int const lastHBRing, int const firstHERing, int const lastHERing, int const nEtaHB, int const nEtaHE, uint32_t const offsetForHashes, bool const applyTimeSlew, float const tzeroTimeSlew, float const slopeTimeSlew, float const tmaxTimeSlew) { // indices auto const ipulse = threadIdx.y; auto const npulses = blockDim.y; auto const sample = threadIdx.x; auto const nsamples = blockDim.x; auto const lch = threadIdx.z; auto const gch = lch + blockIdx.x * blockDim.z; auto const nchannelsf015 = nchannelsf01HE + nchannelsf5HB; if (gch >= nchannelsTotal) return; // conditions auto const id = gch < nchannelsf01HE ? idsf01HE[gch] : (gch < nchannelsf015 ? idsf5HB[gch - nchannelsf01HE] : idsf3HB[gch - nchannelsf015]); //auto const id = gch >= nchannelsf01HE // ? idsf5HB[gch - nchannelsf01HE] // : idsf01HE[gch]; auto const deltaT = gch >= nchannelsf01HE && gch < nchannelsf015 ? timeSigmaHPD : timeSigmaSiPM; auto const did = DetId{id}; auto const hashedId = did.subdetId() == HcalBarrel ? 
hcal::reconstruction::did2linearIndexHB(id, maxDepthHB, firstHBRing, lastHBRing, nEtaHB) : hcal::reconstruction::did2linearIndexHE(id, maxDepthHE, maxPhiHE, firstHERing, lastHERing, nEtaHE) + offsetForHashes; auto const recoPulseShapeId = recoPulseShapeIds[hashedId]; auto const* acc25nsVec = acc25nsVecValues + recoPulseShapeId * hcal::constants::maxPSshapeBin; auto const* diff25nsItvlVec = diff25nsItvlVecValues + recoPulseShapeId * hcal::constants::maxPSshapeBin; auto const* accVarLenIdxMinusOneVec = accVarLenIdxMinusOneVecValues + recoPulseShapeId * hcal::constants::nsPerBX; auto const* diffVarItvlIdxMinusOneVec = diffVarItvlIdxMinusOneVecValues + recoPulseShapeId * hcal::constants::nsPerBX; auto const* accVarLenIdxZeroVec = accVarLenIdxZeroVecValues + recoPulseShapeId * hcal::constants::nsPerBX; auto const* diffVarItvlIdxZeroVec = diffVarItvlIdxZeroVecValues + recoPulseShapeId * hcal::constants::nsPerBX; // offset output arrays auto* pulseMatrix = pulseMatrices + nsamples * npulses * gch; auto* pulseMatrixM = pulseMatricesM + nsamples * npulses * gch; auto* pulseMatrixP = pulseMatricesP + nsamples * npulses * gch; // amplitude per ipulse int const soi = soiSamples[gch]; int const pulseOffset = pulseOffsets[ipulse]; auto const amplitude = amplitudes[gch * nsamples + pulseOffset + soi]; #ifdef HCAL_MAHI_GPUDEBUG #ifdef HCAL_MAHI_GPUDEBUG_FILTERDETID if (id != DETID_TO_DEBUG) return; #endif #endif #ifdef HCAL_MAHI_GPUDEBUG if (sample == 0 && ipulse == 0) { for (int i = 0; i < 8; i++) printf("amplitude(%d) = %f\n", i, amplitudes[gch * nsamples + i]); printf("acc25nsVec and diff25nsItvlVec for recoPulseShapeId = %u\n", recoPulseShapeId); for (int i = 0; i < 256; i++) { printf("acc25nsVec(%d) = %f diff25nsItvlVec(%d) = %f\n", i, acc25nsVec[i], i, diff25nsItvlVec[i]); } printf("accVarLenIdxZEROVec and accVarLenIdxMinusOneVec\n"); for (int i = 0; i < 25; i++) { printf("accVarLenIdxZEROVec(%d) = %f accVarLenIdxMinusOneVec(%d) = %f\n", i, accVarLenIdxZeroVec[i], i, accVarLenIdxMinusOneVec[i]); } printf("diffVarItvlIdxZEROVec and diffVarItvlIdxMinusOneVec\n"); for (int i = 0; i < 25; i++) { printf("diffVarItvlIdxZEROVec(%d) = %f diffVarItvlIdxMinusOneVec(%d) = %f\n", i, diffVarItvlIdxZeroVec[i], i, diffVarItvlIdxMinusOneVec[i]); } } #endif auto t0 = meanTime; if (applyTimeSlew) { if (amplitude <= 1.0f) t0 += hcal::reconstruction::compute_time_slew_delay(1.0, tzeroTimeSlew, slopeTimeSlew, tmaxTimeSlew); else t0 += hcal::reconstruction::compute_time_slew_delay(amplitude, tzeroTimeSlew, slopeTimeSlew, tmaxTimeSlew); } auto const t0m = -deltaT + t0; auto const t0p = deltaT + t0; #ifdef HCAL_MAHI_GPUDEBUG if (sample == 0 && ipulse == 0) { printf("time values: %f %f %f\n", t0, t0m, t0p); } if (sample == 0 && ipulse == 0) { for (int i = 0; i < hcal::constants::maxSamples; i++) { auto const value = hcal::reconstruction::compute_pulse_shape_value(t0, i, 0, acc25nsVec, diff25nsItvlVec, accVarLenIdxMinusOneVec, diffVarItvlIdxMinusOneVec, accVarLenIdxZeroVec, diffVarItvlIdxZeroVec); printf("pulse(%d) = %f\n", i, value); } printf("\n"); for (int i = 0; i < hcal::constants::maxSamples; i++) { auto const value = hcal::reconstruction::compute_pulse_shape_value(t0p, i, 0, acc25nsVec, diff25nsItvlVec, accVarLenIdxMinusOneVec, diffVarItvlIdxMinusOneVec, accVarLenIdxZeroVec, diffVarItvlIdxZeroVec); printf("pulseP(%d) = %f\n", i, value); } printf("\n"); for (int i = 0; i < hcal::constants::maxSamples; i++) { auto const value = hcal::reconstruction::compute_pulse_shape_value(t0m, i, 0, acc25nsVec, diff25nsItvlVec, 
accVarLenIdxMinusOneVec, diffVarItvlIdxMinusOneVec, accVarLenIdxZeroVec, diffVarItvlIdxZeroVec); printf("pulseM(%d) = %f\n", i, value); } } #endif // FIXME: shift should be treated properly, // here assume 8 time slices and 8 samples auto const shift = 4 - soi; // as in cpu version! // auto const offset = ipulse - soi; // auto const idx = sample - offset; int32_t const idx = sample - pulseOffset; auto const value = idx >= 0 && idx < nsamples ? hcal::reconstruction::compute_pulse_shape_value(t0, idx, shift, acc25nsVec, diff25nsItvlVec, accVarLenIdxMinusOneVec, diffVarItvlIdxMinusOneVec, accVarLenIdxZeroVec, diffVarItvlIdxZeroVec) : 0; auto const value_t0m = idx >= 0 && idx < nsamples ? hcal::reconstruction::compute_pulse_shape_value(t0m, idx, shift, acc25nsVec, diff25nsItvlVec, accVarLenIdxMinusOneVec, diffVarItvlIdxMinusOneVec, accVarLenIdxZeroVec, diffVarItvlIdxZeroVec) : 0; auto const value_t0p = idx >= 0 && idx < nsamples ? hcal::reconstruction::compute_pulse_shape_value(t0p, idx, shift, acc25nsVec, diff25nsItvlVec, accVarLenIdxMinusOneVec, diffVarItvlIdxMinusOneVec, accVarLenIdxZeroVec, diffVarItvlIdxZeroVec) : 0; // store to global if (amplitude > 0.f) { pulseMatrix[ipulse * nsamples + sample] = value; pulseMatrixM[ipulse * nsamples + sample] = value_t0m; pulseMatrixP[ipulse * nsamples + sample] = value_t0p; } else { pulseMatrix[ipulse * nsamples + sample] = 0.f; pulseMatrixM[ipulse * nsamples + sample] = 0.f; pulseMatrixP[ipulse * nsamples + sample] = 0.f; } } template <int NSAMPLES, int NPULSES> __forceinline__ __device__ void update_covariance( calo::multifit::ColumnVector<NPULSES> const& resultAmplitudesVector, calo::multifit::MapSymM<float, NSAMPLES>& covarianceMatrix, Eigen::Map<const calo::multifit::ColMajorMatrix<NSAMPLES, NPULSES>> const& pulseMatrix, Eigen::Map<const calo::multifit::ColMajorMatrix<NSAMPLES, NPULSES>> const& pulseMatrixM, Eigen::Map<const calo::multifit::ColMajorMatrix<NSAMPLES, NPULSES>> const& pulseMatrixP) { CMS_UNROLL_LOOP for (int ipulse = 0; ipulse < NPULSES; ipulse++) { auto const resultAmplitude = resultAmplitudesVector(ipulse); if (resultAmplitude == 0) continue; #ifdef HCAL_MAHI_GPUDEBUG printf("pulse cov array for ibx = %d\n", ipulse); #endif // preload a column float pmcol[NSAMPLES], pmpcol[NSAMPLES], pmmcol[NSAMPLES]; CMS_UNROLL_LOOP for (int counter = 0; counter < NSAMPLES; counter++) { pmcol[counter] = __ldg(&pulseMatrix.coeffRef(counter, ipulse)); pmpcol[counter] = __ldg(&pulseMatrixP.coeffRef(counter, ipulse)); pmmcol[counter] = __ldg(&pulseMatrixM.coeffRef(counter, ipulse)); } auto const ampl2 = resultAmplitude * resultAmplitude; CMS_UNROLL_LOOP for (int col = 0; col < NSAMPLES; col++) { auto const valueP_col = pmpcol[col]; auto const valueM_col = pmmcol[col]; auto const value_col = pmcol[col]; auto const tmppcol = valueP_col - value_col; auto const tmpmcol = valueM_col - value_col; // diagonal auto tmp_value = 0.5 * (tmppcol * tmppcol + tmpmcol * tmpmcol); covarianceMatrix(col, col) += ampl2 * tmp_value; // FIXME: understand if this actually gets unrolled CMS_UNROLL_LOOP for (int row = col + 1; row < NSAMPLES; row++) { float const valueP_row = pmpcol[row]; //pulseMatrixP(j, ipulseReal); float const value_row = pmcol[row]; //pulseMatrix(j, ipulseReal); float const valueM_row = pmmcol[row]; //pulseMatrixM(j, ipulseReal); float tmpprow = valueP_row - value_row; float tmpmrow = valueM_row - value_row; auto const covValue = 0.5 * (tmppcol * tmpprow + tmpmcol * tmpmrow); covarianceMatrix(row, col) += ampl2 * covValue; } } } } template <int 
NSAMPLES, int NPULSES> __global__ void kernel_minimize(float* outputEnergy, float* outputChi2, float const* __restrict__ inputAmplitudes, float const* __restrict__ pulseMatrices, float const* __restrict__ pulseMatricesM, float const* __restrict__ pulseMatricesP, int const* __restrict__ pulseOffsetValues, float const* __restrict__ noiseTerms, float const* __restrict__ electronicNoiseTerms, int8_t const* __restrict__ soiSamples, float const* __restrict__ noiseCorrelationValues, float const* __restrict__ pedestalWidths, float const* __restrict__ effectivePedestalWidths, bool const useEffectivePedestals, uint32_t const* __restrict__ idsf01HE, uint32_t const* __restrict__ idsf5HB, uint32_t const* __restrict__ idsf3HB, float const* __restrict__ gainValues, float const* __restrict__ respCorrectionValues, uint32_t const nchannelsf01HE, uint32_t const nchannelsf5HB, uint32_t const nchannelsTotal, uint32_t const offsetForHashes, int const maxDepthHB, int const maxDepthHE, int const maxPhiHE, int const firstHBRing, int const lastHBRing, int const firstHERing, int const lastHERing, int const nEtaHB, int const nEtaHE) { // can be relaxed if needed - minor updates are needed in that case! static_assert(NPULSES == NSAMPLES); // indices auto const gch = threadIdx.x + blockIdx.x * blockDim.x; auto const nchannelsf015 = nchannelsf01HE + nchannelsf5HB; if (gch >= nchannelsTotal) return; // if chi2 is set to -9999 do not run minimization if (outputChi2[gch] == -9999.f) return; // configure shared mem extern __shared__ char shrmem[]; float* shrMatrixLFnnlsStorage = reinterpret_cast<float*>(shrmem) + calo::multifit::MapSymM<float, NPULSES>::total * threadIdx.x; float* shrAtAStorage = reinterpret_cast<float*>(shrmem) + calo::multifit::MapSymM<float, NPULSES>::total * (threadIdx.x + blockDim.x); // conditions for pedestal widths auto const id = gch < nchannelsf01HE ? idsf01HE[gch] : (gch < nchannelsf015 ? idsf5HB[gch - nchannelsf01HE] : idsf3HB[gch - nchannelsf015]); auto const did = DetId{id}; auto const hashedId = did.subdetId() == HcalBarrel ? hcal::reconstruction::did2linearIndexHB(id, maxDepthHB, firstHBRing, lastHBRing, nEtaHB) : hcal::reconstruction::did2linearIndexHE(id, maxDepthHE, maxPhiHE, firstHERing, lastHERing, nEtaHE) + offsetForHashes; auto const* pedestalWidthsForChannel = useEffectivePedestals && (gch < nchannelsf01HE || gch >= nchannelsf015) ? 
effectivePedestalWidths + hashedId * 4 : pedestalWidths + hashedId * 4; auto const averagePedestalWidth2 = 0.25 * (pedestalWidthsForChannel[0] * pedestalWidthsForChannel[0] + pedestalWidthsForChannel[1] * pedestalWidthsForChannel[1] + pedestalWidthsForChannel[2] * pedestalWidthsForChannel[2] + pedestalWidthsForChannel[3] * pedestalWidthsForChannel[3]); auto const* gains = gainValues + hashedId * 4; // FIXME on cpu ts 0 capid was used - does it make any difference auto const gain = gains[0]; auto const respCorrection = respCorrectionValues[hashedId]; auto const noisecorr = noiseCorrelationValues[hashedId]; #ifdef HCAL_MAHI_GPUDEBUG #ifdef HCAL_MAHI_GPUDEBUG_FILTERDETID if (id != DETID_TO_DEBUG) return; #endif #endif /* // TODO: provide this properly int const soi = soiSamples[gch]; */ calo::multifit::ColumnVector<NPULSES, int> pulseOffsets; CMS_UNROLL_LOOP for (int i = 0; i < NPULSES; ++i) pulseOffsets(i) = i; // pulseOffsets(i) = pulseOffsetValues[i] - pulseOffsetValues[0]; // output amplitudes/weights calo::multifit::ColumnVector<NPULSES> resultAmplitudesVector = calo::multifit::ColumnVector<NPULSES>::Zero(); // map views Eigen::Map<const calo::multifit::ColumnVector<NSAMPLES>> inputAmplitudesView{inputAmplitudes + gch * NSAMPLES}; Eigen::Map<const calo::multifit::ColumnVector<NSAMPLES>> noiseTermsView{noiseTerms + gch * NSAMPLES}; Eigen::Map<const calo::multifit::ColumnVector<NSAMPLES>> noiseElectronicView{electronicNoiseTerms + gch * NSAMPLES}; Eigen::Map<const calo::multifit::ColMajorMatrix<NSAMPLES, NPULSES>> glbPulseMatrixMView{pulseMatricesM + gch * NSAMPLES * NPULSES}; Eigen::Map<const calo::multifit::ColMajorMatrix<NSAMPLES, NPULSES>> glbPulseMatrixPView{pulseMatricesP + gch * NSAMPLES * NPULSES}; Eigen::Map<const calo::multifit::ColMajorMatrix<NSAMPLES, NPULSES>> glbPulseMatrixView{pulseMatrices + gch * NSAMPLES * NPULSES}; #ifdef HCAL_MAHI_GPUDEBUG for (int i = 0; i < NSAMPLES; i++) printf("inputValues(%d) = %f noiseTerms(%d) = %f\n", i, inputAmplitudesView(i), i, noiseTermsView(i)); for (int i = 0; i < NSAMPLES; i++) { for (int j = 0; j < NPULSES; j++) printf("%f ", glbPulseMatrixView(i, j)); printf("\n"); } printf("\n"); for (int i = 0; i < NSAMPLES; i++) { for (int j = 0; j < NPULSES; j++) printf("%f ", glbPulseMatrixMView(i, j)); printf("\n"); } printf("\n"); for (int i = 0; i < NSAMPLES; i++) { for (int j = 0; j < NPULSES; j++) printf("%f ", glbPulseMatrixPView(i, j)); printf("\n"); } #endif int npassive = 0; float chi2 = 0, previous_chi2 = 0.f, chi2_2itersback = 0.f; for (int iter = 1; iter < nMaxItersMin; iter++) { //float covarianceMatrixStorage[MapSymM<float, NSAMPLES>::total]; // NOTE: only works when NSAMPLES == NPULSES // if does not hold -> slightly rearrange shared mem to still reuse // shared memory float* covarianceMatrixStorage = shrMatrixLFnnlsStorage; calo::multifit::MapSymM<float, NSAMPLES> covarianceMatrix{covarianceMatrixStorage}; CMS_UNROLL_LOOP for (int counter = 0; counter < calo::multifit::MapSymM<float, NSAMPLES>::total; counter++) covarianceMatrixStorage[counter] = (noisecorr != 0.f) ? 
0.f : averagePedestalWidth2; CMS_UNROLL_LOOP for (unsigned int counter = 0; counter < calo::multifit::MapSymM<float, NSAMPLES>::stride; counter++) { covarianceMatrix(counter, counter) += noiseTermsView.coeffRef(counter); if (counter != 0) covarianceMatrix(counter, counter - 1) += noisecorr * __ldg(&noiseElectronicView.coeffRef(counter - 1)) * __ldg(&noiseElectronicView.coeffRef(counter)); } // update covariance matrix update_covariance( resultAmplitudesVector, covarianceMatrix, glbPulseMatrixView, glbPulseMatrixMView, glbPulseMatrixPView); #ifdef HCAL_MAHI_GPUDEBUG printf("covariance matrix\n"); for (int i = 0; i < 8; i++) { for (int j = 0; j < 8; j++) printf("%f ", covarianceMatrix(i, j)); printf("\n"); } #endif // compute Cholesky Decomposition L matrix //matrixDecomposition.compute(covarianceMatrix); //auto const& matrixL = matrixDecomposition.matrixL(); float matrixLStorage[calo::multifit::MapSymM<float, NSAMPLES>::total]; calo::multifit::MapSymM<float, NSAMPLES> matrixL{matrixLStorage}; calo::multifit::compute_decomposition_unrolled(matrixL, covarianceMatrix); // // replace eigen // //auto const& A = matrixDecomposition // .matrixL() // .solve(pulseMatrixView); calo::multifit::ColMajorMatrix<NSAMPLES, NPULSES> A; calo::multifit::solve_forward_subst_matrix(A, glbPulseMatrixView, matrixL); // // remove eigen // //auto const& b = matrixL // .solve(inputAmplitudesView); // float reg_b[NSAMPLES]; calo::multifit::solve_forward_subst_vector(reg_b, inputAmplitudesView, matrixL); // TODO: we do not really need to change these matrcies // will be fixed in the optimized version //ColMajorMatrix<NPULSES, NPULSES> AtA = A.transpose() * A; //ColumnVector<NPULSES> Atb = A.transpose() * b; //ColMajorMatrix<NPULSES, NPULSES> AtA; //float AtAStorage[MapSymM<float, NPULSES>::total]; calo::multifit::MapSymM<float, NPULSES> AtA{shrAtAStorage}; calo::multifit::ColumnVector<NPULSES> Atb; CMS_UNROLL_LOOP for (int icol = 0; icol < NPULSES; icol++) { float reg_ai[NSAMPLES]; // load column icol CMS_UNROLL_LOOP for (int counter = 0; counter < NSAMPLES; counter++) reg_ai[counter] = A(counter, icol); // compute diagonal float sum = 0.f; CMS_UNROLL_LOOP for (int counter = 0; counter < NSAMPLES; counter++) sum += reg_ai[counter] * reg_ai[counter]; // store AtA(icol, icol) = sum; // go thru the other columns CMS_UNROLL_LOOP for (int j = icol + 1; j < NPULSES; j++) { // load column j float reg_aj[NSAMPLES]; CMS_UNROLL_LOOP for (int counter = 0; counter < NSAMPLES; counter++) reg_aj[counter] = A(counter, j); // accum float sum = 0.f; CMS_UNROLL_LOOP for (int counter = 0; counter < NSAMPLES; counter++) sum += reg_aj[counter] * reg_ai[counter]; // store //AtA(icol, j) = sum; AtA(j, icol) = sum; } // Atb accum float sum_atb = 0; CMS_UNROLL_LOOP for (int counter = 0; counter < NSAMPLES; counter++) sum_atb += reg_ai[counter] * reg_b[counter]; // store atb Atb(icol) = sum_atb; } #ifdef HCAL_MAHI_GPUDEBUG printf("AtA\n"); for (int i = 0; i < 8; i++) { for (int j = 0; j < 8; j++) printf("%f ", AtA(i, j)); printf("\n"); } printf("Atb\n"); for (int i = 0; i < 8; i++) printf("%f ", Atb(i)); printf("\n"); printf("result Amplitudes before nnls\n"); for (int i = 0; i < 8; i++) printf("%f ", resultAmplitudesVector(i)); printf("\n"); #endif // for fnnls calo::multifit::MapSymM<float, NPULSES> matrixLForFnnls{shrMatrixLFnnlsStorage}; // run fast nnls calo::multifit::fnnls( AtA, Atb, resultAmplitudesVector, npassive, pulseOffsets, matrixLForFnnls, nnlsThresh, nMaxItersNNLS, 10, 10); #ifdef HCAL_MAHI_GPUDEBUG printf("result 
Amplitudes\n"); for (int i = 0; i < 8; i++) printf("resultAmplitudes(%d) = %f\n", i, resultAmplitudesVector(i)); #endif calo::multifit::calculateChiSq(matrixL, glbPulseMatrixView, resultAmplitudesVector, inputAmplitudesView, chi2); auto const deltaChi2 = std::abs(chi2 - previous_chi2); if (chi2 == chi2_2itersback && chi2 < previous_chi2) break; // update chi2_2itersback = previous_chi2; previous_chi2 = chi2; // exit condition if (deltaChi2 < deltaChi2Threashold) break; } #ifdef HCAL_MAHI_GPUDEBUG for (int i = 0; i < NPULSES; i++) printf("pulseOffsets(%d) = %d outputAmplitudes(%d) = %f\n", i, pulseOffsets(i), i, resultAmplitudesVector(i)); printf("chi2 = %f\n", chi2); #endif outputChi2[gch] = chi2; auto const idx_for_energy = std::abs(pulseOffsetValues[0]); outputEnergy[gch] = (gain * resultAmplitudesVector(idx_for_energy)) * respCorrection; /* CMS_UNROLL_LOOP for (int i=0; i<NPULSES; i++) if (pulseOffsets[i] == soi) // NOTE: gain is a number < 10^-3/4, multiply first to avoid stab issues outputEnergy[gch] = (gain*resultAmplitudesVector(i))*respCorrection; */ } } // namespace mahi } // namespace hcal namespace hcal { namespace reconstruction { void entryPoint(InputDataGPU const& inputGPU, OutputDataGPU& outputGPU, ConditionsProducts const& conditions, ScratchDataGPU& scratch, ConfigParameters const& configParameters, hipStream_t cudaStream) { auto const totalChannels = inputGPU.f01HEDigis.size + inputGPU.f5HBDigis.size + inputGPU.f3HBDigis.size; // FIXME: the number of channels in output might change given that some channesl might be filtered out // do not run when there are no rechits (e.g. if HCAL is not being read), // but do set the size of the output collection to 0 outputGPU.recHits.size = totalChannels; if (totalChannels == 0) { return; } // TODO: this can be lifted by implementing a separate kernel // similar to the default one, but properly handling the diff in #sample // or modifying existing one auto const f01nsamples = compute_nsamples<Flavor1>(inputGPU.f01HEDigis.stride); auto const f5nsamples = compute_nsamples<Flavor5>(inputGPU.f5HBDigis.stride); auto const f3nsamples = compute_nsamples<Flavor3>(inputGPU.f3HBDigis.stride); int constexpr windowSize = 8; int const startingSample = f01nsamples - windowSize; assert(startingSample == 0 || startingSample == 2); if (inputGPU.f01HEDigis.stride > 0 && inputGPU.f5HBDigis.stride > 0) assert(f01nsamples == f5nsamples); if (inputGPU.f01HEDigis.stride > 0 && inputGPU.f3HBDigis.stride > 0) assert(f01nsamples == f3nsamples); dim3 threadsPerBlock{windowSize, configParameters.kprep1dChannelsPerBlock}; int blocks = static_cast<uint32_t>(threadsPerBlock.y) > totalChannels ? 
1 : (totalChannels + threadsPerBlock.y - 1) / threadsPerBlock.y; int nbytesShared = ((2 * windowSize + 2) * sizeof(float) + sizeof(uint64_t)) * configParameters.kprep1dChannelsPerBlock; hipLaunchKernelGGL(( hcal::mahi::kernel_prep1d_sameNumberOfSamples), dim3(blocks), dim3(threadsPerBlock), nbytesShared, cudaStream, scratch.amplitudes.get(), scratch.noiseTerms.get(), scratch.electronicNoiseTerms.get(), outputGPU.recHits.energy.get(), outputGPU.recHits.chi2.get(), inputGPU.f01HEDigis.data.get(), inputGPU.f5HBDigis.data.get(), inputGPU.f3HBDigis.data.get(), inputGPU.f01HEDigis.ids.get(), inputGPU.f5HBDigis.ids.get(), inputGPU.f3HBDigis.ids.get(), inputGPU.f01HEDigis.stride, inputGPU.f5HBDigis.stride, inputGPU.f3HBDigis.stride, inputGPU.f01HEDigis.size, inputGPU.f5HBDigis.size, inputGPU.f5HBDigis.npresamples.get(), scratch.soiSamples.get(), outputGPU.recHits.energyM0.get(), outputGPU.recHits.timeM0.get(), outputGPU.recHits.did.get(), totalChannels, conditions.channelQuality.status, conditions.recoParams.param1, conditions.recoParams.param2, conditions.qieCoders.offsets, conditions.qieCoders.slopes, conditions.qieTypes.values, conditions.pedestalWidths.values, conditions.effectivePedestalWidths.values, conditions.pedestals.values, conditions.convertedEffectivePedestals ? conditions.convertedEffectivePedestals->values : conditions.pedestals.values, configParameters.useEffectivePedestals, conditions.sipmParameters.type, conditions.sipmParameters.fcByPE, conditions.sipmCharacteristics.parLin1, conditions.sipmCharacteristics.parLin2, conditions.sipmCharacteristics.parLin3, conditions.gains.values, conditions.respCorrs.values, conditions.topology->maxDepthHB(), conditions.topology->maxDepthHE(), conditions.recConstants->getNPhi(1) > hcal::reconstruction::IPHI_MAX ? conditions.recConstants->getNPhi(1) : hcal::reconstruction::IPHI_MAX, conditions.topology->firstHBRing(), conditions.topology->lastHBRing(), conditions.topology->firstHERing(), conditions.topology->lastHERing(), conditions.recConstants->getEtaRange(0).second - conditions.recConstants->getEtaRange(0).first + 1, conditions.topology->firstHERing() > conditions.topology->lastHERing() ? 0 : (conditions.topology->lastHERing() - conditions.topology->firstHERing() + 1), configParameters.sipmQTSShift, configParameters.sipmQNTStoSum, configParameters.firstSampleShift, conditions.offsetForHashes, configParameters.ts4Thresh, startingSample); cudaCheck(hipGetLastError()); // 1024 is the max threads per block for gtx1080 // FIXME: take this from cuda service or something like that uint32_t const channelsPerBlock = 1024 / (windowSize * conditions.pulseOffsetsHost.size()); dim3 threadsPerBlock2{windowSize, static_cast<uint32_t>(conditions.pulseOffsetsHost.size()), channelsPerBlock}; int blocks2 = threadsPerBlock2.z > totalChannels ? 
1 : (totalChannels + threadsPerBlock2.z - 1) / threadsPerBlock2.z; #ifdef HCAL_MAHI_CPUDEBUG std::cout << "threads: " << threadsPerBlock2.x << " " << threadsPerBlock2.y << " " << threadsPerBlock2.z << std::endl; std::cout << "blocks: " << blocks2 << std::endl; #endif hipLaunchKernelGGL(( hcal::mahi::kernel_prep_pulseMatrices_sameNumberOfSamples), dim3(blocks2), dim3(threadsPerBlock2), 0, cudaStream, scratch.pulseMatrices.get(), scratch.pulseMatricesM.get(), scratch.pulseMatricesP.get(), conditions.pulseOffsets.values, scratch.amplitudes.get(), inputGPU.f01HEDigis.ids.get(), inputGPU.f5HBDigis.ids.get(), inputGPU.f3HBDigis.ids.get(), inputGPU.f01HEDigis.size, inputGPU.f5HBDigis.size, totalChannels, scratch.soiSamples.get(), conditions.recoParams.ids, conditions.recoParams.acc25nsVec, conditions.recoParams.diff25nsItvlVec, conditions.recoParams.accVarLenIdxMinusOneVec, conditions.recoParams.diffVarItvlIdxMinusOneVec, conditions.recoParams.accVarLenIdxZEROVec, conditions.recoParams.diffVarItvlIdxZEROVec, configParameters.meanTime, configParameters.timeSigmaSiPM, configParameters.timeSigmaHPD, conditions.topology->maxDepthHB(), conditions.topology->maxDepthHE(), conditions.recConstants->getNPhi(1) > hcal::reconstruction::IPHI_MAX ? conditions.recConstants->getNPhi(1) : hcal::reconstruction::IPHI_MAX, conditions.topology->firstHBRing(), conditions.topology->lastHBRing(), conditions.topology->firstHERing(), conditions.topology->lastHERing(), conditions.recConstants->getEtaRange(0).second - conditions.recConstants->getEtaRange(0).first + 1, conditions.topology->firstHERing() > conditions.topology->lastHERing() ? 0 : (conditions.topology->lastHERing() - conditions.topology->firstHERing() + 1), conditions.offsetForHashes, configParameters.applyTimeSlew, configParameters.tzeroTimeSlew, configParameters.slopeTimeSlew, configParameters.tmaxTimeSlew); cudaCheck(hipGetLastError()); // number of samples is checked in above assert if (conditions.pulseOffsetsHost.size() == 8u) { // FIXME: provide constants from configuration uint32_t threadsPerBlock = configParameters.kernelMinimizeThreads[0]; uint32_t blocks = threadsPerBlock > totalChannels ? 1 : (totalChannels + threadsPerBlock - 1) / threadsPerBlock; auto const nbytesShared = 2 * threadsPerBlock * calo::multifit::MapSymM<float, 8>::total * sizeof(float); hipLaunchKernelGGL(( hcal::mahi::kernel_minimize<8, 8>), dim3(blocks), dim3(threadsPerBlock), nbytesShared, cudaStream, outputGPU.recHits.energy.get(), outputGPU.recHits.chi2.get(), scratch.amplitudes.get(), scratch.pulseMatrices.get(), scratch.pulseMatricesM.get(), scratch.pulseMatricesP.get(), conditions.pulseOffsets.values, scratch.noiseTerms.get(), scratch.electronicNoiseTerms.get(), scratch.soiSamples.get(), conditions.sipmParameters.auxi2, conditions.pedestalWidths.values, conditions.effectivePedestalWidths.values, configParameters.useEffectivePedestals, inputGPU.f01HEDigis.ids.get(), inputGPU.f5HBDigis.ids.get(), inputGPU.f3HBDigis.ids.get(), conditions.gains.values, conditions.respCorrs.values, inputGPU.f01HEDigis.size, inputGPU.f5HBDigis.size, totalChannels, conditions.offsetForHashes, conditions.topology->maxDepthHB(), conditions.topology->maxDepthHE(), conditions.recConstants->getNPhi(1) > hcal::reconstruction::IPHI_MAX ? 
            conditions.recConstants->getNPhi(1) : hcal::reconstruction::IPHI_MAX,
        conditions.topology->firstHBRing(),
        conditions.topology->lastHBRing(),
        conditions.topology->firstHERing(),
        conditions.topology->lastHERing(),
        conditions.recConstants->getEtaRange(0).second - conditions.recConstants->getEtaRange(0).first + 1,
        conditions.topology->firstHERing() > conditions.topology->lastHERing()
            ? 0
            : (conditions.topology->lastHERing() - conditions.topology->firstHERing() + 1));
    } else {
      throw cms::Exception("Invalid MahiGPU configuration")
          << "Currently support only 8 pulses and 8 time samples and provided: " << f01nsamples << " samples and "
          << conditions.pulseOffsetsHost.size() << " pulses" << std::endl;
    }
  }
}  // namespace reconstruction
}  // namespace hcal
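// ----------------------------------------------------------------------------
// [editorial note -- hedged sketch, not part of the hipified file above]
// kernel_prep1d_sameNumberOfSamples keeps, per channel, the (sample, energy)
// pair with the largest method-0 energy by packing the sample index into the
// high 32 bits and the float bit pattern of the energy into the low 32 bits of
// one 64-bit word, then updating it with an atomicCAS loop. Decoding the float
// before comparing is what makes this work: the high half holds the sample, so
// a plain 64-bit atomic max would order by sample first, not by energy. The
// helper name atomicMaxEnergySamplePair is illustrative only; in the kernel
// the word lives in shared memory (shrMethod0EnergySamplePair) and is seeded
// with sample 0 and the bits of std::numeric_limits<float>::min().
// ----------------------------------------------------------------------------
#include "hip/hip_runtime.h"

__device__ inline void atomicMaxEnergySamplePair(unsigned long long int *pair, int sample, float energy) {
  // pack (sample, energy) exactly as the kernel does
  unsigned long long int const val =
      (static_cast<unsigned long long int>(sample) << 32) + __float_as_uint(energy);
  unsigned long long int old = *pair, assumed;
  do {
    assumed = old;
    // compare only the energy stored in the low 32 bits
    float const current_energy = __uint_as_float(assumed & 0xffffffff);
    if (energy > current_energy)
      old = atomicCAS(pair, assumed, val);
    else
      break;
  } while (assumed != old);
}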
6c0567def82f877c3127bb151971797de60abc0e.cu
#include <Eigen/Dense> #include "DataFormats/CaloRecHit/interface/MultifitComputations.h" // needed to compile with USER_CXXFLAGS="-DCOMPUTE_TDC_TIME" #include "DataFormats/HcalRecHit/interface/HcalSpecialTimes.h" #include "FWCore/Utilities/interface/CMSUnrollLoop.h" // TODO reuse some of the HCAL constats from //#include "RecoLocalCalo/HcalRecAlgos/interface/HcalConstants.h" #include "SimpleAlgoGPU.h" #include "KernelHelpers.h" #ifdef HCAL_MAHI_GPUDEBUG #define DETID_TO_DEBUG 1125647428 #endif namespace hcal { namespace mahi { // TODO: provide constants from configuration // from RecoLocalCalo/HcalRecProducers/python/HBHEMahiParameters_cfi.py constexpr int nMaxItersMin = 50; constexpr int nMaxItersNNLS = 500; constexpr double nnlsThresh = 1e-11; constexpr float deltaChi2Threashold = 1e-3; // from RecoLocalCalo/HcalRecProducers/src/HBHEPhase1Reconstructor.cc __forceinline__ __device__ float get_raw_charge(double const charge, double const pedestal, float const* shrChargeMinusPedestal, float const* parLin1Values, float const* parLin2Values, float const* parLin3Values, int32_t const nsamplesForCompute, int32_t const soi, int const sipmQTSShift, int const sipmQNTStoSum, int const sipmType, float const fcByPE, bool const isqie11) { float rawCharge; if (!isqie11) rawCharge = charge; else { auto const parLin1 = parLin1Values[sipmType - 1]; auto const parLin2 = parLin2Values[sipmType - 1]; auto const parLin3 = parLin3Values[sipmType - 1]; int const first = std::max(soi + sipmQTSShift, 0); int const last = std::min(soi + sipmQNTStoSum, nsamplesForCompute); float sipmq = 0.0f; for (auto ts = first; ts < last; ts++) sipmq += shrChargeMinusPedestal[threadIdx.y * nsamplesForCompute + ts]; auto const effectivePixelsFired = sipmq / fcByPE; auto const factor = hcal::reconstruction::compute_reco_correction_factor(parLin1, parLin2, parLin3, effectivePixelsFired); rawCharge = (charge - pedestal) * factor + pedestal; #ifdef HCAL_MAHI_GPUDEBUG printf("first = %d last = %d sipmQ = %f factor = %f rawCharge = %f\n", first, last, sipmq, factor, rawCharge); #endif } return rawCharge; } // Assume: same number of samples for HB and HE // TODO: add/validate restrict (will increase #registers in use by the kernel) __global__ void kernel_prep1d_sameNumberOfSamples(float* amplitudes, float* noiseTerms, float* electronicNoiseTerms, float* outputEnergy, float* outputChi2, uint16_t const* dataf01HE, uint16_t const* dataf5HB, uint16_t const* dataf3HB, uint32_t const* idsf01HE, uint32_t const* idsf5HB, uint32_t const* idsf3HB, uint32_t const stridef01HE, uint32_t const stridef5HB, uint32_t const stridef3HB, uint32_t const nchannelsf01HE, uint32_t const nchannelsf5HB, uint8_t const* npresamplesf5HB, int8_t* soiSamples, float* method0Energy, float* method0Time, uint32_t* outputdid, uint32_t const nchannels, uint32_t const* qualityStatus, uint32_t const* recoParam1Values, uint32_t const* recoParam2Values, float const* qieCoderOffsets, float const* qieCoderSlopes, int const* qieTypes, float const* pedestalWidths, float const* effectivePedestalWidths, float const* pedestals, float const* effectivePedestals, bool const useEffectivePedestals, int const* sipmTypeValues, float const* fcByPEValues, float const* parLin1Values, float const* parLin2Values, float const* parLin3Values, float const* gainValues, float const* respCorrectionValues, int const maxDepthHB, int const maxDepthHE, int const maxPhiHE, int const firstHBRing, int const lastHBRing, int const firstHERing, int const lastHERing, int const nEtaHB, int const nEtaHE, int 
const sipmQTSShift, int const sipmQNTStoSum, int const firstSampleShift, uint32_t const offsetForHashes, float const ts4Thresh, int const startingSample) { // indices + runtime constants auto const sample = threadIdx.x + startingSample; auto const sampleWithinWindow = threadIdx.x; int32_t const nsamplesForCompute = blockDim.x; auto const lch = threadIdx.y; auto const gch = lch + blockDim.y * blockIdx.x; auto const nchannels_per_block = blockDim.y; auto const linearThPerBlock = threadIdx.x + threadIdx.y * blockDim.x; // remove if (gch >= nchannels) return; // initialize all output buffers if (sampleWithinWindow == 0) { outputdid[gch] = 0; method0Energy[gch] = 0; method0Time[gch] = 0; outputEnergy[gch] = 0; outputChi2[gch] = 0; soiSamples[gch] = -1; } #ifdef HCAL_MAHI_GPUDEBUG #ifdef HCAL_MAHI_GPUDEBUG_SINGLECHANNEL if (gch > 0) return; #endif #endif // configure shared mem extern __shared__ char smem[]; float* shrEnergyM0PerTS = reinterpret_cast<float*>(smem); float* shrChargeMinusPedestal = shrEnergyM0PerTS + nsamplesForCompute * nchannels_per_block; float* shrMethod0EnergyAccum = shrChargeMinusPedestal + nsamplesForCompute * nchannels_per_block; float* shrEnergyM0TotalAccum = shrMethod0EnergyAccum + nchannels_per_block; unsigned long long int* shrMethod0EnergySamplePair = reinterpret_cast<unsigned long long int*>(shrEnergyM0TotalAccum + nchannels_per_block); if (sampleWithinWindow == 0) { shrMethod0EnergyAccum[lch] = 0; shrMethod0EnergySamplePair[lch] = __float_as_uint(std::numeric_limits<float>::min()); shrEnergyM0TotalAccum[lch] = 0; } // offset output auto* amplitudesForChannel = amplitudes + nsamplesForCompute * gch; auto* noiseTermsForChannel = noiseTerms + nsamplesForCompute * gch; auto* electronicNoiseTermsForChannel = electronicNoiseTerms + nsamplesForCompute * gch; auto const nchannelsf015 = nchannelsf01HE + nchannelsf5HB; // get event input quantities auto const stride = gch < nchannelsf01HE ? stridef01HE : (gch < nchannelsf015 ? stridef5HB : stridef3HB); auto const nsamples = gch < nchannelsf01HE ? compute_nsamples<Flavor1>(stride) : (gch < nchannelsf015 ? compute_nsamples<Flavor5>(stride) : compute_nsamples<Flavor3>(stride)); #ifdef HCAL_MAHI_GPUDEBUG assert(nsamples == nsamplesForCompute || nsamples - startingSample == nsamplesForCompute); #endif auto const id = gch < nchannelsf01HE ? idsf01HE[gch] : (gch < nchannelsf015 ? idsf5HB[gch - nchannelsf01HE] : idsf3HB[gch - nchannelsf015]); auto const did = HcalDetId{id}; auto const adc = gch < nchannelsf01HE ? adc_for_sample<Flavor1>(dataf01HE + stride * gch, sample) : (gch < nchannelsf015 ? adc_for_sample<Flavor5>(dataf5HB + stride * (gch - nchannelsf01HE), sample) : adc_for_sample<Flavor3>(dataf3HB + stride * (gch - nchannelsf015), sample)); auto const capid = gch < nchannelsf01HE ? capid_for_sample<Flavor1>(dataf01HE + stride * gch, sample) : (gch < nchannelsf015 ? capid_for_sample<Flavor5>(dataf5HB + stride * (gch - nchannelsf01HE), sample) : capid_for_sample<Flavor3>(dataf3HB + stride * (gch - nchannelsf015), sample)); #ifdef HCAL_MAHI_GPUDEBUG #ifdef HCAL_MAHI_GPUDEBUG_FILTERDETID if (id != DETID_TO_DEBUG) return; #endif #endif // compute hash for this did auto const hashedId = did.subdetId() == HcalBarrel ? hcal::reconstruction::did2linearIndexHB(id, maxDepthHB, firstHBRing, lastHBRing, nEtaHB) : hcal::reconstruction::did2linearIndexHE(id, maxDepthHE, maxPhiHE, firstHERing, lastHERing, nEtaHE) + offsetForHashes; // conditions based on the hash // FIXME: remove hardcoded values auto const qieType = qieTypes[hashedId] > 0 ? 
1 : 0; // 2 types at this point auto const* qieOffsets = qieCoderOffsets + hashedId * HcalQIECodersGPU::numValuesPerChannel; auto const* qieSlopes = qieCoderSlopes + hashedId * HcalQIECodersGPU::numValuesPerChannel; auto const* pedestalsForChannel = pedestals + hashedId * 4; auto const* pedestalWidthsForChannel = useEffectivePedestals && (gch < nchannelsf01HE || gch >= nchannelsf015) ? effectivePedestalWidths + hashedId * 4 : pedestalWidths + hashedId * 4; auto const* gains = gainValues + hashedId * 4; auto const gain = gains[capid]; auto const gain0 = gains[0]; auto const respCorrection = respCorrectionValues[hashedId]; auto const pedestal = pedestalsForChannel[capid]; auto const pedestalWidth = pedestalWidthsForChannel[capid]; // if needed, only use effective pedestals for f01 auto const pedestalToUseForMethod0 = useEffectivePedestals && (gch < nchannelsf01HE || gch >= nchannelsf015) ? effectivePedestals[hashedId * 4 + capid] : pedestal; auto const sipmType = sipmTypeValues[hashedId]; auto const fcByPE = fcByPEValues[hashedId]; auto const recoParam1 = recoParam1Values[hashedId]; auto const recoParam2 = recoParam2Values[hashedId]; #ifdef HCAL_MAHI_GPUDEBUG printf("qieType = %d qieOffset0 = %f qieOffset1 = %f qieSlope0 = %f qieSlope1 = %f\n", qieType, qieOffsets[0], qieOffsets[1], qieSlopes[0], qieSlopes[1]); #endif // compute charge auto const charge = hcal::reconstruction::compute_coder_charge(qieType, adc, capid, qieOffsets, qieSlopes); shrChargeMinusPedestal[linearThPerBlock] = charge - pedestal; if (gch < nchannelsf01HE) { // NOTE: assume that soi is high only for a single guy! // which must be the case. cpu version does not check for that // if that is not the case, we will see that with cuda mmecheck auto const soibit = soibit_for_sample<Flavor1>(dataf01HE + stride * gch, sample); if (soibit == 1) soiSamples[gch] = sampleWithinWindow; } else if (gch >= nchannelsf015) { auto const soibit = soibit_for_sample<Flavor3>(dataf3HB + stride * (gch - nchannelsf015), sample); if (soibit == 1) soiSamples[gch] = sampleWithinWindow; } __syncthreads(); int32_t const soi = gch < nchannelsf01HE ? soiSamples[gch] : (gch < nchannelsf015 ? npresamplesf5HB[gch - nchannelsf01HE] : soiSamples[gch]); bool badSOI = (soi < 0 or soi >= nsamplesForCompute); if (badSOI and sampleWithinWindow == 0) { #ifdef GPU_DEBUG printf("Found HBHE channel %d with invalid SOI %d\n", gch, soi); #endif // mark the channel as bad outputChi2[gch] = -9999.f; } //int32_t const soi = gch >= nchannelsf01HE // ? npresamplesf5HB[gch - nchannelsf01HE] // : soiSamples[gch]; // this is here just to make things uniform... 
if (gch >= nchannelsf01HE && gch < nchannelsf015 && sampleWithinWindow == 0) soiSamples[gch] = npresamplesf5HB[gch - nchannelsf01HE]; // // compute various quantities (raw charge and tdc stuff) // NOTE: this branch will be divergent only for a single warp that // sits on the boundary when flavor 01 channels end and flavor 5 start // float const rawCharge = get_raw_charge(charge, pedestal, shrChargeMinusPedestal, parLin1Values, parLin2Values, parLin3Values, nsamplesForCompute, soi, sipmQTSShift, sipmQNTStoSum, sipmType, fcByPE, gch < nchannelsf01HE || gch >= nchannelsf015); auto const dfc = hcal::reconstruction::compute_diff_charge_gain( qieType, adc, capid, qieOffsets, qieSlopes, gch < nchannelsf01HE || gch >= nchannelsf015); #ifdef COMPUTE_TDC_TIME float tdcTime; if (gch >= nchannelsf01HE && gch < nchannelsf015) { tdcTime = HcalSpecialTimes::UNKNOWN_T_NOTDC; } else { if (gch < nchannelsf01HE) tdcTime = HcalSpecialTimes::getTDCTime(tdc_for_sample<Flavor1>(dataf01HE + stride * gch, sample)); else if (gch >= nchannelsf015) tdcTime = HcalSpecialTimes::getTDCTime(tdc_for_sample<Flavor3>(dataf3HB + stride * (gch - nchannelsf015), sample)); } #endif // COMPUTE_TDC_TIME // compute method 0 quantities // TODO: need to apply containment // TODO: need to apply time slew // TODO: for < run 3, apply HBM legacy energy correction auto const nsamplesToAdd = recoParam1 < 10 ? recoParam2 : (recoParam1 >> 14) & 0xF; auto const startSampleTmp = soi + firstSampleShift; auto const startSample = startSampleTmp < 0 ? 0 : startSampleTmp; auto const endSample = startSample + nsamplesToAdd < nsamplesForCompute ? startSample + nsamplesToAdd : nsamplesForCompute; // NOTE: gain is a small number < 10^-3, multiply it last auto const energym0_per_ts = gain * ((rawCharge - pedestalToUseForMethod0) * respCorrection); auto const energym0_per_ts_gain0 = gain0 * ((rawCharge - pedestalToUseForMethod0) * respCorrection); // store to shared mem shrEnergyM0PerTS[lch * nsamplesForCompute + sampleWithinWindow] = energym0_per_ts; atomicAdd(&shrEnergyM0TotalAccum[lch], energym0_per_ts_gain0); #ifdef HCAL_MAHI_GPUDEBUG printf( "id = %u sample = %d gch = %d hashedId = %u adc = %u capid = %u\n" " charge = %f rawCharge = %f dfc = %f pedestal = %f\n" " gain = %f respCorrection = %f energym0_per_ts = %f\n", id, sample, gch, hashedId, adc, capid, charge, rawCharge, dfc, pedestalToUseForMethod0, gain, respCorrection, energym0_per_ts); printf( "startSample = %d endSample = %d param1 = %u param2 = %u\n", startSample, endSample, recoParam1, recoParam2); #endif if (sampleWithinWindow >= startSample && sampleWithinWindow < endSample) { atomicAdd(&shrMethod0EnergyAccum[lch], energym0_per_ts); // pack sample, energy as 64 bit value unsigned long long int old = shrMethod0EnergySamplePair[lch], assumed; unsigned long long int val = (static_cast<unsigned long long int>(sampleWithinWindow) << 32) + __float_as_uint(energym0_per_ts); do { assumed = old; // decode energy, sample values //int const current_sample = (assumed >> 32) & 0xffffffff; float const current_energy = __uint_as_float(assumed & 0xffffffff); if (energym0_per_ts > current_energy) old = atomicCAS(&shrMethod0EnergySamplePair[lch], assumed, val); else break; } while (assumed != old); } __syncthreads(); // NOTE: must take soi, as values for that thread are used... 
// NOTE: does not run if soi is bad, because it does not match any sampleWithinWindow if (sampleWithinWindow == soi) { auto const method0_energy = shrMethod0EnergyAccum[lch]; auto const val = shrMethod0EnergySamplePair[lch]; int const max_sample = (val >> 32) & 0xffffffff; float const max_energy = __uint_as_float(val & 0xffffffff); float const max_energy_1 = max_sample < nsamplesForCompute - 1 ? shrEnergyM0PerTS[lch * nsamplesForCompute + max_sample + 1] : 0.f; float const position = nsamplesToAdd < nsamplesForCompute ? max_sample - soi : max_sample; auto const sum = max_energy + max_energy_1; // FIXME: for full comparison with cpu method 0 timing, // need to correct by slew // requires an accumulator -> more shared mem -> omit here unless // really needed float const time = max_energy > 0.f && max_energy_1 > 0.f ? 25.f * (position + max_energy_1 / sum) : 25.f * position; // store method0 quantities to global mem outputdid[gch] = id; method0Energy[gch] = method0_energy; method0Time[gch] = time; #ifdef HCAL_MAHI_GPUDEBUG printf("tsTOT = %f tstrig = %f ts4Thresh = %f\n", shrEnergyM0TotalAccum[lch], energym0_per_ts_gain0, ts4Thresh); #endif // Channel quality check // https://github.com/cms-sw/cmssw/blob/master/RecoLocalCalo/HcalRecAlgos/plugins/HcalChannelPropertiesEP.cc#L107-L109 // https://github.com/cms-sw/cmssw/blob/6d2f66057131baacc2fcbdd203588c41c885b42c/CondCore/HcalPlugins/plugins/HcalChannelQuality_PayloadInspector.cc#L30 // const bool taggedBadByDb = severity.dropChannel(digistatus->getValue()); // do not run MAHI if taggedBadByDb = true auto const digiStatus_ = qualityStatus[hashedId]; const bool taggedBadByDb = (digiStatus_ / 32770); if (taggedBadByDb) outputChi2[gch] = -9999.f; // check as in cpu version if mahi is not needed // FIXME: KNOWN ISSUE: observed a problem when rawCharge and pedestal // are basically equal and generate -0.00000... // needs to be treated properly if (!(shrEnergyM0TotalAccum[lch] > 0 && energym0_per_ts_gain0 > ts4Thresh)) { // do not need to run mahi minimization //outputEnergy[gch] = 0; energy already inited to 0 outputChi2[gch] = -9999.f; } #ifdef HCAL_MAHI_GPUDEBUG printf("method0_energy = %f max_sample = %d max_energy = %f time = %f\n", method0_energy, max_sample, max_energy, time); #endif } // // preparations for mahi fit // auto const amplitude = rawCharge - pedestalToUseForMethod0; auto const noiseADC = (1. / std::sqrt(12)) * dfc; auto const noisePhotoSq = amplitude > pedestalWidth ? (amplitude * fcByPE) : 0.f; auto const noiseTerm = noiseADC * noiseADC + noisePhotoSq + pedestalWidth * pedestalWidth; #ifdef HCAL_MAHI_GPUDEBUG printf( "charge(%d) = %f pedestal(%d) = %f dfc(%d) = %f pedestalWidth(%d) = %f noiseADC(%d) = %f noisPhoto(%d) = " "%f\n", sample, rawCharge, sample, pedestalToUseForMethod0, sample, dfc, sample, pedestalWidth, sample, noiseADC, sample, noisePhotoSq); #endif // store to global memory amplitudesForChannel[sampleWithinWindow] = amplitude; noiseTermsForChannel[sampleWithinWindow] = noiseTerm; electronicNoiseTermsForChannel[sampleWithinWindow] = pedestalWidth; } // TODO: need to add an array of offsets for pulses (a la activeBXs...) 
// Assume for now 8 pulses __global__ void kernel_prep_pulseMatrices_sameNumberOfSamples(float* pulseMatrices, float* pulseMatricesM, float* pulseMatricesP, int const* pulseOffsets, float const* amplitudes, uint32_t const* idsf01HE, uint32_t const* idsf5HB, uint32_t const* idsf3HB, uint32_t const nchannelsf01HE, uint32_t const nchannelsf5HB, uint32_t const nchannelsTotal, int8_t const* soiSamples, uint32_t const* recoPulseShapeIds, float const* acc25nsVecValues, float const* diff25nsItvlVecValues, float const* accVarLenIdxMinusOneVecValues, float const* diffVarItvlIdxMinusOneVecValues, float const* accVarLenIdxZeroVecValues, float const* diffVarItvlIdxZeroVecValues, float const meanTime, float const timeSigmaSiPM, float const timeSigmaHPD, int const maxDepthHB, int const maxDepthHE, int const maxPhiHE, int const firstHBRing, int const lastHBRing, int const firstHERing, int const lastHERing, int const nEtaHB, int const nEtaHE, uint32_t const offsetForHashes, bool const applyTimeSlew, float const tzeroTimeSlew, float const slopeTimeSlew, float const tmaxTimeSlew) { // indices auto const ipulse = threadIdx.y; auto const npulses = blockDim.y; auto const sample = threadIdx.x; auto const nsamples = blockDim.x; auto const lch = threadIdx.z; auto const gch = lch + blockIdx.x * blockDim.z; auto const nchannelsf015 = nchannelsf01HE + nchannelsf5HB; if (gch >= nchannelsTotal) return; // conditions auto const id = gch < nchannelsf01HE ? idsf01HE[gch] : (gch < nchannelsf015 ? idsf5HB[gch - nchannelsf01HE] : idsf3HB[gch - nchannelsf015]); //auto const id = gch >= nchannelsf01HE // ? idsf5HB[gch - nchannelsf01HE] // : idsf01HE[gch]; auto const deltaT = gch >= nchannelsf01HE && gch < nchannelsf015 ? timeSigmaHPD : timeSigmaSiPM; auto const did = DetId{id}; auto const hashedId = did.subdetId() == HcalBarrel ? 
hcal::reconstruction::did2linearIndexHB(id, maxDepthHB, firstHBRing, lastHBRing, nEtaHB) : hcal::reconstruction::did2linearIndexHE(id, maxDepthHE, maxPhiHE, firstHERing, lastHERing, nEtaHE) + offsetForHashes; auto const recoPulseShapeId = recoPulseShapeIds[hashedId]; auto const* acc25nsVec = acc25nsVecValues + recoPulseShapeId * hcal::constants::maxPSshapeBin; auto const* diff25nsItvlVec = diff25nsItvlVecValues + recoPulseShapeId * hcal::constants::maxPSshapeBin; auto const* accVarLenIdxMinusOneVec = accVarLenIdxMinusOneVecValues + recoPulseShapeId * hcal::constants::nsPerBX; auto const* diffVarItvlIdxMinusOneVec = diffVarItvlIdxMinusOneVecValues + recoPulseShapeId * hcal::constants::nsPerBX; auto const* accVarLenIdxZeroVec = accVarLenIdxZeroVecValues + recoPulseShapeId * hcal::constants::nsPerBX; auto const* diffVarItvlIdxZeroVec = diffVarItvlIdxZeroVecValues + recoPulseShapeId * hcal::constants::nsPerBX; // offset output arrays auto* pulseMatrix = pulseMatrices + nsamples * npulses * gch; auto* pulseMatrixM = pulseMatricesM + nsamples * npulses * gch; auto* pulseMatrixP = pulseMatricesP + nsamples * npulses * gch; // amplitude per ipulse int const soi = soiSamples[gch]; int const pulseOffset = pulseOffsets[ipulse]; auto const amplitude = amplitudes[gch * nsamples + pulseOffset + soi]; #ifdef HCAL_MAHI_GPUDEBUG #ifdef HCAL_MAHI_GPUDEBUG_FILTERDETID if (id != DETID_TO_DEBUG) return; #endif #endif #ifdef HCAL_MAHI_GPUDEBUG if (sample == 0 && ipulse == 0) { for (int i = 0; i < 8; i++) printf("amplitude(%d) = %f\n", i, amplitudes[gch * nsamples + i]); printf("acc25nsVec and diff25nsItvlVec for recoPulseShapeId = %u\n", recoPulseShapeId); for (int i = 0; i < 256; i++) { printf("acc25nsVec(%d) = %f diff25nsItvlVec(%d) = %f\n", i, acc25nsVec[i], i, diff25nsItvlVec[i]); } printf("accVarLenIdxZEROVec and accVarLenIdxMinusOneVec\n"); for (int i = 0; i < 25; i++) { printf("accVarLenIdxZEROVec(%d) = %f accVarLenIdxMinusOneVec(%d) = %f\n", i, accVarLenIdxZeroVec[i], i, accVarLenIdxMinusOneVec[i]); } printf("diffVarItvlIdxZEROVec and diffVarItvlIdxMinusOneVec\n"); for (int i = 0; i < 25; i++) { printf("diffVarItvlIdxZEROVec(%d) = %f diffVarItvlIdxMinusOneVec(%d) = %f\n", i, diffVarItvlIdxZeroVec[i], i, diffVarItvlIdxMinusOneVec[i]); } } #endif auto t0 = meanTime; if (applyTimeSlew) { if (amplitude <= 1.0f) t0 += hcal::reconstruction::compute_time_slew_delay(1.0, tzeroTimeSlew, slopeTimeSlew, tmaxTimeSlew); else t0 += hcal::reconstruction::compute_time_slew_delay(amplitude, tzeroTimeSlew, slopeTimeSlew, tmaxTimeSlew); } auto const t0m = -deltaT + t0; auto const t0p = deltaT + t0; #ifdef HCAL_MAHI_GPUDEBUG if (sample == 0 && ipulse == 0) { printf("time values: %f %f %f\n", t0, t0m, t0p); } if (sample == 0 && ipulse == 0) { for (int i = 0; i < hcal::constants::maxSamples; i++) { auto const value = hcal::reconstruction::compute_pulse_shape_value(t0, i, 0, acc25nsVec, diff25nsItvlVec, accVarLenIdxMinusOneVec, diffVarItvlIdxMinusOneVec, accVarLenIdxZeroVec, diffVarItvlIdxZeroVec); printf("pulse(%d) = %f\n", i, value); } printf("\n"); for (int i = 0; i < hcal::constants::maxSamples; i++) { auto const value = hcal::reconstruction::compute_pulse_shape_value(t0p, i, 0, acc25nsVec, diff25nsItvlVec, accVarLenIdxMinusOneVec, diffVarItvlIdxMinusOneVec, accVarLenIdxZeroVec, diffVarItvlIdxZeroVec); printf("pulseP(%d) = %f\n", i, value); } printf("\n"); for (int i = 0; i < hcal::constants::maxSamples; i++) { auto const value = hcal::reconstruction::compute_pulse_shape_value(t0m, i, 0, acc25nsVec, diff25nsItvlVec, 
accVarLenIdxMinusOneVec, diffVarItvlIdxMinusOneVec, accVarLenIdxZeroVec, diffVarItvlIdxZeroVec); printf("pulseM(%d) = %f\n", i, value); } } #endif // FIXME: shift should be treated properly, // here assume 8 time slices and 8 samples auto const shift = 4 - soi; // as in cpu version! // auto const offset = ipulse - soi; // auto const idx = sample - offset; int32_t const idx = sample - pulseOffset; auto const value = idx >= 0 && idx < nsamples ? hcal::reconstruction::compute_pulse_shape_value(t0, idx, shift, acc25nsVec, diff25nsItvlVec, accVarLenIdxMinusOneVec, diffVarItvlIdxMinusOneVec, accVarLenIdxZeroVec, diffVarItvlIdxZeroVec) : 0; auto const value_t0m = idx >= 0 && idx < nsamples ? hcal::reconstruction::compute_pulse_shape_value(t0m, idx, shift, acc25nsVec, diff25nsItvlVec, accVarLenIdxMinusOneVec, diffVarItvlIdxMinusOneVec, accVarLenIdxZeroVec, diffVarItvlIdxZeroVec) : 0; auto const value_t0p = idx >= 0 && idx < nsamples ? hcal::reconstruction::compute_pulse_shape_value(t0p, idx, shift, acc25nsVec, diff25nsItvlVec, accVarLenIdxMinusOneVec, diffVarItvlIdxMinusOneVec, accVarLenIdxZeroVec, diffVarItvlIdxZeroVec) : 0; // store to global if (amplitude > 0.f) { pulseMatrix[ipulse * nsamples + sample] = value; pulseMatrixM[ipulse * nsamples + sample] = value_t0m; pulseMatrixP[ipulse * nsamples + sample] = value_t0p; } else { pulseMatrix[ipulse * nsamples + sample] = 0.f; pulseMatrixM[ipulse * nsamples + sample] = 0.f; pulseMatrixP[ipulse * nsamples + sample] = 0.f; } } template <int NSAMPLES, int NPULSES> __forceinline__ __device__ void update_covariance( calo::multifit::ColumnVector<NPULSES> const& resultAmplitudesVector, calo::multifit::MapSymM<float, NSAMPLES>& covarianceMatrix, Eigen::Map<const calo::multifit::ColMajorMatrix<NSAMPLES, NPULSES>> const& pulseMatrix, Eigen::Map<const calo::multifit::ColMajorMatrix<NSAMPLES, NPULSES>> const& pulseMatrixM, Eigen::Map<const calo::multifit::ColMajorMatrix<NSAMPLES, NPULSES>> const& pulseMatrixP) { CMS_UNROLL_LOOP for (int ipulse = 0; ipulse < NPULSES; ipulse++) { auto const resultAmplitude = resultAmplitudesVector(ipulse); if (resultAmplitude == 0) continue; #ifdef HCAL_MAHI_GPUDEBUG printf("pulse cov array for ibx = %d\n", ipulse); #endif // preload a column float pmcol[NSAMPLES], pmpcol[NSAMPLES], pmmcol[NSAMPLES]; CMS_UNROLL_LOOP for (int counter = 0; counter < NSAMPLES; counter++) { pmcol[counter] = __ldg(&pulseMatrix.coeffRef(counter, ipulse)); pmpcol[counter] = __ldg(&pulseMatrixP.coeffRef(counter, ipulse)); pmmcol[counter] = __ldg(&pulseMatrixM.coeffRef(counter, ipulse)); } auto const ampl2 = resultAmplitude * resultAmplitude; CMS_UNROLL_LOOP for (int col = 0; col < NSAMPLES; col++) { auto const valueP_col = pmpcol[col]; auto const valueM_col = pmmcol[col]; auto const value_col = pmcol[col]; auto const tmppcol = valueP_col - value_col; auto const tmpmcol = valueM_col - value_col; // diagonal auto tmp_value = 0.5 * (tmppcol * tmppcol + tmpmcol * tmpmcol); covarianceMatrix(col, col) += ampl2 * tmp_value; // FIXME: understand if this actually gets unrolled CMS_UNROLL_LOOP for (int row = col + 1; row < NSAMPLES; row++) { float const valueP_row = pmpcol[row]; //pulseMatrixP(j, ipulseReal); float const value_row = pmcol[row]; //pulseMatrix(j, ipulseReal); float const valueM_row = pmmcol[row]; //pulseMatrixM(j, ipulseReal); float tmpprow = valueP_row - value_row; float tmpmrow = valueM_row - value_row; auto const covValue = 0.5 * (tmppcol * tmpprow + tmpmcol * tmpmrow); covarianceMatrix(row, col) += ampl2 * covValue; } } } } template <int 
NSAMPLES, int NPULSES> __global__ void kernel_minimize(float* outputEnergy, float* outputChi2, float const* __restrict__ inputAmplitudes, float const* __restrict__ pulseMatrices, float const* __restrict__ pulseMatricesM, float const* __restrict__ pulseMatricesP, int const* __restrict__ pulseOffsetValues, float const* __restrict__ noiseTerms, float const* __restrict__ electronicNoiseTerms, int8_t const* __restrict__ soiSamples, float const* __restrict__ noiseCorrelationValues, float const* __restrict__ pedestalWidths, float const* __restrict__ effectivePedestalWidths, bool const useEffectivePedestals, uint32_t const* __restrict__ idsf01HE, uint32_t const* __restrict__ idsf5HB, uint32_t const* __restrict__ idsf3HB, float const* __restrict__ gainValues, float const* __restrict__ respCorrectionValues, uint32_t const nchannelsf01HE, uint32_t const nchannelsf5HB, uint32_t const nchannelsTotal, uint32_t const offsetForHashes, int const maxDepthHB, int const maxDepthHE, int const maxPhiHE, int const firstHBRing, int const lastHBRing, int const firstHERing, int const lastHERing, int const nEtaHB, int const nEtaHE) { // can be relaxed if needed - minor updates are needed in that case! static_assert(NPULSES == NSAMPLES); // indices auto const gch = threadIdx.x + blockIdx.x * blockDim.x; auto const nchannelsf015 = nchannelsf01HE + nchannelsf5HB; if (gch >= nchannelsTotal) return; // if chi2 is set to -9999 do not run minimization if (outputChi2[gch] == -9999.f) return; // configure shared mem extern __shared__ char shrmem[]; float* shrMatrixLFnnlsStorage = reinterpret_cast<float*>(shrmem) + calo::multifit::MapSymM<float, NPULSES>::total * threadIdx.x; float* shrAtAStorage = reinterpret_cast<float*>(shrmem) + calo::multifit::MapSymM<float, NPULSES>::total * (threadIdx.x + blockDim.x); // conditions for pedestal widths auto const id = gch < nchannelsf01HE ? idsf01HE[gch] : (gch < nchannelsf015 ? idsf5HB[gch - nchannelsf01HE] : idsf3HB[gch - nchannelsf015]); auto const did = DetId{id}; auto const hashedId = did.subdetId() == HcalBarrel ? hcal::reconstruction::did2linearIndexHB(id, maxDepthHB, firstHBRing, lastHBRing, nEtaHB) : hcal::reconstruction::did2linearIndexHE(id, maxDepthHE, maxPhiHE, firstHERing, lastHERing, nEtaHE) + offsetForHashes; auto const* pedestalWidthsForChannel = useEffectivePedestals && (gch < nchannelsf01HE || gch >= nchannelsf015) ? 
effectivePedestalWidths + hashedId * 4 : pedestalWidths + hashedId * 4; auto const averagePedestalWidth2 = 0.25 * (pedestalWidthsForChannel[0] * pedestalWidthsForChannel[0] + pedestalWidthsForChannel[1] * pedestalWidthsForChannel[1] + pedestalWidthsForChannel[2] * pedestalWidthsForChannel[2] + pedestalWidthsForChannel[3] * pedestalWidthsForChannel[3]); auto const* gains = gainValues + hashedId * 4; // FIXME on cpu ts 0 capid was used - does it make any difference auto const gain = gains[0]; auto const respCorrection = respCorrectionValues[hashedId]; auto const noisecorr = noiseCorrelationValues[hashedId]; #ifdef HCAL_MAHI_GPUDEBUG #ifdef HCAL_MAHI_GPUDEBUG_FILTERDETID if (id != DETID_TO_DEBUG) return; #endif #endif /* // TODO: provide this properly int const soi = soiSamples[gch]; */ calo::multifit::ColumnVector<NPULSES, int> pulseOffsets; CMS_UNROLL_LOOP for (int i = 0; i < NPULSES; ++i) pulseOffsets(i) = i; // pulseOffsets(i) = pulseOffsetValues[i] - pulseOffsetValues[0]; // output amplitudes/weights calo::multifit::ColumnVector<NPULSES> resultAmplitudesVector = calo::multifit::ColumnVector<NPULSES>::Zero(); // map views Eigen::Map<const calo::multifit::ColumnVector<NSAMPLES>> inputAmplitudesView{inputAmplitudes + gch * NSAMPLES}; Eigen::Map<const calo::multifit::ColumnVector<NSAMPLES>> noiseTermsView{noiseTerms + gch * NSAMPLES}; Eigen::Map<const calo::multifit::ColumnVector<NSAMPLES>> noiseElectronicView{electronicNoiseTerms + gch * NSAMPLES}; Eigen::Map<const calo::multifit::ColMajorMatrix<NSAMPLES, NPULSES>> glbPulseMatrixMView{pulseMatricesM + gch * NSAMPLES * NPULSES}; Eigen::Map<const calo::multifit::ColMajorMatrix<NSAMPLES, NPULSES>> glbPulseMatrixPView{pulseMatricesP + gch * NSAMPLES * NPULSES}; Eigen::Map<const calo::multifit::ColMajorMatrix<NSAMPLES, NPULSES>> glbPulseMatrixView{pulseMatrices + gch * NSAMPLES * NPULSES}; #ifdef HCAL_MAHI_GPUDEBUG for (int i = 0; i < NSAMPLES; i++) printf("inputValues(%d) = %f noiseTerms(%d) = %f\n", i, inputAmplitudesView(i), i, noiseTermsView(i)); for (int i = 0; i < NSAMPLES; i++) { for (int j = 0; j < NPULSES; j++) printf("%f ", glbPulseMatrixView(i, j)); printf("\n"); } printf("\n"); for (int i = 0; i < NSAMPLES; i++) { for (int j = 0; j < NPULSES; j++) printf("%f ", glbPulseMatrixMView(i, j)); printf("\n"); } printf("\n"); for (int i = 0; i < NSAMPLES; i++) { for (int j = 0; j < NPULSES; j++) printf("%f ", glbPulseMatrixPView(i, j)); printf("\n"); } #endif int npassive = 0; float chi2 = 0, previous_chi2 = 0.f, chi2_2itersback = 0.f; for (int iter = 1; iter < nMaxItersMin; iter++) { //float covarianceMatrixStorage[MapSymM<float, NSAMPLES>::total]; // NOTE: only works when NSAMPLES == NPULSES // if does not hold -> slightly rearrange shared mem to still reuse // shared memory float* covarianceMatrixStorage = shrMatrixLFnnlsStorage; calo::multifit::MapSymM<float, NSAMPLES> covarianceMatrix{covarianceMatrixStorage}; CMS_UNROLL_LOOP for (int counter = 0; counter < calo::multifit::MapSymM<float, NSAMPLES>::total; counter++) covarianceMatrixStorage[counter] = (noisecorr != 0.f) ? 
0.f : averagePedestalWidth2; CMS_UNROLL_LOOP for (unsigned int counter = 0; counter < calo::multifit::MapSymM<float, NSAMPLES>::stride; counter++) { covarianceMatrix(counter, counter) += noiseTermsView.coeffRef(counter); if (counter != 0) covarianceMatrix(counter, counter - 1) += noisecorr * __ldg(&noiseElectronicView.coeffRef(counter - 1)) * __ldg(&noiseElectronicView.coeffRef(counter)); } // update covariance matrix update_covariance( resultAmplitudesVector, covarianceMatrix, glbPulseMatrixView, glbPulseMatrixMView, glbPulseMatrixPView); #ifdef HCAL_MAHI_GPUDEBUG printf("covariance matrix\n"); for (int i = 0; i < 8; i++) { for (int j = 0; j < 8; j++) printf("%f ", covarianceMatrix(i, j)); printf("\n"); } #endif // compute Cholesky Decomposition L matrix //matrixDecomposition.compute(covarianceMatrix); //auto const& matrixL = matrixDecomposition.matrixL(); float matrixLStorage[calo::multifit::MapSymM<float, NSAMPLES>::total]; calo::multifit::MapSymM<float, NSAMPLES> matrixL{matrixLStorage}; calo::multifit::compute_decomposition_unrolled(matrixL, covarianceMatrix); // // replace eigen // //auto const& A = matrixDecomposition // .matrixL() // .solve(pulseMatrixView); calo::multifit::ColMajorMatrix<NSAMPLES, NPULSES> A; calo::multifit::solve_forward_subst_matrix(A, glbPulseMatrixView, matrixL); // // remove eigen // //auto const& b = matrixL // .solve(inputAmplitudesView); // float reg_b[NSAMPLES]; calo::multifit::solve_forward_subst_vector(reg_b, inputAmplitudesView, matrixL); // TODO: we do not really need to change these matrcies // will be fixed in the optimized version //ColMajorMatrix<NPULSES, NPULSES> AtA = A.transpose() * A; //ColumnVector<NPULSES> Atb = A.transpose() * b; //ColMajorMatrix<NPULSES, NPULSES> AtA; //float AtAStorage[MapSymM<float, NPULSES>::total]; calo::multifit::MapSymM<float, NPULSES> AtA{shrAtAStorage}; calo::multifit::ColumnVector<NPULSES> Atb; CMS_UNROLL_LOOP for (int icol = 0; icol < NPULSES; icol++) { float reg_ai[NSAMPLES]; // load column icol CMS_UNROLL_LOOP for (int counter = 0; counter < NSAMPLES; counter++) reg_ai[counter] = A(counter, icol); // compute diagonal float sum = 0.f; CMS_UNROLL_LOOP for (int counter = 0; counter < NSAMPLES; counter++) sum += reg_ai[counter] * reg_ai[counter]; // store AtA(icol, icol) = sum; // go thru the other columns CMS_UNROLL_LOOP for (int j = icol + 1; j < NPULSES; j++) { // load column j float reg_aj[NSAMPLES]; CMS_UNROLL_LOOP for (int counter = 0; counter < NSAMPLES; counter++) reg_aj[counter] = A(counter, j); // accum float sum = 0.f; CMS_UNROLL_LOOP for (int counter = 0; counter < NSAMPLES; counter++) sum += reg_aj[counter] * reg_ai[counter]; // store //AtA(icol, j) = sum; AtA(j, icol) = sum; } // Atb accum float sum_atb = 0; CMS_UNROLL_LOOP for (int counter = 0; counter < NSAMPLES; counter++) sum_atb += reg_ai[counter] * reg_b[counter]; // store atb Atb(icol) = sum_atb; } #ifdef HCAL_MAHI_GPUDEBUG printf("AtA\n"); for (int i = 0; i < 8; i++) { for (int j = 0; j < 8; j++) printf("%f ", AtA(i, j)); printf("\n"); } printf("Atb\n"); for (int i = 0; i < 8; i++) printf("%f ", Atb(i)); printf("\n"); printf("result Amplitudes before nnls\n"); for (int i = 0; i < 8; i++) printf("%f ", resultAmplitudesVector(i)); printf("\n"); #endif // for fnnls calo::multifit::MapSymM<float, NPULSES> matrixLForFnnls{shrMatrixLFnnlsStorage}; // run fast nnls calo::multifit::fnnls( AtA, Atb, resultAmplitudesVector, npassive, pulseOffsets, matrixLForFnnls, nnlsThresh, nMaxItersNNLS, 10, 10); #ifdef HCAL_MAHI_GPUDEBUG printf("result 
Amplitudes\n"); for (int i = 0; i < 8; i++) printf("resultAmplitudes(%d) = %f\n", i, resultAmplitudesVector(i)); #endif calo::multifit::calculateChiSq(matrixL, glbPulseMatrixView, resultAmplitudesVector, inputAmplitudesView, chi2); auto const deltaChi2 = std::abs(chi2 - previous_chi2); if (chi2 == chi2_2itersback && chi2 < previous_chi2) break; // update chi2_2itersback = previous_chi2; previous_chi2 = chi2; // exit condition if (deltaChi2 < deltaChi2Threashold) break; } #ifdef HCAL_MAHI_GPUDEBUG for (int i = 0; i < NPULSES; i++) printf("pulseOffsets(%d) = %d outputAmplitudes(%d) = %f\n", i, pulseOffsets(i), i, resultAmplitudesVector(i)); printf("chi2 = %f\n", chi2); #endif outputChi2[gch] = chi2; auto const idx_for_energy = std::abs(pulseOffsetValues[0]); outputEnergy[gch] = (gain * resultAmplitudesVector(idx_for_energy)) * respCorrection; /* CMS_UNROLL_LOOP for (int i=0; i<NPULSES; i++) if (pulseOffsets[i] == soi) // NOTE: gain is a number < 10^-3/4, multiply first to avoid stab issues outputEnergy[gch] = (gain*resultAmplitudesVector(i))*respCorrection; */ } } // namespace mahi } // namespace hcal namespace hcal { namespace reconstruction { void entryPoint(InputDataGPU const& inputGPU, OutputDataGPU& outputGPU, ConditionsProducts const& conditions, ScratchDataGPU& scratch, ConfigParameters const& configParameters, cudaStream_t cudaStream) { auto const totalChannels = inputGPU.f01HEDigis.size + inputGPU.f5HBDigis.size + inputGPU.f3HBDigis.size; // FIXME: the number of channels in output might change given that some channesl might be filtered out // do not run when there are no rechits (e.g. if HCAL is not being read), // but do set the size of the output collection to 0 outputGPU.recHits.size = totalChannels; if (totalChannels == 0) { return; } // TODO: this can be lifted by implementing a separate kernel // similar to the default one, but properly handling the diff in #sample // or modifying existing one auto const f01nsamples = compute_nsamples<Flavor1>(inputGPU.f01HEDigis.stride); auto const f5nsamples = compute_nsamples<Flavor5>(inputGPU.f5HBDigis.stride); auto const f3nsamples = compute_nsamples<Flavor3>(inputGPU.f3HBDigis.stride); int constexpr windowSize = 8; int const startingSample = f01nsamples - windowSize; assert(startingSample == 0 || startingSample == 2); if (inputGPU.f01HEDigis.stride > 0 && inputGPU.f5HBDigis.stride > 0) assert(f01nsamples == f5nsamples); if (inputGPU.f01HEDigis.stride > 0 && inputGPU.f3HBDigis.stride > 0) assert(f01nsamples == f3nsamples); dim3 threadsPerBlock{windowSize, configParameters.kprep1dChannelsPerBlock}; int blocks = static_cast<uint32_t>(threadsPerBlock.y) > totalChannels ? 
1 : (totalChannels + threadsPerBlock.y - 1) / threadsPerBlock.y; int nbytesShared = ((2 * windowSize + 2) * sizeof(float) + sizeof(uint64_t)) * configParameters.kprep1dChannelsPerBlock; hcal::mahi::kernel_prep1d_sameNumberOfSamples<<<blocks, threadsPerBlock, nbytesShared, cudaStream>>>( scratch.amplitudes.get(), scratch.noiseTerms.get(), scratch.electronicNoiseTerms.get(), outputGPU.recHits.energy.get(), outputGPU.recHits.chi2.get(), inputGPU.f01HEDigis.data.get(), inputGPU.f5HBDigis.data.get(), inputGPU.f3HBDigis.data.get(), inputGPU.f01HEDigis.ids.get(), inputGPU.f5HBDigis.ids.get(), inputGPU.f3HBDigis.ids.get(), inputGPU.f01HEDigis.stride, inputGPU.f5HBDigis.stride, inputGPU.f3HBDigis.stride, inputGPU.f01HEDigis.size, inputGPU.f5HBDigis.size, inputGPU.f5HBDigis.npresamples.get(), scratch.soiSamples.get(), outputGPU.recHits.energyM0.get(), outputGPU.recHits.timeM0.get(), outputGPU.recHits.did.get(), totalChannels, conditions.channelQuality.status, conditions.recoParams.param1, conditions.recoParams.param2, conditions.qieCoders.offsets, conditions.qieCoders.slopes, conditions.qieTypes.values, conditions.pedestalWidths.values, conditions.effectivePedestalWidths.values, conditions.pedestals.values, conditions.convertedEffectivePedestals ? conditions.convertedEffectivePedestals->values : conditions.pedestals.values, configParameters.useEffectivePedestals, conditions.sipmParameters.type, conditions.sipmParameters.fcByPE, conditions.sipmCharacteristics.parLin1, conditions.sipmCharacteristics.parLin2, conditions.sipmCharacteristics.parLin3, conditions.gains.values, conditions.respCorrs.values, conditions.topology->maxDepthHB(), conditions.topology->maxDepthHE(), conditions.recConstants->getNPhi(1) > hcal::reconstruction::IPHI_MAX ? conditions.recConstants->getNPhi(1) : hcal::reconstruction::IPHI_MAX, conditions.topology->firstHBRing(), conditions.topology->lastHBRing(), conditions.topology->firstHERing(), conditions.topology->lastHERing(), conditions.recConstants->getEtaRange(0).second - conditions.recConstants->getEtaRange(0).first + 1, conditions.topology->firstHERing() > conditions.topology->lastHERing() ? 0 : (conditions.topology->lastHERing() - conditions.topology->firstHERing() + 1), configParameters.sipmQTSShift, configParameters.sipmQNTStoSum, configParameters.firstSampleShift, conditions.offsetForHashes, configParameters.ts4Thresh, startingSample); cudaCheck(cudaGetLastError()); // 1024 is the max threads per block for gtx1080 // FIXME: take this from cuda service or something like that uint32_t const channelsPerBlock = 1024 / (windowSize * conditions.pulseOffsetsHost.size()); dim3 threadsPerBlock2{windowSize, static_cast<uint32_t>(conditions.pulseOffsetsHost.size()), channelsPerBlock}; int blocks2 = threadsPerBlock2.z > totalChannels ? 
1 : (totalChannels + threadsPerBlock2.z - 1) / threadsPerBlock2.z; #ifdef HCAL_MAHI_CPUDEBUG std::cout << "threads: " << threadsPerBlock2.x << " " << threadsPerBlock2.y << " " << threadsPerBlock2.z << std::endl; std::cout << "blocks: " << blocks2 << std::endl; #endif hcal::mahi::kernel_prep_pulseMatrices_sameNumberOfSamples<<<blocks2, threadsPerBlock2, 0, cudaStream>>>( scratch.pulseMatrices.get(), scratch.pulseMatricesM.get(), scratch.pulseMatricesP.get(), conditions.pulseOffsets.values, scratch.amplitudes.get(), inputGPU.f01HEDigis.ids.get(), inputGPU.f5HBDigis.ids.get(), inputGPU.f3HBDigis.ids.get(), inputGPU.f01HEDigis.size, inputGPU.f5HBDigis.size, totalChannels, scratch.soiSamples.get(), conditions.recoParams.ids, conditions.recoParams.acc25nsVec, conditions.recoParams.diff25nsItvlVec, conditions.recoParams.accVarLenIdxMinusOneVec, conditions.recoParams.diffVarItvlIdxMinusOneVec, conditions.recoParams.accVarLenIdxZEROVec, conditions.recoParams.diffVarItvlIdxZEROVec, configParameters.meanTime, configParameters.timeSigmaSiPM, configParameters.timeSigmaHPD, conditions.topology->maxDepthHB(), conditions.topology->maxDepthHE(), conditions.recConstants->getNPhi(1) > hcal::reconstruction::IPHI_MAX ? conditions.recConstants->getNPhi(1) : hcal::reconstruction::IPHI_MAX, conditions.topology->firstHBRing(), conditions.topology->lastHBRing(), conditions.topology->firstHERing(), conditions.topology->lastHERing(), conditions.recConstants->getEtaRange(0).second - conditions.recConstants->getEtaRange(0).first + 1, conditions.topology->firstHERing() > conditions.topology->lastHERing() ? 0 : (conditions.topology->lastHERing() - conditions.topology->firstHERing() + 1), conditions.offsetForHashes, configParameters.applyTimeSlew, configParameters.tzeroTimeSlew, configParameters.slopeTimeSlew, configParameters.tmaxTimeSlew); cudaCheck(cudaGetLastError()); // number of samples is checked in above assert if (conditions.pulseOffsetsHost.size() == 8u) { // FIXME: provide constants from configuration uint32_t threadsPerBlock = configParameters.kernelMinimizeThreads[0]; uint32_t blocks = threadsPerBlock > totalChannels ? 1 : (totalChannels + threadsPerBlock - 1) / threadsPerBlock; auto const nbytesShared = 2 * threadsPerBlock * calo::multifit::MapSymM<float, 8>::total * sizeof(float); hcal::mahi::kernel_minimize<8, 8><<<blocks, threadsPerBlock, nbytesShared, cudaStream>>>( outputGPU.recHits.energy.get(), outputGPU.recHits.chi2.get(), scratch.amplitudes.get(), scratch.pulseMatrices.get(), scratch.pulseMatricesM.get(), scratch.pulseMatricesP.get(), conditions.pulseOffsets.values, scratch.noiseTerms.get(), scratch.electronicNoiseTerms.get(), scratch.soiSamples.get(), conditions.sipmParameters.auxi2, conditions.pedestalWidths.values, conditions.effectivePedestalWidths.values, configParameters.useEffectivePedestals, inputGPU.f01HEDigis.ids.get(), inputGPU.f5HBDigis.ids.get(), inputGPU.f3HBDigis.ids.get(), conditions.gains.values, conditions.respCorrs.values, inputGPU.f01HEDigis.size, inputGPU.f5HBDigis.size, totalChannels, conditions.offsetForHashes, conditions.topology->maxDepthHB(), conditions.topology->maxDepthHE(), conditions.recConstants->getNPhi(1) > hcal::reconstruction::IPHI_MAX ? 
conditions.recConstants->getNPhi(1) : hcal::reconstruction::IPHI_MAX, conditions.topology->firstHBRing(), conditions.topology->lastHBRing(), conditions.topology->firstHERing(), conditions.topology->lastHERing(), conditions.recConstants->getEtaRange(0).second - conditions.recConstants->getEtaRange(0).first + 1, conditions.topology->firstHERing() > conditions.topology->lastHERing() ? 0 : (conditions.topology->lastHERing() - conditions.topology->firstHERing() + 1)); } else { throw cms::Exception("Invalid MahiGPU configuration") << "Currently support only 8 pulses and 8 time samples and provided: " << f01nsamples << " samples and " << conditions.pulseOffsetsHost.size() << " pulses" << std::endl; } } } // namespace reconstruction } // namespace hcal
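// --------------------------------------------------------------------------
// Illustrative sketch (not part of the upstream file): the launch above requests
//   nbytesShared = ((2 * windowSize + 2) * sizeof(float) + sizeof(uint64_t)) * channelsPerBlock
// because kernel_prep1d_sameNumberOfSamples carves its dynamic shared memory into
// five per-channel regions: two arrays of windowSize floats (shrEnergyM0PerTS and
// shrChargeMinusPedestal), two single floats (shrMethod0EnergyAccum and
// shrEnergyM0TotalAccum), and one packed 64-bit (sample, energy) word
// (shrMethod0EnergySamplePair). The standalone helper below only restates that
// arithmetic; the channelsPerBlock value is a hypothetical stand-in for
// configParameters.kprep1dChannelsPerBlock.
#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr std::size_t prep1dSharedBytes(std::size_t windowSize, std::size_t channelsPerBlock) {
  return ((2 * windowSize + 2) * sizeof(float) + sizeof(uint64_t)) * channelsPerBlock;
}

int main() {
  std::size_t const windowSize = 8;         // the 8-sample window hard-coded in entryPoint
  std::size_t const channelsPerBlock = 32;  // hypothetical example value
  std::printf("prep1d dynamic shared memory per block: %zu bytes\n",
              prep1dSharedBytes(windowSize, channelsPerBlock));
  return 0;
}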
1c2f7d48a2f83b2a942589c092be168b8a88af6c.hip
// !!! This is a file automatically generated by hipify!!!
#include "JitteredSampler.h"
#include "hiprand/hiprand.h"
#include "hiprand/hiprand_kernel.h"

/* JitteredSampler */

__device__ void JitteredSampler::GenerateSamples(int l_num_of_samples, int l_num_of_sets)
{
    hiprandState_t curand_state;
    hiprand_init(23041996,0,0,&curand_state);

    square_samples = (float2*)malloc(sizeof(float2) * l_num_of_samples * l_num_of_sets);

    int n = sqrt(float(l_num_of_samples));
    num_samples = n*n;
    num_sets = l_num_of_sets;

    for (int i = 0; i < l_num_of_sets; ++i)
        for (int y = 0; y < n; ++y)
            for (int x = 0; x < n; ++x)
                square_samples[i * num_samples + n * y + x] =
                    make_float2(float(x) / n + hiprand_uniform(&curand_state) / n,
                                float(y) / n + hiprand_uniform(&curand_state) / n);
}

__device__ JitteredSampler::~JitteredSampler() {}
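// --------------------------------------------------------------------------
// Illustrative sketch (not part of the generated file): a host-side reference of
// the same jittered (stratified) sampling rule, useful for spot-checking the
// device output. The function name and the use of <random> are assumptions; only
// the sampling rule mirrors GenerateSamples above: the sample for cell (x, y) of
// an n x n grid is ((x + u) / n, (y + v) / n) with u, v uniform in [0, 1).
#include <cmath>
#include <cstddef>
#include <random>
#include <utility>
#include <vector>

std::vector<std::pair<float, float>> makeJitteredSamplesReference(int numSamples,
                                                                  int numSets,
                                                                  unsigned seed = 23041996u) {
  std::mt19937 rng(seed);
  std::uniform_real_distribution<float> uni(0.0f, 1.0f);
  int const n = static_cast<int>(std::sqrt(static_cast<float>(numSamples)));
  std::vector<std::pair<float, float>> out;
  out.reserve(static_cast<std::size_t>(n) * n * numSets);
  for (int s = 0; s < numSets; ++s)
    for (int y = 0; y < n; ++y)
      for (int x = 0; x < n; ++x)
        out.emplace_back((x + uni(rng)) / n, (y + uni(rng)) / n);  // one jittered sample per cell
  return out;
}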
1c2f7d48a2f83b2a942589c092be168b8a88af6c.cu
#include "JitteredSampler.h"
#include "curand.h"
#include "curand_kernel.h"

/* JitteredSampler */

__device__ void JitteredSampler::GenerateSamples(int l_num_of_samples, int l_num_of_sets)
{
    curandState_t curand_state;
    curand_init(23041996,0,0,&curand_state);

    square_samples = (float2*)malloc(sizeof(float2) * l_num_of_samples * l_num_of_sets);

    int n = sqrt(float(l_num_of_samples));
    num_samples = n*n;
    num_sets = l_num_of_sets;

    for (int i = 0; i < l_num_of_sets; ++i)
        for (int y = 0; y < n; ++y)
            for (int x = 0; x < n; ++x)
                square_samples[i * num_samples + n * y + x] =
                    make_float2(float(x) / n + curand_uniform(&curand_state) / n,
                                float(y) / n + curand_uniform(&curand_state) / n);
}

__device__ JitteredSampler::~JitteredSampler() {}
9b571089e94b85b49fdd3d0addf7a69b8f11c27d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * This file belongs to the Galois project, a C++ library for exploiting parallelism. * The code is being released under the terms of the 3-Clause BSD License (a * copy is located in LICENSE.txt at the top-level directory). * * Copyright (C) 2018, The University of Texas at Austin. All rights reserved. * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY, * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances * shall University be liable for incidental, special, indirect, direct or * consequential damages or loss of profits, interruption of business, or * related expenses which may arise from use of Software or Documentation, * including but not limited to those resulting from defects in Software and/or * Documentation, or loss or inaccuracy of data of any kind. */ /* -*- mode: c++ -*- */ #include "gg.h" #include "ggcuda.h" void kernel_sizing(CSRGraph &, dim3 &, dim3 &); #define TB_SIZE 256 const char *GGC_OPTIONS = "coop_conv=False $ outline_iterate_gb=False $ backoff_blocking_factor=4 $ parcomb=True $ np_schedulers=set(['fg', 'tb', 'wp']) $ cc_disable=set([]) $ hacks=set([]) $ np_factor=8 $ instrument=set([]) $ unroll=[] $ instrument_mode=None $ read_props=None $ outline_iterate=True $ ignore_nested_errors=False $ np=True $ write_props=None $ quiet_cgen=True $ retry_backoff=True $ cuda.graph_type=basic $ cuda.use_worklist_slots=True $ cuda.worklist_type=basic"; uint32_t * P_NOUT; float * P_RESIDUAL; float * P_VALUE; #include "kernels/reduce.cuh" #include "gen_hip.cuh" static const int __tb_PageRank = TB_SIZE; static const int __tb_InitializeGraph = TB_SIZE; static const int __tb_InitializeGraphNout = TB_SIZE; __global__ void ResetGraph(CSRGraph graph, unsigned int __nowned, unsigned int __begin, unsigned int __end, uint32_t * p_nout, float * p_residual, float * p_delta, float * p_value) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = TB_SIZE; index_type src_end; // FP: "1 -> 2; src_end = __end; for (index_type src = __begin + tid; src < src_end; src += nthreads) { bool pop = src < __end; if (pop) { p_value[src] = 0; p_nout[src] = 0; p_residual[src] = 0; p_delta[src] = 0; } } // FP: "9 -> 10; } __global__ void InitializeGraph(CSRGraph graph, DynamicBitset *nout_is_updated, unsigned int __nowned, unsigned int __begin, unsigned int __end, const float local_alpha, uint32_t * p_nout, float * p_residual, float * p_delta, float * p_value) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = __tb_InitializeGraphNout; index_type src_end; // FP: "1 -> 2; src_end = __end; for (index_type src = __begin + tid; src < src_end; src += nthreads) { bool pop = src < __end; if (pop) { // manual change by Loc to reflect newly changed CPU code // some sets no longer necessary if reset graph called first //p_value[src] = 0; p_residual[src] = local_alpha; //residual_is_updated->set(src); //p_delta[src] = 0; atomicAdd(&p_nout[src], graph.getOutDegree(src)); nout_is_updated->set(src); } } // FP: "8 -> 9; } __global__ void PageRank_delta(CSRGraph graph, unsigned int __nowned, unsigned int __begin, 
unsigned int __end, const float local_alpha, float local_tolerance, uint32_t * p_nout, float * p_residual, float * p_delta, float * p_value) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = __tb_InitializeGraphNout; float residual_old; index_type src_end; // FP: "1 -> 2; src_end = __end; for (index_type src = __begin + tid; src < src_end; src += nthreads) { bool pop = src < __end; if (pop) { if (p_residual[src] > local_tolerance) { residual_old = p_residual[src]; p_residual[src] = 0; p_value[src] += residual_old; if (p_nout[src] > 0) { p_delta[src] = residual_old*(1-local_alpha)/p_nout[src]; } } } } // FP: "8 -> 9; } __global__ void PageRank(CSRGraph graph, DynamicBitset *is_updated, unsigned int __nowned, unsigned int __begin, unsigned int __end, float * p_residual, float * p_delta, HGAccumulator<int> ret_val) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = __tb_PageRank; typedef hipcub::BlockReduce<int, TB_SIZE> _br; __shared__ _br::TempStorage _ts; ret_val.thread_entry(); float delta; index_type src_end; index_type src_rup; // FP: "1 -> 2; const int _NP_CROSSOVER_WP = 32; const int _NP_CROSSOVER_TB = __kernel_tb_size; // FP: "2 -> 3; const int BLKSIZE = __kernel_tb_size; const int ITSIZE = BLKSIZE * 8; // FP: "3 -> 4; typedef hipcub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan; typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy; // FP: "4 -> 5; __shared__ npsTy nps ; // FP: "5 -> 6; // FP: "6 -> 7; // FP: "7 -> 8; src_end = __end; src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x))); for (index_type src = __begin + tid; src < src_rup; src += nthreads) { multiple_sum<2, index_type> _np_mps; multiple_sum<2, index_type> _np_mps_total; // FP: "8 -> 9; bool pop = src < __end; // FP: "9 -> 10; if (pop) { if (p_delta[src] > 0) { delta = p_delta[src]; p_delta[src] = 0; ret_val.reduce( 1); } else { pop = false; } } // FP: "18 -> 19; // FP: "21 -> 22; struct NPInspector1 _np = {0,0,0,0,0,0}; // FP: "22 -> 23; __shared__ struct { float delta; } _np_closure [TB_SIZE]; // FP: "23 -> 24; _np_closure[threadIdx.x].delta = delta; // FP: "24 -> 25; if (pop) { _np.size = (graph).getOutDegree(src); _np.start = (graph).getFirstEdge(src); } // FP: "27 -> 28; // FP: "28 -> 29; _np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? _np.size : 0; _np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? 
_np.size : 0; // FP: "29 -> 30; BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total); // FP: "30 -> 31; if (threadIdx.x == 0) { nps.tb.owner = MAX_TB_SIZE + 1; } // FP: "33 -> 34; __syncthreads(); // FP: "34 -> 35; while (true) { // FP: "35 -> 36; if (_np.size >= _NP_CROSSOVER_TB) { nps.tb.owner = threadIdx.x; } // FP: "38 -> 39; __syncthreads(); // FP: "39 -> 40; if (nps.tb.owner == MAX_TB_SIZE + 1) { // FP: "40 -> 41; __syncthreads(); // FP: "41 -> 42; break; } // FP: "43 -> 44; if (nps.tb.owner == threadIdx.x) { nps.tb.start = _np.start; nps.tb.size = _np.size; nps.tb.src = threadIdx.x; _np.start = 0; _np.size = 0; } // FP: "46 -> 47; __syncthreads(); // FP: "47 -> 48; int ns = nps.tb.start; int ne = nps.tb.size; // FP: "48 -> 49; if (nps.tb.src == threadIdx.x) { nps.tb.owner = MAX_TB_SIZE + 1; } // FP: "51 -> 52; assert(nps.tb.src < __kernel_tb_size); delta = _np_closure[nps.tb.src].delta; // FP: "52 -> 53; for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE) { index_type nbr; nbr = ns +_np_j; { index_type dst; float dst_residual_old; dst = graph.getAbsDestination(nbr); dst_residual_old = atomicAdd(&p_residual[dst], delta); is_updated->set(dst); } } // FP: "60 -> 61; __syncthreads(); } // FP: "62 -> 63; // FP: "63 -> 64; { const int warpid = threadIdx.x / 32; // FP: "64 -> 65; const int _np_laneid = cub::LaneId(); // FP: "65 -> 66; while (__any(_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)) { if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB) { nps.warp.owner[warpid] = _np_laneid; } if (nps.warp.owner[warpid] == _np_laneid) { nps.warp.start[warpid] = _np.start; nps.warp.size[warpid] = _np.size; nps.warp.src[warpid] = threadIdx.x; _np.start = 0; _np.size = 0; } index_type _np_w_start = nps.warp.start[warpid]; index_type _np_w_size = nps.warp.size[warpid]; assert(nps.warp.src[warpid] < __kernel_tb_size); delta = _np_closure[nps.warp.src[warpid]].delta; for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32) { index_type nbr; nbr = _np_w_start +_np_ii; { index_type dst; float dst_residual_old; dst = graph.getAbsDestination(nbr); dst_residual_old = atomicAdd(&p_residual[dst], delta); is_updated->set(dst); } } } // FP: "83 -> 84; __syncthreads(); // FP: "84 -> 85; } // FP: "85 -> 86; __syncthreads(); // FP: "86 -> 87; _np.total = _np_mps_total.el[1]; _np.offset = _np_mps.el[1]; // FP: "87 -> 88; while (_np.work()) { // FP: "88 -> 89; int _np_i =0; // FP: "89 -> 90; _np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x); // FP: "90 -> 91; __syncthreads(); // FP: "91 -> 92; // FP: "92 -> 93; for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE) { index_type nbr; assert(nps.fg.src[_np_i] < __kernel_tb_size); delta = _np_closure[nps.fg.src[_np_i]].delta; nbr= nps.fg.itvalue[_np_i]; { index_type dst; float dst_residual_old; dst = graph.getAbsDestination(nbr); dst_residual_old = atomicAdd(&p_residual[dst], delta); is_updated->set(dst); } } // FP: "101 -> 102; _np.execute_round_done(ITSIZE); // FP: "102 -> 103; __syncthreads(); } // FP: "104 -> 105; assert(threadIdx.x < __kernel_tb_size); delta = _np_closure[threadIdx.x].delta; // FP: "105 -> 106; // FP: "106 -> 107; } ret_val.thread_exit<_br>(_ts); } void ResetGraph_cuda(unsigned int __begin, unsigned int __end, struct CUDA_Context * ctx) { dim3 blocks; dim3 threads; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; hipLaunchKernelGGL(( ResetGraph) , dim3(blocks), dim3(threads), 0, 0, ctx->gg, 
ctx->numNodesWithEdges, __begin, __end, ctx->nout.data.gpu_wr_ptr(), ctx->residual.data.gpu_wr_ptr(), ctx->delta.data.gpu_wr_ptr(), ctx->value.data.gpu_wr_ptr()); // FP: "5 -> 6; check_cuda_kernel; // FP: "6 -> 7; } void ResetGraph_all_cuda(struct CUDA_Context * ctx) { // FP: "1 -> 2; ResetGraph_cuda(0, ctx->numNodesWithEdges, ctx); // FP: "2 -> 3; } void InitializeGraph_cuda(unsigned int __begin, unsigned int __end, const float & local_alpha, struct CUDA_Context * ctx) { dim3 blocks; dim3 threads; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; hipLaunchKernelGGL(( InitializeGraph) , dim3(blocks), dim3(__tb_InitializeGraph), 0, 0, ctx->gg, ctx->nout.is_updated.gpu_rd_ptr(), ctx->numNodesWithEdges, __begin, __end, local_alpha, ctx->nout.data.gpu_wr_ptr(), ctx->residual.data.gpu_wr_ptr(), ctx->delta.data.gpu_wr_ptr(), ctx->value.data.gpu_wr_ptr()); // FP: "5 -> 6; check_cuda_kernel; // FP: "6 -> 7; } void InitializeGraph_all_cuda(const float & local_alpha, struct CUDA_Context * ctx) { // FP: "1 -> 2; InitializeGraph_cuda(0, ctx->numNodesWithEdges, local_alpha, ctx); // FP: "2 -> 3; } void PageRank_delta_cuda(unsigned int __begin, unsigned int __end, const float & local_alpha, float local_tolerance, struct CUDA_Context * ctx) { dim3 blocks; dim3 threads; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; hipLaunchKernelGGL(( PageRank_delta) , dim3(blocks), dim3(__tb_PageRank), 0, 0, ctx->gg, ctx->numNodesWithEdges, __begin, __end, local_alpha, local_tolerance, ctx->nout.data.gpu_wr_ptr(), ctx->residual.data.gpu_wr_ptr(), ctx->delta.data.gpu_wr_ptr(), ctx->value.data.gpu_wr_ptr()); // FP: "5 -> 6; check_cuda_kernel; // FP: "6 -> 7; } void PageRank_delta_all_cuda(const float & local_alpha, float local_tolerance, struct CUDA_Context * ctx) { // FP: "1 -> 2; PageRank_delta_cuda(0, ctx->numNodesWithEdges, local_alpha, local_tolerance, ctx); // FP: "2 -> 3; } void PageRank_cuda(unsigned int __begin, unsigned int __end, int & __retval, struct CUDA_Context * ctx) { dim3 blocks; dim3 threads; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; Shared<int> retval = Shared<int>(1); HGAccumulator<int> _rv; *(retval.cpu_wr_ptr()) = 0; _rv.rv = retval.gpu_wr_ptr(); hipLaunchKernelGGL(( PageRank) , dim3(blocks), dim3(__tb_PageRank), 0, 0, ctx->gg, ctx->residual.is_updated.gpu_rd_ptr(), ctx->numNodesWithEdges, __begin, __end, ctx->residual.data.gpu_wr_ptr(), ctx->delta.data.gpu_wr_ptr(), _rv); // FP: "5 -> 6; check_cuda_kernel; // FP: "6 -> 7; __retval = *(retval.cpu_rd_ptr()); // FP: "7 -> 8; } void PageRank_all_cuda(int & __retval, struct CUDA_Context * ctx) { // FP: "1 -> 2; PageRank_cuda(0, ctx->numNodesWithEdges, __retval, ctx); // FP: "2 -> 3; }
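// --------------------------------------------------------------------------
// Illustrative sketch (not part of the generated file): a minimal single-GPU
// driver showing how the wrappers above fit together. The real Galois/Gluon
// orchestration additionally synchronizes the residual/nout fields across hosts
// through the is_updated bitsets, which is omitted here. The function name and
// the local_alpha/tolerance values are assumptions for illustration only; note
// that PageRank_delta uses (1 - local_alpha) as the damping factor, so
// local_alpha plays the role of the teleport term.
void pagerank_rounds_cuda_sketch(struct CUDA_Context* ctx) {
  float const local_alpha = 0.15f;  // example teleport term
  float const tolerance = 1.0e-6f;  // example convergence tolerance

  ResetGraph_all_cuda(ctx);
  InitializeGraph_all_cuda(local_alpha, ctx);

  int active = 0;
  do {
    PageRank_delta_all_cuda(local_alpha, tolerance, ctx);  // turn residuals into per-node deltas
    PageRank_all_cuda(active, ctx);                        // push deltas; counts nodes that did work
  } while (active > 0);
}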
9b571089e94b85b49fdd3d0addf7a69b8f11c27d.cu
/* * This file belongs to the Galois project, a C++ library for exploiting parallelism. * The code is being released under the terms of the 3-Clause BSD License (a * copy is located in LICENSE.txt at the top-level directory). * * Copyright (C) 2018, The University of Texas at Austin. All rights reserved. * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY, * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances * shall University be liable for incidental, special, indirect, direct or * consequential damages or loss of profits, interruption of business, or * related expenses which may arise from use of Software or Documentation, * including but not limited to those resulting from defects in Software and/or * Documentation, or loss or inaccuracy of data of any kind. */ /* -*- mode: c++ -*- */ #include "gg.h" #include "ggcuda.h" void kernel_sizing(CSRGraph &, dim3 &, dim3 &); #define TB_SIZE 256 const char *GGC_OPTIONS = "coop_conv=False $ outline_iterate_gb=False $ backoff_blocking_factor=4 $ parcomb=True $ np_schedulers=set(['fg', 'tb', 'wp']) $ cc_disable=set([]) $ hacks=set([]) $ np_factor=8 $ instrument=set([]) $ unroll=[] $ instrument_mode=None $ read_props=None $ outline_iterate=True $ ignore_nested_errors=False $ np=True $ write_props=None $ quiet_cgen=True $ retry_backoff=True $ cuda.graph_type=basic $ cuda.use_worklist_slots=True $ cuda.worklist_type=basic"; uint32_t * P_NOUT; float * P_RESIDUAL; float * P_VALUE; #include "kernels/reduce.cuh" #include "gen_cuda.cuh" static const int __tb_PageRank = TB_SIZE; static const int __tb_InitializeGraph = TB_SIZE; static const int __tb_InitializeGraphNout = TB_SIZE; __global__ void ResetGraph(CSRGraph graph, unsigned int __nowned, unsigned int __begin, unsigned int __end, uint32_t * p_nout, float * p_residual, float * p_delta, float * p_value) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = TB_SIZE; index_type src_end; // FP: "1 -> 2; src_end = __end; for (index_type src = __begin + tid; src < src_end; src += nthreads) { bool pop = src < __end; if (pop) { p_value[src] = 0; p_nout[src] = 0; p_residual[src] = 0; p_delta[src] = 0; } } // FP: "9 -> 10; } __global__ void InitializeGraph(CSRGraph graph, DynamicBitset *nout_is_updated, unsigned int __nowned, unsigned int __begin, unsigned int __end, const float local_alpha, uint32_t * p_nout, float * p_residual, float * p_delta, float * p_value) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = __tb_InitializeGraphNout; index_type src_end; // FP: "1 -> 2; src_end = __end; for (index_type src = __begin + tid; src < src_end; src += nthreads) { bool pop = src < __end; if (pop) { // manual change by Loc to reflect newly changed CPU code // some sets no longer necessary if reset graph called first //p_value[src] = 0; p_residual[src] = local_alpha; //residual_is_updated->set(src); //p_delta[src] = 0; atomicAdd(&p_nout[src], graph.getOutDegree(src)); nout_is_updated->set(src); } } // FP: "8 -> 9; } __global__ void PageRank_delta(CSRGraph graph, unsigned int __nowned, unsigned int __begin, unsigned int __end, const float local_alpha, float local_tolerance, uint32_t * p_nout, float 
* p_residual, float * p_delta, float * p_value) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = __tb_InitializeGraphNout; float residual_old; index_type src_end; // FP: "1 -> 2; src_end = __end; for (index_type src = __begin + tid; src < src_end; src += nthreads) { bool pop = src < __end; if (pop) { if (p_residual[src] > local_tolerance) { residual_old = p_residual[src]; p_residual[src] = 0; p_value[src] += residual_old; if (p_nout[src] > 0) { p_delta[src] = residual_old*(1-local_alpha)/p_nout[src]; } } } } // FP: "8 -> 9; } __global__ void PageRank(CSRGraph graph, DynamicBitset *is_updated, unsigned int __nowned, unsigned int __begin, unsigned int __end, float * p_residual, float * p_delta, HGAccumulator<int> ret_val) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = __tb_PageRank; typedef cub::BlockReduce<int, TB_SIZE> _br; __shared__ _br::TempStorage _ts; ret_val.thread_entry(); float delta; index_type src_end; index_type src_rup; // FP: "1 -> 2; const int _NP_CROSSOVER_WP = 32; const int _NP_CROSSOVER_TB = __kernel_tb_size; // FP: "2 -> 3; const int BLKSIZE = __kernel_tb_size; const int ITSIZE = BLKSIZE * 8; // FP: "3 -> 4; typedef cub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan; typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy; // FP: "4 -> 5; __shared__ npsTy nps ; // FP: "5 -> 6; // FP: "6 -> 7; // FP: "7 -> 8; src_end = __end; src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x))); for (index_type src = __begin + tid; src < src_rup; src += nthreads) { multiple_sum<2, index_type> _np_mps; multiple_sum<2, index_type> _np_mps_total; // FP: "8 -> 9; bool pop = src < __end; // FP: "9 -> 10; if (pop) { if (p_delta[src] > 0) { delta = p_delta[src]; p_delta[src] = 0; ret_val.reduce( 1); } else { pop = false; } } // FP: "18 -> 19; // FP: "21 -> 22; struct NPInspector1 _np = {0,0,0,0,0,0}; // FP: "22 -> 23; __shared__ struct { float delta; } _np_closure [TB_SIZE]; // FP: "23 -> 24; _np_closure[threadIdx.x].delta = delta; // FP: "24 -> 25; if (pop) { _np.size = (graph).getOutDegree(src); _np.start = (graph).getFirstEdge(src); } // FP: "27 -> 28; // FP: "28 -> 29; _np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? _np.size : 0; _np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? 
_np.size : 0; // FP: "29 -> 30; BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total); // FP: "30 -> 31; if (threadIdx.x == 0) { nps.tb.owner = MAX_TB_SIZE + 1; } // FP: "33 -> 34; __syncthreads(); // FP: "34 -> 35; while (true) { // FP: "35 -> 36; if (_np.size >= _NP_CROSSOVER_TB) { nps.tb.owner = threadIdx.x; } // FP: "38 -> 39; __syncthreads(); // FP: "39 -> 40; if (nps.tb.owner == MAX_TB_SIZE + 1) { // FP: "40 -> 41; __syncthreads(); // FP: "41 -> 42; break; } // FP: "43 -> 44; if (nps.tb.owner == threadIdx.x) { nps.tb.start = _np.start; nps.tb.size = _np.size; nps.tb.src = threadIdx.x; _np.start = 0; _np.size = 0; } // FP: "46 -> 47; __syncthreads(); // FP: "47 -> 48; int ns = nps.tb.start; int ne = nps.tb.size; // FP: "48 -> 49; if (nps.tb.src == threadIdx.x) { nps.tb.owner = MAX_TB_SIZE + 1; } // FP: "51 -> 52; assert(nps.tb.src < __kernel_tb_size); delta = _np_closure[nps.tb.src].delta; // FP: "52 -> 53; for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE) { index_type nbr; nbr = ns +_np_j; { index_type dst; float dst_residual_old; dst = graph.getAbsDestination(nbr); dst_residual_old = atomicAdd(&p_residual[dst], delta); is_updated->set(dst); } } // FP: "60 -> 61; __syncthreads(); } // FP: "62 -> 63; // FP: "63 -> 64; { const int warpid = threadIdx.x / 32; // FP: "64 -> 65; const int _np_laneid = cub::LaneId(); // FP: "65 -> 66; while (__any(_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)) { if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB) { nps.warp.owner[warpid] = _np_laneid; } if (nps.warp.owner[warpid] == _np_laneid) { nps.warp.start[warpid] = _np.start; nps.warp.size[warpid] = _np.size; nps.warp.src[warpid] = threadIdx.x; _np.start = 0; _np.size = 0; } index_type _np_w_start = nps.warp.start[warpid]; index_type _np_w_size = nps.warp.size[warpid]; assert(nps.warp.src[warpid] < __kernel_tb_size); delta = _np_closure[nps.warp.src[warpid]].delta; for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32) { index_type nbr; nbr = _np_w_start +_np_ii; { index_type dst; float dst_residual_old; dst = graph.getAbsDestination(nbr); dst_residual_old = atomicAdd(&p_residual[dst], delta); is_updated->set(dst); } } } // FP: "83 -> 84; __syncthreads(); // FP: "84 -> 85; } // FP: "85 -> 86; __syncthreads(); // FP: "86 -> 87; _np.total = _np_mps_total.el[1]; _np.offset = _np_mps.el[1]; // FP: "87 -> 88; while (_np.work()) { // FP: "88 -> 89; int _np_i =0; // FP: "89 -> 90; _np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x); // FP: "90 -> 91; __syncthreads(); // FP: "91 -> 92; // FP: "92 -> 93; for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE) { index_type nbr; assert(nps.fg.src[_np_i] < __kernel_tb_size); delta = _np_closure[nps.fg.src[_np_i]].delta; nbr= nps.fg.itvalue[_np_i]; { index_type dst; float dst_residual_old; dst = graph.getAbsDestination(nbr); dst_residual_old = atomicAdd(&p_residual[dst], delta); is_updated->set(dst); } } // FP: "101 -> 102; _np.execute_round_done(ITSIZE); // FP: "102 -> 103; __syncthreads(); } // FP: "104 -> 105; assert(threadIdx.x < __kernel_tb_size); delta = _np_closure[threadIdx.x].delta; // FP: "105 -> 106; // FP: "106 -> 107; } ret_val.thread_exit<_br>(_ts); } void ResetGraph_cuda(unsigned int __begin, unsigned int __end, struct CUDA_Context * ctx) { dim3 blocks; dim3 threads; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; ResetGraph <<<blocks, threads>>>(ctx->gg, ctx->numNodesWithEdges, __begin, __end, 
ctx->nout.data.gpu_wr_ptr(), ctx->residual.data.gpu_wr_ptr(), ctx->delta.data.gpu_wr_ptr(), ctx->value.data.gpu_wr_ptr()); // FP: "5 -> 6; check_cuda_kernel; // FP: "6 -> 7; } void ResetGraph_all_cuda(struct CUDA_Context * ctx) { // FP: "1 -> 2; ResetGraph_cuda(0, ctx->numNodesWithEdges, ctx); // FP: "2 -> 3; } void InitializeGraph_cuda(unsigned int __begin, unsigned int __end, const float & local_alpha, struct CUDA_Context * ctx) { dim3 blocks; dim3 threads; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; InitializeGraph <<<blocks, __tb_InitializeGraph>>>(ctx->gg, ctx->nout.is_updated.gpu_rd_ptr(), ctx->numNodesWithEdges, __begin, __end, local_alpha, ctx->nout.data.gpu_wr_ptr(), ctx->residual.data.gpu_wr_ptr(), ctx->delta.data.gpu_wr_ptr(), ctx->value.data.gpu_wr_ptr()); // FP: "5 -> 6; check_cuda_kernel; // FP: "6 -> 7; } void InitializeGraph_all_cuda(const float & local_alpha, struct CUDA_Context * ctx) { // FP: "1 -> 2; InitializeGraph_cuda(0, ctx->numNodesWithEdges, local_alpha, ctx); // FP: "2 -> 3; } void PageRank_delta_cuda(unsigned int __begin, unsigned int __end, const float & local_alpha, float local_tolerance, struct CUDA_Context * ctx) { dim3 blocks; dim3 threads; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; PageRank_delta <<<blocks, __tb_PageRank>>>(ctx->gg, ctx->numNodesWithEdges, __begin, __end, local_alpha, local_tolerance, ctx->nout.data.gpu_wr_ptr(), ctx->residual.data.gpu_wr_ptr(), ctx->delta.data.gpu_wr_ptr(), ctx->value.data.gpu_wr_ptr()); // FP: "5 -> 6; check_cuda_kernel; // FP: "6 -> 7; } void PageRank_delta_all_cuda(const float & local_alpha, float local_tolerance, struct CUDA_Context * ctx) { // FP: "1 -> 2; PageRank_delta_cuda(0, ctx->numNodesWithEdges, local_alpha, local_tolerance, ctx); // FP: "2 -> 3; } void PageRank_cuda(unsigned int __begin, unsigned int __end, int & __retval, struct CUDA_Context * ctx) { dim3 blocks; dim3 threads; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; Shared<int> retval = Shared<int>(1); HGAccumulator<int> _rv; *(retval.cpu_wr_ptr()) = 0; _rv.rv = retval.gpu_wr_ptr(); PageRank <<<blocks, __tb_PageRank>>>(ctx->gg, ctx->residual.is_updated.gpu_rd_ptr(), ctx->numNodesWithEdges, __begin, __end, ctx->residual.data.gpu_wr_ptr(), ctx->delta.data.gpu_wr_ptr(), _rv); // FP: "5 -> 6; check_cuda_kernel; // FP: "6 -> 7; __retval = *(retval.cpu_rd_ptr()); // FP: "7 -> 8; } void PageRank_all_cuda(int & __retval, struct CUDA_Context * ctx) { // FP: "1 -> 2; PageRank_cuda(0, ctx->numNodesWithEdges, __retval, ctx); // FP: "2 -> 3; }
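The generated PageRank kernel above, once the thread-block/warp/fine-grained scheduling is peeled away, performs a push-style update: each vertex with a positive delta zeroes it, counts itself as active via the HGAccumulator, and atomically adds the delta to every out-neighbour's residual while marking that neighbour in the is_updated bitset. The following is a minimal sketch of just that core loop; the CSR field names (row_start, edge_dst), the plain int active counter, and the grid-stride loop are illustrative assumptions, not the Gluon-generated interface.

// Minimal sketch of the push-style PageRank residual update performed by the
// generated kernel above, without the thread-block/warp/fine-grained load
// balancing. CSR layout (row_start, edge_dst) and the grid-stride loop are
// illustrative assumptions, not the Gluon-generated interface.
__global__ void pagerank_push(const unsigned* row_start, const unsigned* edge_dst,
                              unsigned num_nodes, float* residual, float* delta,
                              int* active_count)
{
  for (unsigned src = blockIdx.x * blockDim.x + threadIdx.x; src < num_nodes;
       src += gridDim.x * blockDim.x) {
    float d = delta[src];
    if (d <= 0.0f) continue;     // nothing to push from this vertex this round
    delta[src] = 0.0f;
    atomicAdd(active_count, 1);  // stands in for HGAccumulator::reduce(1)
    for (unsigned e = row_start[src]; e < row_start[src + 1]; ++e) {
      // push the per-edge contribution to the neighbour's residual
      atomicAdd(&residual[edge_dst[e]], d);
      // a full implementation would also set the is_updated bitset for edge_dst[e]
    }
  }
}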
98581f25b1ee0a54e79a76726321b1e872e33c0c.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <test_utils.h> #include <cuml/ensemble/randomforest.hpp> #include <raft/cuda_utils.cuh> namespace ML { using namespace MLCommon; template <typename T> // template useless for now. struct RfInputs { int n_rows; int n_cols; int n_trees; float max_features; float max_samples; int n_inference_rows; int max_depth; int max_leaves; bool bootstrap; bool bootstrap_features; int n_bins; int split_algo; int min_samples_leaf; int min_samples_split; float min_impurity_decrease; int n_streams; CRITERION split_criterion; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const RfInputs<T>& dims) { return os; } template <typename T> class RfClassifierTest : public ::testing::TestWithParam<RfInputs<T>> { protected: void basicTest() { params = ::testing::TestWithParam<RfInputs<T>>::GetParam(); DecisionTree::DecisionTreeParams tree_params; set_tree_params(tree_params, params.max_depth, params.max_leaves, params.max_features, params.n_bins, params.split_algo, params.min_samples_leaf, params.min_samples_split, params.min_impurity_decrease, params.bootstrap_features, params.split_criterion, false); RF_params rf_params; set_all_rf_params(rf_params, params.n_trees, params.bootstrap, params.max_samples, 0, params.n_streams, tree_params); //print(rf_params); //-------------------------------------------------------- // Random Forest //-------------------------------------------------------- int data_len = params.n_rows * params.n_cols; raft::allocate(data, data_len); raft::allocate(labels, params.n_rows); raft::allocate(predicted_labels, params.n_inference_rows); hipStream_t stream; CUDA_CHECK(hipStreamCreate(&stream)); // Populate data (assume Col major) std::vector<T> data_h = {30.0, 1.0, 2.0, 0.0, 10.0, 20.0, 10.0, 40.0}; data_h.resize(data_len); raft::update_device(data, data_h.data(), data_len, stream); // Populate labels labels_h = {0, 1, 0, 4}; labels_h.resize(params.n_rows); preprocess_labels(params.n_rows, labels_h, labels_map); raft::update_device(labels, labels_h.data(), params.n_rows, stream); forest = new typename ML::RandomForestMetaData<T, int>; null_trees_ptr(forest); raft::handle_t handle(rf_params.n_streams); handle.set_stream(stream); fit(handle, forest, data, params.n_rows, params.n_cols, labels, labels_map.size(), rf_params); CUDA_CHECK(hipStreamSynchronize(stream)); //print_rf_detailed(forest); // Inference data: same as train, but row major int inference_data_len = params.n_inference_rows * params.n_cols; inference_data_h = {30.0, 10.0, 1.0, 20.0, 2.0, 10.0, 0.0, 40.0}; inference_data_h.resize(inference_data_len); raft::allocate(inference_data_d, inference_data_len); raft::update_device(inference_data_d, inference_data_h.data(), inference_data_len, stream); predict(handle, forest, inference_data_d, params.n_inference_rows, params.n_cols, predicted_labels); // Predict 
and compare against known labels RF_metrics tmp = score(handle, forest, labels, params.n_inference_rows, predicted_labels); CUDA_CHECK(hipStreamSynchronize(stream)); CUDA_CHECK(hipStreamDestroy(stream)); accuracy = tmp.accuracy; } void SetUp() override { basicTest(); } void TearDown() override { accuracy = -1.0f; // reset accuracy postprocess_labels(params.n_rows, labels_h, labels_map); inference_data_h.clear(); labels_h.clear(); labels_map.clear(); CUDA_CHECK(hipFree(labels)); CUDA_CHECK(hipFree(predicted_labels)); CUDA_CHECK(hipFree(data)); CUDA_CHECK(hipFree(inference_data_d)); delete forest; } protected: RfInputs<T> params; T *data, *inference_data_d; int* labels; std::vector<T> inference_data_h; std::vector<int> labels_h; std::map<int, int> labels_map; //unique map of labels to int vals starting from 0 RandomForestMetaData<T, int>* forest; float accuracy = -1.0f; // overriden in each test SetUp and TearDown int* predicted_labels; }; //------------------------------------------------------------------------------------------------------------------------------------- template <typename T> class RfRegressorTest : public ::testing::TestWithParam<RfInputs<T>> { protected: void basicTest() { params = ::testing::TestWithParam<RfInputs<T>>::GetParam(); DecisionTree::DecisionTreeParams tree_params; set_tree_params(tree_params, params.max_depth, params.max_leaves, params.max_features, params.n_bins, params.split_algo, params.min_samples_leaf, params.min_samples_split, params.min_impurity_decrease, params.bootstrap_features, params.split_criterion, false); RF_params rf_params; set_all_rf_params(rf_params, params.n_trees, params.bootstrap, params.max_samples, 0, params.n_streams, tree_params); //print(rf_params); //-------------------------------------------------------- // Random Forest //-------------------------------------------------------- int data_len = params.n_rows * params.n_cols; raft::allocate(data, data_len); raft::allocate(labels, params.n_rows); raft::allocate(predicted_labels, params.n_inference_rows); hipStream_t stream; CUDA_CHECK(hipStreamCreate(&stream)); // Populate data (assume Col major) std::vector<T> data_h = {0.0, 0.0, 0.0, 0.0, 10.0, 20.0, 30.0, 40.0}; data_h.resize(data_len); raft::update_device(data, data_h.data(), data_len, stream); // Populate labels labels_h = {1.0, 2.0, 3.0, 4.0}; labels_h.resize(params.n_rows); raft::update_device(labels, labels_h.data(), params.n_rows, stream); forest = new typename ML::RandomForestMetaData<T, T>; null_trees_ptr(forest); raft::handle_t handle(rf_params.n_streams); handle.set_stream(stream); fit(handle, forest, data, params.n_rows, params.n_cols, labels, rf_params); CUDA_CHECK(hipStreamSynchronize(stream)); // Inference data: same as train, but row major int inference_data_len = params.n_inference_rows * params.n_cols; inference_data_h = {0.0, 10.0, 0.0, 20.0, 0.0, 30.0, 0.0, 40.0}; inference_data_h.resize(inference_data_len); raft::allocate(inference_data_d, inference_data_len); raft::update_device(inference_data_d, inference_data_h.data(), inference_data_len, stream); predict(handle, forest, inference_data_d, params.n_inference_rows, params.n_cols, predicted_labels); // Predict and compare against known labels RF_metrics tmp = score(handle, forest, labels, params.n_inference_rows, predicted_labels); CUDA_CHECK(hipStreamSynchronize(stream)); CUDA_CHECK(hipStreamDestroy(stream)); mse = tmp.mean_squared_error; } void SetUp() override { basicTest(); } void TearDown() override { mse = -1.0f; // reset mse inference_data_h.clear(); 
labels_h.clear(); CUDA_CHECK(hipFree(labels)); CUDA_CHECK(hipFree(predicted_labels)); CUDA_CHECK(hipFree(data)); CUDA_CHECK(hipFree(inference_data_d)); delete forest; } protected: RfInputs<T> params; T *data, *inference_data_d; T* labels; std::vector<T> inference_data_h; std::vector<T> labels_h; RandomForestMetaData<T, T>* forest; float mse = -1.0f; // overriden in each test SetUp and TearDown T* predicted_labels; }; //------------------------------------------------------------------------------------------------------------------------------------- const std::vector<RfInputs<float>> inputsf2_clf = { {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::GINI}, // single tree forest, bootstrap false, depth 8, 4 bins {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::GINI}, // single tree forest, bootstrap false, depth of 8, 4 bins {4, 2, 10, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION:: GINI}, //forest with 10 trees, all trees should produce identical predictions (no bootstrapping or column subsampling) {4, 2, 10, 0.8f, 0.8f, 4, 7, -1, true, false, 3, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION:: GINI}, //forest with 10 trees, with bootstrap and column subsampling enabled, 3 bins {4, 2, 10, 0.8f, 0.8f, 4, 7, -1, true, false, 3, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION:: CRITERION_END}, //forest with 10 trees, with bootstrap and column subsampling enabled, 3 bins, different split algorithm {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::ENTROPY}, {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::ENTROPY}, {4, 2, 10, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::ENTROPY}, {4, 2, 10, 0.8f, 0.8f, 4, 7, -1, true, false, 3, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::ENTROPY}, {4, 2, 10, 0.8f, 0.8f, 4, 7, -1, true, false, 3, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::ENTROPY}, {50, 10, 10, 0.8f, 0.8f, 10, 7, -1, true, true, 3, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::ENTROPY}}; const std::vector<RfInputs<double>> inputsd2_clf = { // Same as inputsf2_clf {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::GINI}, {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::GINI}, {4, 2, 10, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::GINI}, {4, 2, 10, 0.8f, 0.8f, 4, 7, -1, true, false, 3, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::GINI}, {4, 2, 10, 0.8f, 0.8f, 4, 7, -1, true, false, 3, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::CRITERION_END}, {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::ENTROPY}, {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::ENTROPY}, {4, 2, 10, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::ENTROPY}, {4, 2, 10, 0.8f, 0.8f, 4, 7, -1, true, false, 3, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::ENTROPY}, {4, 2, 10, 0.8f, 0.8f, 4, 7, -1, true, false, 3, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::ENTROPY}, {50, 10, 10, 0.8f, 0.8f, 10, 7, -1, true, true, 3, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::ENTROPY}}; typedef RfClassifierTest<float> RfClassifierTestF; TEST_P(RfClassifierTestF, Fit) { //print_rf_detailed(forest); // Prints all trees in the forest. 
Leaf nodes use the remapped values from labels_map. if (!params.bootstrap && (params.max_features == 1.0f)) { ASSERT_TRUE(accuracy == 1.0f); } else { ASSERT_TRUE(accuracy >= 0.75f); // Empirically derived accuracy range } } typedef RfClassifierTest<double> RfClassifierTestD; TEST_P(RfClassifierTestD, Fit) { if (!params.bootstrap && (params.max_features == 1.0f)) { ASSERT_TRUE(accuracy == 1.0f); } else { ASSERT_TRUE(accuracy >= 0.75f); } } INSTANTIATE_TEST_CASE_P(RfClassifierTests, RfClassifierTestF, ::testing::ValuesIn(inputsf2_clf)); INSTANTIATE_TEST_CASE_P(RfClassifierTests, RfClassifierTestD, ::testing::ValuesIn(inputsd2_clf)); typedef RfRegressorTest<float> RfRegressorTestF; TEST_P(RfRegressorTestF, Fit) { //print_rf_detailed(forest); // Prints all trees in the forest. if (!params.bootstrap && (params.max_features == 1.0f)) { ASSERT_TRUE(mse == 0.0f); } else { ASSERT_TRUE(mse <= 0.2f); } } typedef RfRegressorTest<double> RfRegressorTestD; TEST_P(RfRegressorTestD, Fit) { if (!params.bootstrap && (params.max_features == 1.0f)) { ASSERT_TRUE(mse == 0.0f); } else { ASSERT_TRUE(mse <= 0.2f); } } const std::vector<RfInputs<float>> inputsf2_reg = { {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::MSE}, {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::MSE}, {4, 2, 5, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION:: CRITERION_END}, // CRITERION_END uses the default criterion (GINI for classification, MSE for regression) {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::MAE}, {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::MAE}, {4, 2, 5, 1.0f, 1.0f, 4, 7, -1, true, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::CRITERION_END}}; const std::vector<RfInputs<double>> inputsd2_reg = { // Same as inputsf2_reg {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::MSE}, {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::MSE}, {4, 2, 5, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::CRITERION_END}, {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::MAE}, {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::MAE}, {4, 2, 5, 1.0f, 1.0f, 4, 7, -1, true, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::CRITERION_END}}; INSTANTIATE_TEST_CASE_P(RfRegressorTests, RfRegressorTestF, ::testing::ValuesIn(inputsf2_reg)); INSTANTIATE_TEST_CASE_P(RfRegressorTests, RfRegressorTestD, ::testing::ValuesIn(inputsd2_reg)); } // end namespace ML
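The classifier test above initialises its 4x2 training matrix in column-major order ({30, 1, 2, 0, 10, 20, 10, 40}) and then re-enters the same four rows in row-major order for inference ({30, 10, 1, 20, 2, 10, 0, 40}). A small host-side check of the indexing, using hypothetical variable names, shows that the two literal lists describe the same logical matrix:

// Hypothetical check that {30,1,2,0, 10,20,10,40} (column-major, as trained)
// and {30,10, 1,20, 2,10, 0,40} (row-major, as used for inference) are the
// same 4x2 matrix. Purely illustrative; not part of the test.
#include <cstdio>

int main() {
  const int n_rows = 4, n_cols = 2;
  const float col_major[] = {30, 1, 2, 0, 10, 20, 10, 40};   // training layout
  const float row_major[] = {30, 10, 1, 20, 2, 10, 0, 40};   // inference layout
  for (int r = 0; r < n_rows; ++r)
    for (int c = 0; c < n_cols; ++c)
      std::printf("row %d col %d: col-major %.0f, row-major %.0f\n", r, c,
                  col_major[c * n_rows + r], row_major[r * n_cols + c]);
  return 0;
}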
98581f25b1ee0a54e79a76726321b1e872e33c0c.cu
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <test_utils.h> #include <cuml/ensemble/randomforest.hpp> #include <raft/cuda_utils.cuh> namespace ML { using namespace MLCommon; template <typename T> // template useless for now. struct RfInputs { int n_rows; int n_cols; int n_trees; float max_features; float max_samples; int n_inference_rows; int max_depth; int max_leaves; bool bootstrap; bool bootstrap_features; int n_bins; int split_algo; int min_samples_leaf; int min_samples_split; float min_impurity_decrease; int n_streams; CRITERION split_criterion; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const RfInputs<T>& dims) { return os; } template <typename T> class RfClassifierTest : public ::testing::TestWithParam<RfInputs<T>> { protected: void basicTest() { params = ::testing::TestWithParam<RfInputs<T>>::GetParam(); DecisionTree::DecisionTreeParams tree_params; set_tree_params(tree_params, params.max_depth, params.max_leaves, params.max_features, params.n_bins, params.split_algo, params.min_samples_leaf, params.min_samples_split, params.min_impurity_decrease, params.bootstrap_features, params.split_criterion, false); RF_params rf_params; set_all_rf_params(rf_params, params.n_trees, params.bootstrap, params.max_samples, 0, params.n_streams, tree_params); //print(rf_params); //-------------------------------------------------------- // Random Forest //-------------------------------------------------------- int data_len = params.n_rows * params.n_cols; raft::allocate(data, data_len); raft::allocate(labels, params.n_rows); raft::allocate(predicted_labels, params.n_inference_rows); cudaStream_t stream; CUDA_CHECK(cudaStreamCreate(&stream)); // Populate data (assume Col major) std::vector<T> data_h = {30.0, 1.0, 2.0, 0.0, 10.0, 20.0, 10.0, 40.0}; data_h.resize(data_len); raft::update_device(data, data_h.data(), data_len, stream); // Populate labels labels_h = {0, 1, 0, 4}; labels_h.resize(params.n_rows); preprocess_labels(params.n_rows, labels_h, labels_map); raft::update_device(labels, labels_h.data(), params.n_rows, stream); forest = new typename ML::RandomForestMetaData<T, int>; null_trees_ptr(forest); raft::handle_t handle(rf_params.n_streams); handle.set_stream(stream); fit(handle, forest, data, params.n_rows, params.n_cols, labels, labels_map.size(), rf_params); CUDA_CHECK(cudaStreamSynchronize(stream)); //print_rf_detailed(forest); // Inference data: same as train, but row major int inference_data_len = params.n_inference_rows * params.n_cols; inference_data_h = {30.0, 10.0, 1.0, 20.0, 2.0, 10.0, 0.0, 40.0}; inference_data_h.resize(inference_data_len); raft::allocate(inference_data_d, inference_data_len); raft::update_device(inference_data_d, inference_data_h.data(), inference_data_len, stream); predict(handle, forest, inference_data_d, params.n_inference_rows, params.n_cols, predicted_labels); // Predict and compare against known labels RF_metrics tmp = 
score(handle, forest, labels, params.n_inference_rows, predicted_labels); CUDA_CHECK(cudaStreamSynchronize(stream)); CUDA_CHECK(cudaStreamDestroy(stream)); accuracy = tmp.accuracy; } void SetUp() override { basicTest(); } void TearDown() override { accuracy = -1.0f; // reset accuracy postprocess_labels(params.n_rows, labels_h, labels_map); inference_data_h.clear(); labels_h.clear(); labels_map.clear(); CUDA_CHECK(cudaFree(labels)); CUDA_CHECK(cudaFree(predicted_labels)); CUDA_CHECK(cudaFree(data)); CUDA_CHECK(cudaFree(inference_data_d)); delete forest; } protected: RfInputs<T> params; T *data, *inference_data_d; int* labels; std::vector<T> inference_data_h; std::vector<int> labels_h; std::map<int, int> labels_map; //unique map of labels to int vals starting from 0 RandomForestMetaData<T, int>* forest; float accuracy = -1.0f; // overriden in each test SetUp and TearDown int* predicted_labels; }; //------------------------------------------------------------------------------------------------------------------------------------- template <typename T> class RfRegressorTest : public ::testing::TestWithParam<RfInputs<T>> { protected: void basicTest() { params = ::testing::TestWithParam<RfInputs<T>>::GetParam(); DecisionTree::DecisionTreeParams tree_params; set_tree_params(tree_params, params.max_depth, params.max_leaves, params.max_features, params.n_bins, params.split_algo, params.min_samples_leaf, params.min_samples_split, params.min_impurity_decrease, params.bootstrap_features, params.split_criterion, false); RF_params rf_params; set_all_rf_params(rf_params, params.n_trees, params.bootstrap, params.max_samples, 0, params.n_streams, tree_params); //print(rf_params); //-------------------------------------------------------- // Random Forest //-------------------------------------------------------- int data_len = params.n_rows * params.n_cols; raft::allocate(data, data_len); raft::allocate(labels, params.n_rows); raft::allocate(predicted_labels, params.n_inference_rows); cudaStream_t stream; CUDA_CHECK(cudaStreamCreate(&stream)); // Populate data (assume Col major) std::vector<T> data_h = {0.0, 0.0, 0.0, 0.0, 10.0, 20.0, 30.0, 40.0}; data_h.resize(data_len); raft::update_device(data, data_h.data(), data_len, stream); // Populate labels labels_h = {1.0, 2.0, 3.0, 4.0}; labels_h.resize(params.n_rows); raft::update_device(labels, labels_h.data(), params.n_rows, stream); forest = new typename ML::RandomForestMetaData<T, T>; null_trees_ptr(forest); raft::handle_t handle(rf_params.n_streams); handle.set_stream(stream); fit(handle, forest, data, params.n_rows, params.n_cols, labels, rf_params); CUDA_CHECK(cudaStreamSynchronize(stream)); // Inference data: same as train, but row major int inference_data_len = params.n_inference_rows * params.n_cols; inference_data_h = {0.0, 10.0, 0.0, 20.0, 0.0, 30.0, 0.0, 40.0}; inference_data_h.resize(inference_data_len); raft::allocate(inference_data_d, inference_data_len); raft::update_device(inference_data_d, inference_data_h.data(), inference_data_len, stream); predict(handle, forest, inference_data_d, params.n_inference_rows, params.n_cols, predicted_labels); // Predict and compare against known labels RF_metrics tmp = score(handle, forest, labels, params.n_inference_rows, predicted_labels); CUDA_CHECK(cudaStreamSynchronize(stream)); CUDA_CHECK(cudaStreamDestroy(stream)); mse = tmp.mean_squared_error; } void SetUp() override { basicTest(); } void TearDown() override { mse = -1.0f; // reset mse inference_data_h.clear(); labels_h.clear(); 
CUDA_CHECK(cudaFree(labels)); CUDA_CHECK(cudaFree(predicted_labels)); CUDA_CHECK(cudaFree(data)); CUDA_CHECK(cudaFree(inference_data_d)); delete forest; } protected: RfInputs<T> params; T *data, *inference_data_d; T* labels; std::vector<T> inference_data_h; std::vector<T> labels_h; RandomForestMetaData<T, T>* forest; float mse = -1.0f; // overriden in each test SetUp and TearDown T* predicted_labels; }; //------------------------------------------------------------------------------------------------------------------------------------- const std::vector<RfInputs<float>> inputsf2_clf = { {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::GINI}, // single tree forest, bootstrap false, depth 8, 4 bins {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::GINI}, // single tree forest, bootstrap false, depth of 8, 4 bins {4, 2, 10, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION:: GINI}, //forest with 10 trees, all trees should produce identical predictions (no bootstrapping or column subsampling) {4, 2, 10, 0.8f, 0.8f, 4, 7, -1, true, false, 3, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION:: GINI}, //forest with 10 trees, with bootstrap and column subsampling enabled, 3 bins {4, 2, 10, 0.8f, 0.8f, 4, 7, -1, true, false, 3, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION:: CRITERION_END}, //forest with 10 trees, with bootstrap and column subsampling enabled, 3 bins, different split algorithm {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::ENTROPY}, {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::ENTROPY}, {4, 2, 10, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::ENTROPY}, {4, 2, 10, 0.8f, 0.8f, 4, 7, -1, true, false, 3, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::ENTROPY}, {4, 2, 10, 0.8f, 0.8f, 4, 7, -1, true, false, 3, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::ENTROPY}, {50, 10, 10, 0.8f, 0.8f, 10, 7, -1, true, true, 3, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::ENTROPY}}; const std::vector<RfInputs<double>> inputsd2_clf = { // Same as inputsf2_clf {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::GINI}, {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::GINI}, {4, 2, 10, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::GINI}, {4, 2, 10, 0.8f, 0.8f, 4, 7, -1, true, false, 3, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::GINI}, {4, 2, 10, 0.8f, 0.8f, 4, 7, -1, true, false, 3, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::CRITERION_END}, {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::ENTROPY}, {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::ENTROPY}, {4, 2, 10, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::ENTROPY}, {4, 2, 10, 0.8f, 0.8f, 4, 7, -1, true, false, 3, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::ENTROPY}, {4, 2, 10, 0.8f, 0.8f, 4, 7, -1, true, false, 3, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::ENTROPY}, {50, 10, 10, 0.8f, 0.8f, 10, 7, -1, true, true, 3, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::ENTROPY}}; typedef RfClassifierTest<float> RfClassifierTestF; TEST_P(RfClassifierTestF, Fit) { //print_rf_detailed(forest); // Prints all trees in the forest. 
Leaf nodes use the remapped values from labels_map. if (!params.bootstrap && (params.max_features == 1.0f)) { ASSERT_TRUE(accuracy == 1.0f); } else { ASSERT_TRUE(accuracy >= 0.75f); // Empirically derived accuracy range } } typedef RfClassifierTest<double> RfClassifierTestD; TEST_P(RfClassifierTestD, Fit) { if (!params.bootstrap && (params.max_features == 1.0f)) { ASSERT_TRUE(accuracy == 1.0f); } else { ASSERT_TRUE(accuracy >= 0.75f); } } INSTANTIATE_TEST_CASE_P(RfClassifierTests, RfClassifierTestF, ::testing::ValuesIn(inputsf2_clf)); INSTANTIATE_TEST_CASE_P(RfClassifierTests, RfClassifierTestD, ::testing::ValuesIn(inputsd2_clf)); typedef RfRegressorTest<float> RfRegressorTestF; TEST_P(RfRegressorTestF, Fit) { //print_rf_detailed(forest); // Prints all trees in the forest. if (!params.bootstrap && (params.max_features == 1.0f)) { ASSERT_TRUE(mse == 0.0f); } else { ASSERT_TRUE(mse <= 0.2f); } } typedef RfRegressorTest<double> RfRegressorTestD; TEST_P(RfRegressorTestD, Fit) { if (!params.bootstrap && (params.max_features == 1.0f)) { ASSERT_TRUE(mse == 0.0f); } else { ASSERT_TRUE(mse <= 0.2f); } } const std::vector<RfInputs<float>> inputsf2_reg = { {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::MSE}, {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::MSE}, {4, 2, 5, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION:: CRITERION_END}, // CRITERION_END uses the default criterion (GINI for classification, MSE for regression) {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::MAE}, {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::MAE}, {4, 2, 5, 1.0f, 1.0f, 4, 7, -1, true, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::CRITERION_END}}; const std::vector<RfInputs<double>> inputsd2_reg = { // Same as inputsf2_reg {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::MSE}, {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::MSE}, {4, 2, 5, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::CRITERION_END}, {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::MAE}, {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::MAE}, {4, 2, 5, 1.0f, 1.0f, 4, 7, -1, true, false, 4, SPLIT_ALGO::HIST, 2, 2, 0.0, 2, CRITERION::CRITERION_END}}; INSTANTIATE_TEST_CASE_P(RfRegressorTests, RfRegressorTestF, ::testing::ValuesIn(inputsf2_reg)); INSTANTIATE_TEST_CASE_P(RfRegressorTests, RfRegressorTestD, ::testing::ValuesIn(inputsd2_reg)); } // end namespace ML
dc3380789ed9a8b8558592e37bb5d7f964d85540.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ---------------------------------------------------------------------------------------------------- // Copyrighted by Marko Rakita. // Author: Marko Rakita // File contains: Neural network softmax layer. // Created: 02/20/2016. // ---------------------------------------------------------------------------------------------------- #include "include/softmaxlayer.cuh" SoftMaxLayer::SoftMaxLayer(ParallelismMode parallelismMode, hipStream_t deviceCalculationStream, hipStream_t deviceMemoryStream, uint inputDataSize, uint inputDataCount, bool holdsInputData) { m_layerType = LayerType::SoftMax; m_parallelismMode = parallelismMode; m_deviceCalculationStream = deviceCalculationStream; m_deviceMemoryStream = deviceMemoryStream; m_indexInTier = 0; m_tierSize = 1; m_inputNumChannels = m_activationNumChannels = 1; m_inputDataWidth = m_activationDataWidth = inputDataSize; m_inputDataHeight = m_activationDataHeight = 1; m_inputDataSize = m_activationDataSize = inputDataSize; m_inputDataCount = inputDataCount; m_holdsInputData = holdsInputData; // Allocating input data buffer. m_inputBufferSize = m_inputDataSize * m_inputDataCount * sizeof(float); if (m_holdsInputData) { CudaAssert(hipMalloc<float>(&m_inputDataBuffer, m_inputBufferSize)); } // Allocating input gradients buffer. CudaAssert(hipMalloc<float>(&m_inputGradientsBuffer, m_inputBufferSize)); // Allocating input activations maximums buffer. CudaAssert(hipMalloc<float>(&m_inputActivationsMaxBuffer, m_inputDataCount * sizeof(float))); // Allocating input activations maximums buffer. CudaAssert(hipMalloc<float>(&m_exponentialsSumBuffer, m_inputDataCount * sizeof(float))); // Allocating activation data buffers. m_activationBufferSize = m_inputBufferSize; CudaAssert(hipMalloc<float>(&m_activationDataBuffer, m_activationBufferSize)); m_holdsActivationGradients = false; } SoftMaxLayer::~SoftMaxLayer() { CudaAssert(hipFree(m_inputActivationsMaxBuffer)); CudaAssert(hipFree(m_exponentialsSumBuffer)); } void SoftMaxLayer::LoadInputs() { CommonLoadInputs(); } /* Finds maximum values of input activations for each input sample. */ __global__ void FindMaximums(float* inputActivations, const uint numInputSamples, const uint numInputActivations, float* inputActivationsMaximums) { const uint c_sampleIndex = blockIdx.x * blockDim.x + threadIdx.x; if (c_sampleIndex < numInputSamples) { float activationMaximum = inputActivations[c_sampleIndex]; for (uint activationIndex = 1; activationIndex < numInputActivations; ++activationIndex) { activationMaximum = max(activationMaximum, inputActivations[activationIndex * numInputSamples + c_sampleIndex]); } inputActivationsMaximums[c_sampleIndex] = activationMaximum; } } /* Subtracts maximum values of input activations from all input activations for each input sample. 
*/ template <uint c_blockWidth> __global__ void SubtractMaximums(float* inputActivations, const uint numInputSamples, const uint numInputActivations, float* inputActivationsMaximums, float* outputActivations) { __shared__ float maximums[c_blockWidth]; for (uint y = blockIdx.y * blockDim.y + threadIdx.y; y < numInputActivations; y += gridDim.y * blockDim.y) { __syncthreads(); if (threadIdx.y == 0) { maximums[threadIdx.x] = inputActivationsMaximums[blockIdx.x * blockDim.x + threadIdx.x]; } __syncthreads(); const uint c_offset = y * numInputSamples; for (uint x = blockIdx.x * blockDim.x + threadIdx.x; x < numInputSamples; x += gridDim.x * blockDim.x) { outputActivations[c_offset + x] = inputActivations[c_offset + x] - maximums[threadIdx.x]; } } } void SoftMaxLayer::StabilizeInputs() { // Finding maximums of input activations. const uint c_numThreadsPerBlock = min((uint)Config::MAX_NUM_THREADS, RoundUp(m_inputDataCount, Config::WARP_SIZE)); const uint c_numBlocks = DivideUp(m_inputDataCount, c_numThreadsPerBlock); LAUNCH_KERNEL_ASYNC(FindMaximums, dim3(c_numBlocks), dim3(c_numThreadsPerBlock), m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataCount, m_activationDataSize, m_inputActivationsMaxBuffer); CudaAssert(hipGetLastError()); // Substracting maximums of input activations from all the input activations. const uint c_blockWidth = 64; const uint c_blockHeight = (uint)Config::MAX_NUM_THREADS / c_blockWidth; dim3 blockDimensions(c_blockWidth, c_blockHeight); const uint c_maxGridBlocks = 128; const uint c_gridWidth = min(c_maxGridBlocks, DivideUp(m_inputDataCount, c_blockWidth)); const uint c_gridHeight = min(c_maxGridBlocks / c_gridWidth, DivideUp(m_activationDataSize, c_blockHeight)); dim3 gridDimensions(c_gridWidth, c_gridHeight); LAUNCH_KERNEL_ASYNC((SubtractMaximums<c_blockWidth>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataCount, m_activationDataSize, m_inputActivationsMaxBuffer, m_activationDataBuffer); CudaAssert(hipGetLastError()); } /* Computes the exponentials of activations. */ __global__ void ComputeExponentials(float* activations, const uint activationsLength) { for (uint activationIndex = blockIdx.x * blockDim.x + threadIdx.x; activationIndex < activationsLength; activationIndex += gridDim.x * blockDim.x) { activations[activationIndex] = __expf(activations[activationIndex]); } } /* Computes sum of the exponentials of activations. */ __global__ void ComputeSumOfExponentials(float* activations, const uint numInputSamples, const uint numActivations, float* exponentialsSumBuffer) { const uint c_sampleIndex = blockIdx.x * blockDim.x + threadIdx.x; if (c_sampleIndex < numInputSamples) { float exponentialsSum = 0.f; for (uint activationIndex = 0; activationIndex < numActivations; ++activationIndex) { exponentialsSum += activations[activationIndex * numInputSamples + c_sampleIndex]; } exponentialsSumBuffer[c_sampleIndex] = exponentialsSum; } } /* Divides activation exponentials with their sum to get soft maximums. 
*/ template <uint c_blockWidth> __global__ void DivideExponentialsWithSum(float* activationExponentials, const uint numInputSamples, const uint numActivations, float* exponentialsSumBuffer) { __shared__ float exponentialsSums[c_blockWidth]; for (uint y = blockIdx.y * blockDim.y + threadIdx.y; y < numActivations; y += gridDim.y * blockDim.y) { __syncthreads(); if (threadIdx.y == 0) { exponentialsSums[threadIdx.x] = exponentialsSumBuffer[blockIdx.x * blockDim.x + threadIdx.x]; } __syncthreads(); const uint c_offset = y * numInputSamples; for (uint x = blockIdx.x * blockDim.x + threadIdx.x; x < numInputSamples; x += gridDim.x * blockDim.x) { activationExponentials[c_offset + x] = __fdividef(activationExponentials[c_offset + x], exponentialsSums[threadIdx.x]); } } } void SoftMaxLayer::CalculateSoftMaximums() { // Computing the exponentials. const uint c_activationBufferLength = (uint)(m_activationBufferSize / sizeof(float)); uint numBlocks = 128; uint numThreadsPerBlock = 128; dim3 blockDimensions(numThreadsPerBlock); dim3 gridDimensions(min(numBlocks, DivideUp(c_activationBufferLength, numThreadsPerBlock))); LAUNCH_KERNEL_ASYNC(ComputeExponentials, gridDimensions, blockDimensions, m_deviceCalculationStream)(m_activationDataBuffer, c_activationBufferLength); CudaAssert(hipGetLastError()); // Computing sum of the exponentials. numThreadsPerBlock = min((uint)Config::MAX_NUM_THREADS, RoundUp(m_inputDataCount, Config::WARP_SIZE)); numBlocks = DivideUp(m_inputDataCount, numThreadsPerBlock); LAUNCH_KERNEL_ASYNC(ComputeSumOfExponentials, dim3(numBlocks), dim3(numThreadsPerBlock), m_deviceCalculationStream)(m_activationDataBuffer, m_inputDataCount, m_activationDataSize, m_exponentialsSumBuffer); CudaAssert(hipGetLastError()); // Dividing exponentials with their sum to get soft maximums. const uint c_blockWidth = 64; const uint c_blockHeight = (uint)Config::MAX_NUM_THREADS / c_blockWidth; blockDimensions = dim3(c_blockWidth, c_blockHeight); const uint c_maxGridBlocks = 128; const uint c_gridWidth = min(c_maxGridBlocks, DivideUp(m_inputDataCount, c_blockWidth)); const uint c_gridHeight = min(c_maxGridBlocks / c_gridWidth, DivideUp(m_activationDataSize, c_blockHeight)); gridDimensions = dim3(c_gridWidth, c_gridHeight); LAUNCH_KERNEL_ASYNC((DivideExponentialsWithSum<c_blockWidth>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_activationDataBuffer, m_inputDataCount, m_activationDataSize, m_exponentialsSumBuffer); CudaAssert(hipGetLastError()); } void SoftMaxLayer::DoForwardProp(PropagationMode propagationMode) { StabilizeInputs(); CalculateSoftMaximums(); } /* Calculates input gradients in case of logistic regression output layer. */ __global__ void CalculateLogisticRegressionInputGradients(float* activations, uint* dataLabels, const uint dataCount, const uint numActivations, float* inputGradients) { const uint c_dataIndex = blockIdx.x * blockDim.x + threadIdx.x; const uint c_activationIndex = blockIdx.y * blockDim.y + threadIdx.y; const uint c_activationsOffset = c_activationIndex * dataCount + c_dataIndex; if (c_dataIndex < dataCount && c_activationIndex < numActivations) { inputGradients[c_activationsOffset] = (dataLabels[c_dataIndex] == c_activationIndex ? 
1.f : 0.f) - activations[c_activationsOffset]; } } void SoftMaxLayer::LogisticRegressionBackwardProp(uint* dataLabels) { const uint c_blockWidth = 32; const uint c_blockHeight = 4; dim3 blockDimensions(c_blockWidth, c_blockHeight); const uint c_gridWidth = DivideUp(m_inputDataCount, c_blockWidth); const uint c_gridHeight = DivideUp(m_activationDataSize, c_blockHeight); dim3 gridDimensions(c_gridWidth, c_gridHeight); LAUNCH_KERNEL_ASYNC(CalculateLogisticRegressionInputGradients, gridDimensions, blockDimensions, m_deviceCalculationStream)(m_activationDataBuffer, dataLabels, m_inputDataCount, m_activationDataSize, m_inputGradientsBuffer); CudaAssert(hipGetLastError()); } void SoftMaxLayer::DoBackwardProp() { if (m_nextLayers[0]->GetLayerType() == LayerType::Output) { OutputLayer* outputLayer = static_cast<OutputLayer*>(m_nextLayers[0]); if (outputLayer->GetLossFunctionType() == LossFunctionType::LogisticRegression) { LogisticRegressionBackwardProp(outputLayer->GetDataLabels()); } else { ShipAssert(false, "Currently not supported!"); } } else { ShipAssert(false, "Currently not supported!"); } }
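The layer above computes softmax in the standard numerically stable way: subtract the per-sample maximum, exponentiate, then divide by the per-sample sum of exponentials, with samples stored along the fast dimension (activations[a * numInputSamples + s]). For reference, a minimal one-thread-per-sample sketch of the same computation, without the separate maximum/sum buffers or shared-memory tiling, could look like the following; only the column-major indexing is taken from the layer, everything else is simplified.

// Minimal sketch: one thread per sample, numerically stable softmax over
// num_activations entries stored column-major (sample index varies fastest),
// matching the indexing used by SoftMaxLayer above. No tiling, no shared
// memory; purely illustrative.
__global__ void softmax_per_sample(const float* in, float* out,
                                   unsigned num_samples, unsigned num_activations)
{
  unsigned s = blockIdx.x * blockDim.x + threadIdx.x;
  if (s >= num_samples) return;

  float m = in[s];                                   // running maximum
  for (unsigned a = 1; a < num_activations; ++a)
    m = fmaxf(m, in[a * num_samples + s]);

  float sum = 0.0f;                                  // sum of shifted exponentials
  for (unsigned a = 0; a < num_activations; ++a) {
    float e = __expf(in[a * num_samples + s] - m);
    out[a * num_samples + s] = e;
    sum += e;
  }
  for (unsigned a = 0; a < num_activations; ++a)     // normalize to soft maximums
    out[a * num_samples + s] /= sum;
}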
dc3380789ed9a8b8558592e37bb5d7f964d85540.cu
// ---------------------------------------------------------------------------------------------------- // Copyrighted by Marko Rakita. // Author: Marko Rakita // File contains: Neural network softmax layer. // Created: 02/20/2016. // ---------------------------------------------------------------------------------------------------- #include "include/softmaxlayer.cuh" SoftMaxLayer::SoftMaxLayer(ParallelismMode parallelismMode, cudaStream_t deviceCalculationStream, cudaStream_t deviceMemoryStream, uint inputDataSize, uint inputDataCount, bool holdsInputData) { m_layerType = LayerType::SoftMax; m_parallelismMode = parallelismMode; m_deviceCalculationStream = deviceCalculationStream; m_deviceMemoryStream = deviceMemoryStream; m_indexInTier = 0; m_tierSize = 1; m_inputNumChannels = m_activationNumChannels = 1; m_inputDataWidth = m_activationDataWidth = inputDataSize; m_inputDataHeight = m_activationDataHeight = 1; m_inputDataSize = m_activationDataSize = inputDataSize; m_inputDataCount = inputDataCount; m_holdsInputData = holdsInputData; // Allocating input data buffer. m_inputBufferSize = m_inputDataSize * m_inputDataCount * sizeof(float); if (m_holdsInputData) { CudaAssert(cudaMalloc<float>(&m_inputDataBuffer, m_inputBufferSize)); } // Allocating input gradients buffer. CudaAssert(cudaMalloc<float>(&m_inputGradientsBuffer, m_inputBufferSize)); // Allocating input activations maximums buffer. CudaAssert(cudaMalloc<float>(&m_inputActivationsMaxBuffer, m_inputDataCount * sizeof(float))); // Allocating input activations maximums buffer. CudaAssert(cudaMalloc<float>(&m_exponentialsSumBuffer, m_inputDataCount * sizeof(float))); // Allocating activation data buffers. m_activationBufferSize = m_inputBufferSize; CudaAssert(cudaMalloc<float>(&m_activationDataBuffer, m_activationBufferSize)); m_holdsActivationGradients = false; } SoftMaxLayer::~SoftMaxLayer() { CudaAssert(cudaFree(m_inputActivationsMaxBuffer)); CudaAssert(cudaFree(m_exponentialsSumBuffer)); } void SoftMaxLayer::LoadInputs() { CommonLoadInputs(); } /* Finds maximum values of input activations for each input sample. */ __global__ void FindMaximums(float* inputActivations, const uint numInputSamples, const uint numInputActivations, float* inputActivationsMaximums) { const uint c_sampleIndex = blockIdx.x * blockDim.x + threadIdx.x; if (c_sampleIndex < numInputSamples) { float activationMaximum = inputActivations[c_sampleIndex]; for (uint activationIndex = 1; activationIndex < numInputActivations; ++activationIndex) { activationMaximum = max(activationMaximum, inputActivations[activationIndex * numInputSamples + c_sampleIndex]); } inputActivationsMaximums[c_sampleIndex] = activationMaximum; } } /* Subtracts maximum values of input activations from all input activations for each input sample. 
*/ template <uint c_blockWidth> __global__ void SubtractMaximums(float* inputActivations, const uint numInputSamples, const uint numInputActivations, float* inputActivationsMaximums, float* outputActivations) { __shared__ float maximums[c_blockWidth]; for (uint y = blockIdx.y * blockDim.y + threadIdx.y; y < numInputActivations; y += gridDim.y * blockDim.y) { __syncthreads(); if (threadIdx.y == 0) { maximums[threadIdx.x] = inputActivationsMaximums[blockIdx.x * blockDim.x + threadIdx.x]; } __syncthreads(); const uint c_offset = y * numInputSamples; for (uint x = blockIdx.x * blockDim.x + threadIdx.x; x < numInputSamples; x += gridDim.x * blockDim.x) { outputActivations[c_offset + x] = inputActivations[c_offset + x] - maximums[threadIdx.x]; } } } void SoftMaxLayer::StabilizeInputs() { // Finding maximums of input activations. const uint c_numThreadsPerBlock = min((uint)Config::MAX_NUM_THREADS, RoundUp(m_inputDataCount, Config::WARP_SIZE)); const uint c_numBlocks = DivideUp(m_inputDataCount, c_numThreadsPerBlock); LAUNCH_KERNEL_ASYNC(FindMaximums, dim3(c_numBlocks), dim3(c_numThreadsPerBlock), m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataCount, m_activationDataSize, m_inputActivationsMaxBuffer); CudaAssert(cudaGetLastError()); // Substracting maximums of input activations from all the input activations. const uint c_blockWidth = 64; const uint c_blockHeight = (uint)Config::MAX_NUM_THREADS / c_blockWidth; dim3 blockDimensions(c_blockWidth, c_blockHeight); const uint c_maxGridBlocks = 128; const uint c_gridWidth = min(c_maxGridBlocks, DivideUp(m_inputDataCount, c_blockWidth)); const uint c_gridHeight = min(c_maxGridBlocks / c_gridWidth, DivideUp(m_activationDataSize, c_blockHeight)); dim3 gridDimensions(c_gridWidth, c_gridHeight); LAUNCH_KERNEL_ASYNC((SubtractMaximums<c_blockWidth>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataCount, m_activationDataSize, m_inputActivationsMaxBuffer, m_activationDataBuffer); CudaAssert(cudaGetLastError()); } /* Computes the exponentials of activations. */ __global__ void ComputeExponentials(float* activations, const uint activationsLength) { for (uint activationIndex = blockIdx.x * blockDim.x + threadIdx.x; activationIndex < activationsLength; activationIndex += gridDim.x * blockDim.x) { activations[activationIndex] = __expf(activations[activationIndex]); } } /* Computes sum of the exponentials of activations. */ __global__ void ComputeSumOfExponentials(float* activations, const uint numInputSamples, const uint numActivations, float* exponentialsSumBuffer) { const uint c_sampleIndex = blockIdx.x * blockDim.x + threadIdx.x; if (c_sampleIndex < numInputSamples) { float exponentialsSum = 0.f; for (uint activationIndex = 0; activationIndex < numActivations; ++activationIndex) { exponentialsSum += activations[activationIndex * numInputSamples + c_sampleIndex]; } exponentialsSumBuffer[c_sampleIndex] = exponentialsSum; } } /* Divides activation exponentials with their sum to get soft maximums. 
*/ template <uint c_blockWidth> __global__ void DivideExponentialsWithSum(float* activationExponentials, const uint numInputSamples, const uint numActivations, float* exponentialsSumBuffer) { __shared__ float exponentialsSums[c_blockWidth]; for (uint y = blockIdx.y * blockDim.y + threadIdx.y; y < numActivations; y += gridDim.y * blockDim.y) { __syncthreads(); if (threadIdx.y == 0) { exponentialsSums[threadIdx.x] = exponentialsSumBuffer[blockIdx.x * blockDim.x + threadIdx.x]; } __syncthreads(); const uint c_offset = y * numInputSamples; for (uint x = blockIdx.x * blockDim.x + threadIdx.x; x < numInputSamples; x += gridDim.x * blockDim.x) { activationExponentials[c_offset + x] = __fdividef(activationExponentials[c_offset + x], exponentialsSums[threadIdx.x]); } } } void SoftMaxLayer::CalculateSoftMaximums() { // Computing the exponentials. const uint c_activationBufferLength = (uint)(m_activationBufferSize / sizeof(float)); uint numBlocks = 128; uint numThreadsPerBlock = 128; dim3 blockDimensions(numThreadsPerBlock); dim3 gridDimensions(min(numBlocks, DivideUp(c_activationBufferLength, numThreadsPerBlock))); LAUNCH_KERNEL_ASYNC(ComputeExponentials, gridDimensions, blockDimensions, m_deviceCalculationStream)(m_activationDataBuffer, c_activationBufferLength); CudaAssert(cudaGetLastError()); // Computing sum of the exponentials. numThreadsPerBlock = min((uint)Config::MAX_NUM_THREADS, RoundUp(m_inputDataCount, Config::WARP_SIZE)); numBlocks = DivideUp(m_inputDataCount, numThreadsPerBlock); LAUNCH_KERNEL_ASYNC(ComputeSumOfExponentials, dim3(numBlocks), dim3(numThreadsPerBlock), m_deviceCalculationStream)(m_activationDataBuffer, m_inputDataCount, m_activationDataSize, m_exponentialsSumBuffer); CudaAssert(cudaGetLastError()); // Dividing exponentials with their sum to get soft maximums. const uint c_blockWidth = 64; const uint c_blockHeight = (uint)Config::MAX_NUM_THREADS / c_blockWidth; blockDimensions = dim3(c_blockWidth, c_blockHeight); const uint c_maxGridBlocks = 128; const uint c_gridWidth = min(c_maxGridBlocks, DivideUp(m_inputDataCount, c_blockWidth)); const uint c_gridHeight = min(c_maxGridBlocks / c_gridWidth, DivideUp(m_activationDataSize, c_blockHeight)); gridDimensions = dim3(c_gridWidth, c_gridHeight); LAUNCH_KERNEL_ASYNC((DivideExponentialsWithSum<c_blockWidth>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_activationDataBuffer, m_inputDataCount, m_activationDataSize, m_exponentialsSumBuffer); CudaAssert(cudaGetLastError()); } void SoftMaxLayer::DoForwardProp(PropagationMode propagationMode) { StabilizeInputs(); CalculateSoftMaximums(); } /* Calculates input gradients in case of logistic regression output layer. */ __global__ void CalculateLogisticRegressionInputGradients(float* activations, uint* dataLabels, const uint dataCount, const uint numActivations, float* inputGradients) { const uint c_dataIndex = blockIdx.x * blockDim.x + threadIdx.x; const uint c_activationIndex = blockIdx.y * blockDim.y + threadIdx.y; const uint c_activationsOffset = c_activationIndex * dataCount + c_dataIndex; if (c_dataIndex < dataCount && c_activationIndex < numActivations) { inputGradients[c_activationsOffset] = (dataLabels[c_dataIndex] == c_activationIndex ? 
1.f : 0.f) - activations[c_activationsOffset]; } } void SoftMaxLayer::LogisticRegressionBackwardProp(uint* dataLabels) { const uint c_blockWidth = 32; const uint c_blockHeight = 4; dim3 blockDimensions(c_blockWidth, c_blockHeight); const uint c_gridWidth = DivideUp(m_inputDataCount, c_blockWidth); const uint c_gridHeight = DivideUp(m_activationDataSize, c_blockHeight); dim3 gridDimensions(c_gridWidth, c_gridHeight); LAUNCH_KERNEL_ASYNC(CalculateLogisticRegressionInputGradients, gridDimensions, blockDimensions, m_deviceCalculationStream)(m_activationDataBuffer, dataLabels, m_inputDataCount, m_activationDataSize, m_inputGradientsBuffer); CudaAssert(cudaGetLastError()); } void SoftMaxLayer::DoBackwardProp() { if (m_nextLayers[0]->GetLayerType() == LayerType::Output) { OutputLayer* outputLayer = static_cast<OutputLayer*>(m_nextLayers[0]); if (outputLayer->GetLossFunctionType() == LossFunctionType::LogisticRegression) { LogisticRegressionBackwardProp(outputLayer->GetDataLabels()); } else { ShipAssert(false, "Currently not supported!"); } } else { ShipAssert(false, "Currently not supported!"); } }
9368061449bbc3a4ed1018d25c34d127f7bc141a.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <gtest/gtest.h> #include <raft/core/resource/cuda_stream.hpp> #include <raft/linalg/ternary_op.cuh> #include <raft/random/rng.cuh> #include <raft/util/cudart_utils.hpp> namespace raft { namespace linalg { template <typename InType, typename IdxType = int, typename OutType = InType> struct BinaryOpInputs { InType tolerance; IdxType len; unsigned long long int seed; }; template <typename InType, typename IdxType = int, typename OutType = InType> ::std::ostream& operator<<(::std::ostream& os, const BinaryOpInputs<InType, IdxType, OutType>& d) { return os; } template <typename T> class ternaryOpTest : public ::testing::TestWithParam<BinaryOpInputs<T>> { public: ternaryOpTest() : params(::testing::TestWithParam<BinaryOpInputs<T>>::GetParam()), stream(resource::get_cuda_stream(handle)), out_add_ref(params.len, stream), out_add(params.len, stream), out_mul_ref(params.len, stream), out_mul(params.len, stream) { } void SetUp() override { raft::random::RngState rng(params.seed); int len = params.len; rmm::device_uvector<T> in1(len, stream); rmm::device_uvector<T> in2(len, stream); rmm::device_uvector<T> in3(len, stream); fill(handle, rng, out_add_ref.data(), len, T(6.0)); fill(handle, rng, out_mul_ref.data(), len, T(6.0)); fill(handle, rng, in1.data(), len, T(1.0)); fill(handle, rng, in2.data(), len, T(2.0)); fill(handle, rng, in3.data(), len, T(3.0)); auto add = [] __device__(T a, T b, T c) { return a + b + c; }; auto mul = [] __device__(T a, T b, T c) { return a * b * c; }; auto out_add_view = raft::make_device_vector_view(out_add.data(), len); auto out_mul_view = raft::make_device_vector_view(out_mul.data(), len); auto in1_view = raft::make_device_vector_view<const T>(in1.data(), len); auto in2_view = raft::make_device_vector_view<const T>(in2.data(), len); auto in3_view = raft::make_device_vector_view<const T>(in3.data(), len); ternary_op(handle, in1_view, in2_view, in3_view, out_add_view, add); ternary_op(handle, in1_view, in2_view, in3_view, out_mul_view, mul); } protected: BinaryOpInputs<T> params; raft::resources handle; hipStream_t stream = 0; rmm::device_uvector<T> out_add_ref, out_add, out_mul_ref, out_mul; }; const std::vector<BinaryOpInputs<float>> inputsf = {{0.000001f, 1024 * 1024, 1234ULL}, {0.000001f, 1024 * 1024 + 2, 1234ULL}, {0.000001f, 1024 * 1024 + 1, 1234ULL}}; typedef ternaryOpTest<float> ternaryOpTestF; TEST_P(ternaryOpTestF, Result) { ASSERT_TRUE(devArrMatch( out_add_ref.data(), out_add.data(), params.len, raft::CompareApprox<float>(params.tolerance))); ASSERT_TRUE(devArrMatch( out_mul_ref.data(), out_mul.data(), params.len, raft::CompareApprox<float>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(ternaryOpTests, ternaryOpTestF, ::testing::ValuesIn(inputsf)); const std::vector<BinaryOpInputs<double>> inputsd = {{0.00000001, 1024 * 1024, 1234ULL}, {0.00000001, 1024 * 1024 + 2, 1234ULL}, 
{0.00000001, 1024 * 1024 + 1, 1234ULL}}; typedef ternaryOpTest<double> ternaryOpTestD; TEST_P(ternaryOpTestD, Result) { ASSERT_TRUE(devArrMatch( out_add_ref.data(), out_add.data(), params.len, raft::CompareApprox<double>(params.tolerance))); ASSERT_TRUE(devArrMatch( out_mul_ref.data(), out_mul.data(), params.len, raft::CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(ternaryOpTests, ternaryOpTestD, ::testing::ValuesIn(inputsd)); } // end namespace linalg } // end namespace raft
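The test drives raft::linalg::ternary_op with device lambdas for a + b + c and a * b * c over three filled vectors. Elementwise ternary ops of this kind reduce to a grid-stride map over three inputs; a hand-rolled sketch (not raft's implementation, and using a functor rather than an extended device lambda) is shown below.

// Hand-rolled sketch of an elementwise ternary map, roughly what the raft
// ternary_op test above exercises. Not raft's implementation; Op is any
// trivially copyable callable with operator()(T, T, T) usable in device code.
template <typename T, typename Op>
__global__ void ternary_map(const T* a, const T* b, const T* c, T* out,
                            int len, Op op)
{
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < len;
       i += gridDim.x * blockDim.x) {
    out[i] = op(a[i], b[i], c[i]);
  }
}

// Example functor matching the "add" lambda in the test.
template <typename T>
struct add3 {
  __device__ T operator()(T x, T y, T z) const { return x + y + z; }
};

// Launch sketch: ternary_map<<<256, 256>>>(a, b, c, out, len, add3<float>{});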
9368061449bbc3a4ed1018d25c34d127f7bc141a.cu
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <gtest/gtest.h> #include <raft/core/resource/cuda_stream.hpp> #include <raft/linalg/ternary_op.cuh> #include <raft/random/rng.cuh> #include <raft/util/cudart_utils.hpp> namespace raft { namespace linalg { template <typename InType, typename IdxType = int, typename OutType = InType> struct BinaryOpInputs { InType tolerance; IdxType len; unsigned long long int seed; }; template <typename InType, typename IdxType = int, typename OutType = InType> ::std::ostream& operator<<(::std::ostream& os, const BinaryOpInputs<InType, IdxType, OutType>& d) { return os; } template <typename T> class ternaryOpTest : public ::testing::TestWithParam<BinaryOpInputs<T>> { public: ternaryOpTest() : params(::testing::TestWithParam<BinaryOpInputs<T>>::GetParam()), stream(resource::get_cuda_stream(handle)), out_add_ref(params.len, stream), out_add(params.len, stream), out_mul_ref(params.len, stream), out_mul(params.len, stream) { } void SetUp() override { raft::random::RngState rng(params.seed); int len = params.len; rmm::device_uvector<T> in1(len, stream); rmm::device_uvector<T> in2(len, stream); rmm::device_uvector<T> in3(len, stream); fill(handle, rng, out_add_ref.data(), len, T(6.0)); fill(handle, rng, out_mul_ref.data(), len, T(6.0)); fill(handle, rng, in1.data(), len, T(1.0)); fill(handle, rng, in2.data(), len, T(2.0)); fill(handle, rng, in3.data(), len, T(3.0)); auto add = [] __device__(T a, T b, T c) { return a + b + c; }; auto mul = [] __device__(T a, T b, T c) { return a * b * c; }; auto out_add_view = raft::make_device_vector_view(out_add.data(), len); auto out_mul_view = raft::make_device_vector_view(out_mul.data(), len); auto in1_view = raft::make_device_vector_view<const T>(in1.data(), len); auto in2_view = raft::make_device_vector_view<const T>(in2.data(), len); auto in3_view = raft::make_device_vector_view<const T>(in3.data(), len); ternary_op(handle, in1_view, in2_view, in3_view, out_add_view, add); ternary_op(handle, in1_view, in2_view, in3_view, out_mul_view, mul); } protected: BinaryOpInputs<T> params; raft::resources handle; cudaStream_t stream = 0; rmm::device_uvector<T> out_add_ref, out_add, out_mul_ref, out_mul; }; const std::vector<BinaryOpInputs<float>> inputsf = {{0.000001f, 1024 * 1024, 1234ULL}, {0.000001f, 1024 * 1024 + 2, 1234ULL}, {0.000001f, 1024 * 1024 + 1, 1234ULL}}; typedef ternaryOpTest<float> ternaryOpTestF; TEST_P(ternaryOpTestF, Result) { ASSERT_TRUE(devArrMatch( out_add_ref.data(), out_add.data(), params.len, raft::CompareApprox<float>(params.tolerance))); ASSERT_TRUE(devArrMatch( out_mul_ref.data(), out_mul.data(), params.len, raft::CompareApprox<float>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(ternaryOpTests, ternaryOpTestF, ::testing::ValuesIn(inputsf)); const std::vector<BinaryOpInputs<double>> inputsd = {{0.00000001, 1024 * 1024, 1234ULL}, {0.00000001, 1024 * 1024 + 2, 1234ULL}, {0.00000001, 1024 * 1024 + 1, 1234ULL}}; typedef 
ternaryOpTest<double> ternaryOpTestD; TEST_P(ternaryOpTestD, Result) { ASSERT_TRUE(devArrMatch( out_add_ref.data(), out_add.data(), params.len, raft::CompareApprox<double>(params.tolerance))); ASSERT_TRUE(devArrMatch( out_mul_ref.data(), out_mul.data(), params.len, raft::CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(ternaryOpTests, ternaryOpTestD, ::testing::ValuesIn(inputsd)); } // end namespace linalg } // end namespace raft
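// --- Standalone sketch (not part of the RAFT sources above) ---------------------------
// Minimal CUDA illustration of the elementwise ternary operation that the ternaryOpTest
// cases verify: out[i] = op(a[i], b[i], c[i]). With a = 1, b = 2, c = 3 everywhere, both
// the add and the mul lambdas produce 6, which is why out_add_ref and out_mul_ref are
// filled with T(6.0). The kernel and names below are illustrative only; they are not the
// raft::linalg::ternary_op implementation.
#include <cstdio>
#include <cuda_runtime.h>

template <typename T, typename Op>
__global__ void naive_ternary_op(const T* a, const T* b, const T* c, T* out, int n, Op op)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) { out[i] = op(a[i], b[i], c[i]); }
}

struct add3 {
  __device__ float operator()(float a, float b, float c) const { return a + b + c; }
};

int main()
{
  const int n = 1024;
  float *a, *b, *c, *out;
  cudaMallocManaged(&a, n * sizeof(float));
  cudaMallocManaged(&b, n * sizeof(float));
  cudaMallocManaged(&c, n * sizeof(float));
  cudaMallocManaged(&out, n * sizeof(float));
  for (int i = 0; i < n; ++i) { a[i] = 1.f; b[i] = 2.f; c[i] = 3.f; }

  naive_ternary_op<<<(n + 255) / 256, 256>>>(a, b, c, out, n, add3{});
  cudaDeviceSynchronize();

  printf("out[0] = %f (expected 6.0)\n", out[0]);
  cudaFree(a); cudaFree(b); cudaFree(c); cudaFree(out);
  return 0;
}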
b940e34b57c7ac7f11f3d114afd5714563f85b82.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_device_view.cuh> #include <cudf/copying.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/reshape.hpp> #include <cudf/strings/detail/utilities.cuh> #include <cudf/utilities/type_dispatcher.hpp> namespace cudf { namespace { struct byte_list_conversion { /** * @brief Function object for converting primitive types and string columns to lists of bytes. */ template <typename T> std::enable_if_t<!std::is_integral<T>::value and !is_floating_point<T>(), std::unique_ptr<column>> operator()(column_view const& input_column, flip_endianness configuration, rmm::mr::device_memory_resource* mr, hipStream_t stream) const { CUDF_FAIL("Unsupported non-numeric and non-string column"); } template <typename T> std::enable_if_t<is_floating_point<T>() or std::is_integral<T>::value, std::unique_ptr<column>> operator()(column_view const& input_column, flip_endianness configuration, rmm::mr::device_memory_resource* mr, hipStream_t stream) const { size_type num_bytes = input_column.size() * sizeof(T); auto byte_column = make_numeric_column( data_type{type_id::UINT8}, num_bytes, mask_state::UNALLOCATED, stream, mr); char* d_chars = reinterpret_cast<char*>(byte_column->mutable_view().data<uint8_t>()); char const* d_data = reinterpret_cast<char const*>(input_column.data<T>()); size_type mask = sizeof(T) - 1; if (configuration == flip_endianness::YES) { thrust::for_each(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator(0), thrust::make_counting_iterator(num_bytes), [d_chars, d_data, mask] __device__(auto index) { d_chars[index] = d_data[index + mask - ((index & mask) << 1)]; }); } else { thrust::copy_n(rmm::exec_policy(stream)->on(stream), d_data, num_bytes, d_chars); } auto begin = thrust::make_constant_iterator(cudf::size_of(input_column.type())); auto offsets_column = cudf::strings::detail::make_offsets_child_column( begin, begin + input_column.size(), mr, stream); rmm::device_buffer null_mask = copy_bitmask(input_column, stream, mr); return make_lists_column(input_column.size(), std::move(offsets_column), std::move(byte_column), input_column.null_count(), std::move(null_mask), stream, mr); } }; template <> std::unique_ptr<cudf::column> byte_list_conversion::operator()<string_view>( column_view const& input_column, flip_endianness configuration, rmm::mr::device_memory_resource* mr, hipStream_t stream) const { strings_column_view input_strings(input_column); auto strings_count = input_strings.size(); if (strings_count == 0) return cudf::empty_like(input_column); auto contents = std::make_unique<column>(input_column, stream, mr)->release(); return make_lists_column( input_column.size(), std::move(contents.children[cudf::strings_column_view::offsets_column_index]), std::move(contents.children[cudf::strings_column_view::chars_column_index]), input_column.null_count(), copy_bitmask(input_column, stream, mr), stream, mr); } } // 
namespace std::unique_ptr<column> byte_cast(column_view const& input_column, flip_endianness configuration, rmm::mr::device_memory_resource* mr, hipStream_t stream) { CUDF_FUNC_RANGE(); return type_dispatcher( input_column.type(), byte_list_conversion{}, input_column, configuration, mr, stream); } } // namespace cudf
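// --- Standalone sketch (not the cudf code path above) ---------------------------------
// Illustrates the byte swizzle applied when flip_endianness::YES is requested: with
// mask = sizeof(T) - 1, output byte i is read from position i + mask - ((i & mask) << 1),
// which reverses the bytes inside each element while leaving element order untouched.
// Expected values in the comments assume a little-endian host; kernel and names here are
// illustrative only.
#include <cstdio>
#include <cstdint>
#include <cuda_runtime.h>

__global__ void flip_bytes(const char* in, char* out, int num_bytes, int mask)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < num_bytes) { out[i] = in[i + mask - ((i & mask) << 1)]; }
}

int main()
{
  const uint32_t host_in[2] = {0x11223344u, 0xAABBCCDDu};
  const int num_bytes       = sizeof(host_in);

  char *d_in, *d_out;
  cudaMalloc(&d_in, num_bytes);
  cudaMalloc(&d_out, num_bytes);
  cudaMemcpy(d_in, host_in, num_bytes, cudaMemcpyHostToDevice);

  flip_bytes<<<1, 32>>>(d_in, d_out, num_bytes, (int)sizeof(uint32_t) - 1);

  uint32_t host_out[2];
  cudaMemcpy(host_out, d_out, num_bytes, cudaMemcpyDeviceToHost);
  printf("0x%08X -> 0x%08X\n", host_in[0], host_out[0]);  // 0x11223344 -> 0x44332211
  printf("0x%08X -> 0x%08X\n", host_in[1], host_out[1]);  // 0xAABBCCDD -> 0xDDCCBBAA
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}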
b940e34b57c7ac7f11f3d114afd5714563f85b82.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_device_view.cuh> #include <cudf/copying.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/reshape.hpp> #include <cudf/strings/detail/utilities.cuh> #include <cudf/utilities/type_dispatcher.hpp> namespace cudf { namespace { struct byte_list_conversion { /** * @brief Function object for converting primitive types and string columns to lists of bytes. */ template <typename T> std::enable_if_t<!std::is_integral<T>::value and !is_floating_point<T>(), std::unique_ptr<column>> operator()(column_view const& input_column, flip_endianness configuration, rmm::mr::device_memory_resource* mr, cudaStream_t stream) const { CUDF_FAIL("Unsupported non-numeric and non-string column"); } template <typename T> std::enable_if_t<is_floating_point<T>() or std::is_integral<T>::value, std::unique_ptr<column>> operator()(column_view const& input_column, flip_endianness configuration, rmm::mr::device_memory_resource* mr, cudaStream_t stream) const { size_type num_bytes = input_column.size() * sizeof(T); auto byte_column = make_numeric_column( data_type{type_id::UINT8}, num_bytes, mask_state::UNALLOCATED, stream, mr); char* d_chars = reinterpret_cast<char*>(byte_column->mutable_view().data<uint8_t>()); char const* d_data = reinterpret_cast<char const*>(input_column.data<T>()); size_type mask = sizeof(T) - 1; if (configuration == flip_endianness::YES) { thrust::for_each(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator(0), thrust::make_counting_iterator(num_bytes), [d_chars, d_data, mask] __device__(auto index) { d_chars[index] = d_data[index + mask - ((index & mask) << 1)]; }); } else { thrust::copy_n(rmm::exec_policy(stream)->on(stream), d_data, num_bytes, d_chars); } auto begin = thrust::make_constant_iterator(cudf::size_of(input_column.type())); auto offsets_column = cudf::strings::detail::make_offsets_child_column( begin, begin + input_column.size(), mr, stream); rmm::device_buffer null_mask = copy_bitmask(input_column, stream, mr); return make_lists_column(input_column.size(), std::move(offsets_column), std::move(byte_column), input_column.null_count(), std::move(null_mask), stream, mr); } }; template <> std::unique_ptr<cudf::column> byte_list_conversion::operator()<string_view>( column_view const& input_column, flip_endianness configuration, rmm::mr::device_memory_resource* mr, cudaStream_t stream) const { strings_column_view input_strings(input_column); auto strings_count = input_strings.size(); if (strings_count == 0) return cudf::empty_like(input_column); auto contents = std::make_unique<column>(input_column, stream, mr)->release(); return make_lists_column( input_column.size(), std::move(contents.children[cudf::strings_column_view::offsets_column_index]), std::move(contents.children[cudf::strings_column_view::chars_column_index]), input_column.null_count(), copy_bitmask(input_column, stream, mr), stream, mr); } } // namespace std::unique_ptr<column> byte_cast(column_view 
const& input_column, flip_endianness configuration, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { CUDF_FUNC_RANGE(); return type_dispatcher( input_column.type(), byte_list_conversion{}, input_column, configuration, mr, stream); } } // namespace cudf
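// --- Standalone sketch (not cudf API) --------------------------------------------------
// Shows how the offsets child column for fixed-width byte lists can be derived: every row
// contributes exactly sizeof(T) bytes, so the offsets are an exclusive scan over a
// constant iterator, which is essentially what the constant iterator handed to
// make_offsets_child_column above encodes. Row count and element size below are made up
// for illustration.
#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/scan.h>

int main()
{
  const int num_rows     = 6;
  const int element_size = 4;  // e.g. sizeof(int32_t)

  // num_rows + 1 offsets; offsets[i] = i * element_size.
  thrust::device_vector<int> offsets(num_rows + 1);
  thrust::exclusive_scan(thrust::make_constant_iterator(element_size),
                         thrust::make_constant_iterator(element_size) + num_rows + 1,
                         offsets.begin());

  for (int i = 0; i <= num_rows; ++i) { printf("%d ", static_cast<int>(offsets[i])); }
  printf("\n");  // 0 4 8 12 16 20 24
  return 0;
}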
584be07df2c00aa9bc456c47dbe1b232c9480f97.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Cuda accelerated motion estimation for VP8 libvpx encoder by Pietro Paglierani, Giuliano Grossi, Federico Pedersini and Alessandro Petrini for Italtel and Universita' degli Studi di Milano 2015-2016, Milano */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <wchar.h> #include <locale.h> #include "vpx_config.h" #include "cuda/typedef_cuda.h" #include "cuda/me_cuda.h" #ifdef __cplusplus extern "C" { #endif #if HAVE_CUDA_ENABLED_DEVICE __device__ __constant__ MV MV_16x12_lookup_tex[] = { {-12,-2}, {-12, 0}, {-12, 2}, // Unit: pixel {-10,-5}, {-10,-3}, {-10,-1}, {-10, 1}, {-10, 3}, {-10, 5}, {-8,-8}, {-8,-6}, {-8,-4}, {-8,-2}, {-8, 0}, {-8, 2}, {-8, 4}, {-8, 6}, {-8, 8}, {-6,-9}, {-6,-7}, {-6,-5}, {-6,-3}, {-6,-1}, {-6, 1}, {-6, 3}, {-6, 5}, {-6, 7}, {-6, 9}, {-4,-12}, {-4,-10}, {-4,-8}, {-4,-6}, {-4,-4}, {-4,-2}, {-4, 0}, {-4, 2}, {-4, 4}, {-4, 6}, {-4, 8}, {-4,10}, {-4,12}, {-2,-13}, {-2,-11}, {-2,-9}, {-2,-7}, {-2,-5}, {-2,-3}, {-2,-1}, {-2, 1}, {-2, 3}, {-2, 5}, {-2, 7}, {-2, 9}, {-2,11}, {-2,13}, {0,-16}, {0,-14}, {0,-12}, {0,-10}, {0,-8}, {0,-6}, {0,-4}, {0,-2}, {0, 0}, {0, 2}, {0, 4}, {0, 6}, {0, 8}, {0,10}, {0,12}, {0,14}, {0,16}, {2,-13}, {2,-11}, {2,-9}, {2,-7}, {2,-5}, {2,-3}, {2,-1}, {2, 1}, {2, 3}, {2, 5}, {2, 7}, {2, 9}, {2,11}, {2,13}, {4,-12}, {4,-10}, {4,-8}, {4,-6}, {4,-4}, {4,-2}, {4, 0}, {4, 2}, {4, 4}, {4, 6}, {4, 8}, {4,10}, {4,12}, {6,-9}, {6,-7}, {6,-5}, {6,-3}, {6,-1}, {6, 1}, {6, 3}, {6, 5}, {6, 7}, {6, 9}, {8,-8}, {8,-6}, {8,-4}, {8,-2}, {8, 0}, {8, 2}, {8, 4}, {8, 6}, {8, 8}, {10,-5}, {10,-3}, {10,-1}, {10, 1}, {10, 3}, {10, 5}, {12,-2}, {12, 0}, {12, 2}, {0, 0} }; // 127 + 1 candidati __device__ __constant__ MV_ref MV_lookup_refin_tex[] = { // Unit: pixel {-1.75,-0.25}, {-1.75, 0.25}, {-1.50,-0.50}, {-1.50, 0.00}, {-1.50, 0.50}, {-1.25,-0.75}, {-1.25,-0.25}, {-1.25, 0.25}, {-1.25, 0.75}, {-1.00,-1.00}, {-1.00,-0.50}, {-1.00, 0.00}, {-1.00, 0.50}, {-1.00, 1.00}, {-0.75,-1.25}, {-0.75,-0.75}, {-0.75,-0.25},{-0.75, 0.00},{-0.75, 0.25}, {-0.75, 0.75}, {-0.75, 1.25}, {-0.50,-1.50}, {-0.50,-1.00}, {-0.50,-0.50},{-0.50,-0.25},{-0.50, 0.00},{-0.50, 0.25},{-0.50, 0.50}, {-0.50, 1.00}, {-0.50, 1.50}, {-0.25,-1.75}, {-0.25,-1.25}, {-0.25,-0.75},{-0.25,-0.50},{-0.25,-0.25},{-0.25, 0.00},{-0.25, 0.25},{-0.25, 0.50},{-0.25, 0.75}, {-0.25, 1.25}, {-0.25, 1.75}, { 0.00,-1.50}, { 0.00,-1.00},{ 0.00,-0.75},{ 0.00,-0.50},{ 0.00,-0.25},{ 0.00, 0.00},{ 0.00, 0.25},{ 0.00, 0.50},{ 0.00, 0.75},{ 0.00, 1.00}, { 0.00, 1.50}, { 0.25,-1.75}, { 0.25,-1.25}, { 0.25,-0.75},{ 0.25,-0.50},{ 0.25,-0.25},{ 0.25, 0.00},{ 0.25, 0.25},{ 0.25, 0.50},{ 0.25, 0.75}, { 0.25, 1.25}, { 0.25, 1.75}, { 0.50,-1.50}, { 0.50,-1.00}, { 0.50,-0.50},{ 0.50,-0.25},{ 0.50, 0.00},{ 0.50, 0.25},{ 0.50, 0.50}, { 0.50, 1.00}, { 0.50, 1.50}, { 0.75,-1.25}, { 0.75,-0.75}, { 0.75,-0.25},{ 0.75, 0.00},{ 0.75, 0.25}, { 0.75, 0.75}, { 0.75, 1.25}, { 1.00,-1.00}, { 1.00,-0.50}, { 1.00, 0.00}, { 1.00, 0.50}, { 1.00, 1.00}, { 1.25,-0.75}, { 1.25,-0.25}, { 1.25, 0.25}, { 1.25, 0.75}, { 1.50,-0.50}, { 1.50, 0.00}, { 1.50, 0.50}, { 1.75,-0.25}, { 1.75, 0.25} }; // 93 candidati __inline__ __device__ uint32_t __vvariance4( uint32_t u, uint32_t v ) { uint32_t w = 0; asm volatile("{\ .reg .u32 t1;\ vabsdiff4.u32.u32.u32.sat t1, %1, %2, 0;\ vmad.u32.u32.u32.sat %0, t1.b0, t1.b0, 0;\ vmad.u32.u32.u32.sat %0, t1.b1, t1.b1, %0;\ vmad.u32.u32.u32.sat %0, t1.b2, t1.b2, %0;\ vmad.u32.u32.u32.sat %0, t1.b3, t1.b3, %0;}" : "=r"(w) : 
"r"(u), "r"(v)); return w; } __inline__ __device__ uint32_t __vabsdiff4( uint32_t u, uint32_t v ) { uint32_t w = 0; //asm volatile("vabsdiff4.u32.u32.u32.sat.add %0, %1, %2, %3;" : "=r"(w) : "r"(u), "r"(v), "r"(w)); //asm volatile("vabsdiff4.u32.u32.u32.sat.add %0, %1, %2, 0;" : "=r"(w) : "r"(u), "r"(v)); asm volatile("vabsdiff4.u32.u32.u32.add %0, %1, %2, %3;" : "=r"(w) : "r"(u), "r"(v), "r"(w)); //Solo compute 3.5 return w; } __global__ void me_cuda_tex ( const hipTextureObject_t in_tex, const hipTextureObject_t ref_tex, int const streamID, int const streamSize, int const stride, int const width, int const num_MB_width, int const split_on, int_mv * __restrict__ const MVs_g, int_mv * __restrict__ const MVs_split_g ) { __shared__ uint32_t diff[128][32]; __shared__ uint8_t minpos[32]; __shared__ uint8_t minpos_refin[32]; // configurazione di lancio: blocks per grid: 16 x 1 x 1 // threads per block: 4 x 8 x 1 int32_t TID = threadIdx.y * blockDim.x + threadIdx.x; // Thread Index (0..32) int32_t i, j; int32_t MBoffset = streamID * streamSize + blockIdx.x; int32_t blockX = MBoffset % num_MB_width; // colonna int32_t blockY = MBoffset / num_MB_width; // riga // Occhio: immagine di riferimento ha cornice (larghezza tot = stride) mentre immagine input no (largh tot = width) float intex_offset_x = 16 * blockX + 4 * threadIdx.x; float intex_offset_y = 16 * blockY + 2 * threadIdx.y; float reftex_offset_x = intex_offset_x + 32.0; float reftex_offset_y = intex_offset_y + 32.0; float saved_reftex_x = reftex_offset_x; float saved_reftex_y = reftex_offset_y; MV_ref iter_mv = {0.0,0.0}; /* uint32_t img0 = (uint32_t) ( ( (uint8_t)(tex2D<float>( in_tex, intex_offset_x + 3.5, intex_offset_y + 0.5 ) * 255.0) << 24) | ( (uint8_t)(tex2D<float>( in_tex, intex_offset_x + 2.5, intex_offset_y + 0.5 ) * 255.0) << 16) | ( (uint8_t)(tex2D<float>( in_tex, intex_offset_x + 1.5, intex_offset_y + 0.5 ) * 255.0) << 8) | ( (uint8_t)(tex2D<float>( in_tex, intex_offset_x + 0.5, intex_offset_y + 0.5 ) * 255.0) ) ); uint32_t img1 = (uint32_t) ( ( (uint8_t)(tex2D<float>( in_tex, intex_offset_x + 3.5, intex_offset_y + 1.5 ) * 255.0) << 24) | ( (uint8_t)(tex2D<float>( in_tex, intex_offset_x + 2.5, intex_offset_y + 1.5 ) * 255.0) << 16) | ( (uint8_t)(tex2D<float>( in_tex, intex_offset_x + 1.5, intex_offset_y + 1.5 ) * 255.0) << 8) | ( (uint8_t)(tex2D<float>( in_tex, intex_offset_x + 0.5, intex_offset_y + 1.5 ) * 255.0) ) ); */ uint32_t img0 = (uint32_t) ( ( (uint8_t)(tex2D<float>( in_tex, intex_offset_x + 3.5, intex_offset_y + 1.5 ) * 255.0) << 24) | ( (uint8_t)(tex2D<float>( in_tex, intex_offset_x + 2.5, intex_offset_y + 0.5 ) * 255.0) << 16) | ( (uint8_t)(tex2D<float>( in_tex, intex_offset_x + 1.5, intex_offset_y + 1.5 ) * 255.0) << 8) | ( (uint8_t)(tex2D<float>( in_tex, intex_offset_x + 0.5, intex_offset_y + 0.5 ) * 255.0) ) ); #if HAVE_CUDA_MV_ITER int8_t maxloops = 3; while (maxloops > 0) { #endif /* Organizzazione dei thread all'interno del macroblocco. Ogni thread considera 4 pixel e i 4 immediatamente sottostanti. Accesso a memoria globale non e' ottimale (coalescenza a gruppi di quattro), ma questo schema permette di raggruppare le sad in somme parziali per calcolare tutte le splitmv. TID 0 TID 1 TID 2 TID 3 TID 4 TID 5 TID 6 TID 7 TID 9 TID 9 TID 10 TID 11 TID 12 TID 13 TID 14 TID 15 TID 16 TID 17 TID 18 TID 19 TID 20 TID 21 TID 22 TID 23 TID 24 TID 25 TID 26 TID 27 TID 28 TID 29 TID 30 TID 31 */ /* Calcolo delle sad, risultati memorizzati nella matrice diff. 
32 32 TID = 32 sotto blocchi, ognuno contenente sad parziali / \ diff[128][32] 128 candidati mv Ogni thread si fa carico si un sottoblocco di 8 pixel e calcola la sad per ogni candidato mv */ for (i = 0; i < 128; i++){ int32_t sad_result; MV offset_mv = MV_16x12_lookup_tex[i]; /* uint32_t ref0 = (uint32_t) ( ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 3.5 + offset_mv.col + iter_mv.col, reftex_offset_y + 0.5 + offset_mv.row + iter_mv.row ) * 255) << 24) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 2.5 + offset_mv.col + iter_mv.col, reftex_offset_y + 0.5 + offset_mv.row + iter_mv.row ) * 255) << 16) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 1.5 + offset_mv.col + iter_mv.col, reftex_offset_y + 0.5 + offset_mv.row + iter_mv.row ) * 255) << 8) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 0.5 + offset_mv.col + iter_mv.col, reftex_offset_y + 0.5 + offset_mv.row + iter_mv.row ) * 255) ) ); uint32_t ref1 = (uint32_t) ( ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 3.5 + offset_mv.col + iter_mv.col, reftex_offset_y + 1.5 + offset_mv.row + iter_mv.row ) * 255) << 24) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 2.5 + offset_mv.col + iter_mv.col, reftex_offset_y + 1.5 + offset_mv.row + iter_mv.row ) * 255) << 16) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 1.5 + offset_mv.col + iter_mv.col, reftex_offset_y + 1.5 + offset_mv.row + iter_mv.row ) * 255) << 8) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 0.5 + offset_mv.col + iter_mv.col, reftex_offset_y + 1.5 + offset_mv.row + iter_mv.row ) * 255) ) ); sad_result = __vabsdiff4( img0, ref0 ); sad_result += __vabsdiff4( img1, ref1 ); */ //sad_result = __vvariance4( img0, ref0 ); //sad_result += __vvariance4( img1, ref1 ); uint32_t ref0 = (uint32_t) ( ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 3.5 + offset_mv.col + iter_mv.col, reftex_offset_y + 1.5 + offset_mv.row + iter_mv.row ) * 255) << 24) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 2.5 + offset_mv.col + iter_mv.col, reftex_offset_y + 0.5 + offset_mv.row + iter_mv.row ) * 255) << 16) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 1.5 + offset_mv.col + iter_mv.col, reftex_offset_y + 1.5 + offset_mv.row + iter_mv.row ) * 255) << 8) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 0.5 + offset_mv.col + iter_mv.col, reftex_offset_y + 0.5 + offset_mv.row + iter_mv.row ) * 255) ) ); sad_result = __vabsdiff4( img0, ref0 ); diff[i][TID] = sad_result; } __syncthreads(); // Accumulazione delle colonne di diff in modo da formare sad di blocchi per ogni candidato mv // Prima reduction, generazione 16 sad 4x4 // 0 1 2 3 | 8 9 10 11 | 16 17 18 19 | 24 25 26 27 <- j // ^ ^ ^ ^ | ^ ^ ^ ^ | ^ ^ ^ ^ | ^ ^ ^ ^ // 4 5 6 7 | 12 13 14 15 | 20 21 22 23 | 28 29 30 31 <- j + 4 for (i = 0; i < 16; i++) { j = i + (i / 4) * 4; diff[TID ][j] += diff[TID ][j+4]; diff[TID+32][j] += diff[TID+32][j+4]; diff[TID+64][j] += diff[TID+64][j+4]; diff[TID+96][j] += diff[TID+96][j+4]; } __syncthreads(); // Seconda reduction, generazione 4 sad 8x8 // 4 | 12 | 20 | 28 <- (8 * i) + 4 // ^ | ^ | ^ | ^ // 0 1 8 9 | 2 3 10 11 | 16 17 24 25 | 18 19 26 27 <- [j j+1 j+8 j+9] for (i = 0; i < 4; i++) { j = 2 * i + (i / 2) * 12; // genera 0, 2, 16, 18 per i = 0 .. 
3 diff[TID ][(8 * i) + 4] = diff[TID ][j] + diff[TID ][j + 1] + diff[TID ][j + 8] + diff[TID ][j + 9]; diff[TID+32][(8 * i) + 4] = diff[TID+32][j] + diff[TID+32][j + 1] + diff[TID+32][j + 8] + diff[TID+32][j + 9]; diff[TID+64][(8 * i) + 4] = diff[TID+64][j] + diff[TID+64][j + 1] + diff[TID+64][j + 8] + diff[TID+64][j + 9]; diff[TID+96][(8 * i) + 4] = diff[TID+96][j] + diff[TID+96][j + 1] + diff[TID+96][j + 8] + diff[TID+96][j + 9]; } __syncthreads(); // Terza reduction (a), generazione 2 sad 8x16 // 8x16 // 22 | 30 <- 22 + (i * 8) // ^ | ^ // 4 20 | 12 28 for (i = 0; i < 2; i++) { j = 4 + (8 * i); // genera 4, 12 per i = 0..1 diff[TID ][22 + (i * 8)] = diff[TID ][j] + diff[TID ][j + 16]; diff[TID+32][22 + (i * 8)] = diff[TID+32][j] + diff[TID+32][j + 16]; diff[TID+64][22 + (i * 8)] = diff[TID+64][j] + diff[TID+64][j + 16]; diff[TID+96][22 + (i * 8)] = diff[TID+96][j] + diff[TID+96][j + 16]; } //__syncthreads(); // potrebbe non servire! // Terza reduction (b), generazione 2 sad 16x8 // 16x8 // 6 | 14 <- 6*(i+1) + 2*i = 8 * i + 6 // ^ | ^ // 4 12 | 20 28 <- [j j+8] for (i = 0; i < 2; i++) { j = 4 + (16 * i); // genera 4, 20 per i = 0..1 diff[TID ][8 * i + 6] = diff[TID ][j] + diff[TID ][j + 8]; diff[TID+32][8 * i + 6] = diff[TID+32][j] + diff[TID+32][j + 8]; diff[TID+64][8 * i + 6] = diff[TID+64][j] + diff[TID+64][j + 8]; diff[TID+96][8 * i + 6] = diff[TID+96][j] + diff[TID+96][j + 8]; } __syncthreads(); // Quarta reduction, generazione 1 sad 16x16 // 31 // ^ // 6 14 diff[TID ][31] = diff[TID ][6] + diff[TID ][14]; diff[TID+32][31] = diff[TID+32][6] + diff[TID+32][14]; diff[TID+64][31] = diff[TID+64][6] + diff[TID+64][14]; diff[TID+96][31] = diff[TID+96][6] + diff[TID+96][14]; __syncthreads(); // Ricerca del minimo di ogni colonna. A noi interessano 25 delle 32 colonne, // ma per non creare divergenza tra i thread eseguiamo la ricerca anche dove non serve minpos[TID] = 0; __syncthreads(); // 32 thread, ognuno ricerca il minimo lungo una colonna for( i = 1; i < 128; i++ ){ if ( diff[0][TID] > diff[i][TID] ) { diff[0][TID] = diff[i][TID]; minpos[TID] = i; } } #if HAVE_CUDA_MV_ITER if (fabsf(MV_16x12_lookup_tex[ minpos[31] ].row) + fabsf(MV_16x12_lookup_tex[ minpos[31] ].col) < 14) // forse era < 12... break; iter_mv.col += MV_16x12_lookup_tex[ minpos[31] ].col * 1.75; iter_mv.row += MV_16x12_lookup_tex[ minpos[31] ].row * 1.75; maxloops -= 1; } #endif // Salva mv 16x16 // Questo potrebbe essere fatto meglio, conj 25 thread che lavorano contemporaneamente, // ma devo studiare come indicizzare l'accesso alla matrice globale. C'ho voglia? 
if ( TID == 31 ) { MVs_g[MBoffset].as_mv.row = (short)((MV_16x12_lookup_tex[ minpos[TID] ].row + iter_mv.row) * 8); MVs_g[MBoffset].as_mv.col = (short)((MV_16x12_lookup_tex[ minpos[TID] ].col + iter_mv.col) * 8); } if (split_on == SPLITMV_ON) { // salva mv 4x4 if ( TID < 16 ) { MVs_split_g[MBoffset*24 + TID].as_mv.row = (short)((MV_16x12_lookup_tex[ minpos[TID + (TID / 4) * 4] ].row + iter_mv.row) * 8); MVs_split_g[MBoffset*24 + TID].as_mv.col = (short)((MV_16x12_lookup_tex[ minpos[TID + (TID / 4) * 4] ].col + iter_mv.col) * 8); } // salva mv 8x8 if ( TID < 4 ) { MVs_split_g[MBoffset*24 + 16 + TID].as_mv.row = (short)((MV_16x12_lookup_tex[ minpos[8 * TID + 4] ].row + iter_mv.row) * 8); MVs_split_g[MBoffset*24 + 16 + TID].as_mv.col = (short)((MV_16x12_lookup_tex[ minpos[8 * TID + 4] ].col + iter_mv.col) * 8); } // salva mv 8x16 e 16x8 if ( TID < 2 ) { MVs_split_g[MBoffset*24 + 20 + TID].as_mv.row = (short)((MV_16x12_lookup_tex[ minpos[8 * TID + 22] ].row + iter_mv.row) * 8); MVs_split_g[MBoffset*24 + 20 + TID].as_mv.col = (short)((MV_16x12_lookup_tex[ minpos[8 * TID + 22] ].col + iter_mv.col) * 8); MVs_split_g[MBoffset*24 + 22 + TID].as_mv.row = (short)((MV_16x12_lookup_tex[ minpos[8 * TID + 6] ].row + iter_mv.row) * 8); MVs_split_g[MBoffset*24 + 22 + TID].as_mv.col = (short)((MV_16x12_lookup_tex[ minpos[8 * TID + 6] ].col + iter_mv.col) * 8); } } __syncthreads(); /////////////////////////////////////////////////////////////////////////////////////////// // STEP 2: pixel-scale Motion Vector Search // 1. // Ricerca di un MV per ogni blocco 4x4 // 16 blocchi, 2 thread per blocco. Stesso schema per decidere TID => thread 0 e 4 fanno 1 blocco; 1 e 5 il secondo, ecc... // Risultati sad memorizzati in diff[i][TID] con 0 < i < 15 // Questa volta non possiamo piu' sfruttare che refptr punti alla stesso indice, quindi posso // calcolare contemporaneamente ogni sad per tid e accumulare, ma posso sfruttare il // parallelismo tra mv dello stesso tipo: prima calcolo in parall tutte le 4x4, poi le 8x8, ecc... if (split_on == SPLITMV_ON) { // Update refpointer al miglior mv j = (TID % 4) + (TID / 8) * 8; // Genera 0 1 2 3 0 1 2 3 8 9 10 11 8 9 10 11 16 17... 
// perche' TID 0 e 4 vengono traslati dello stesso mv corrispondente // a quello ora presente in colonna 0 di minpos reftex_offset_x += (MV_16x12_lookup_tex[minpos[j]].col + iter_mv.col); reftex_offset_y += (MV_16x12_lookup_tex[minpos[j]].row + iter_mv.row); for (i = 0; i < 93; i++) { int32_t sad_result; MV_ref offset_mv = MV_lookup_refin_tex[i]; /*uint32_t ref0 = (uint32_t) ( ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 3.5 + offset_mv.col, reftex_offset_y + 0.5 + offset_mv.row ) * 255.0) << 24) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 2.5 + offset_mv.col, reftex_offset_y + 0.5 + offset_mv.row ) * 255.0) << 16) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 1.5 + offset_mv.col, reftex_offset_y + 0.5 + offset_mv.row ) * 255.0) << 8) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 0.5 + offset_mv.col, reftex_offset_y + 0.5 + offset_mv.row ) * 255.0) ) ); uint32_t ref1 = (uint32_t) ( ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 3.5 + offset_mv.col, reftex_offset_y + 1.5 + offset_mv.row ) * 255.0) << 24) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 2.5 + offset_mv.col, reftex_offset_y + 1.5 + offset_mv.row ) * 255.0) << 16) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 1.5 + offset_mv.col, reftex_offset_y + 1.5 + offset_mv.row ) * 255.0) << 8) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 0.5 + offset_mv.col, reftex_offset_y + 1.5 + offset_mv.row ) * 255.0) ) ); sad_result = __vabsdiff4( img0, ref0 ); sad_result += __vabsdiff4( img1, ref1 ); //sad_result = __vvariance4( img0, ref0 ); //sad_result += __vvariance4( img1, ref1 ); */ uint32_t ref0 = (uint32_t) ( ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 3.5 + offset_mv.col, reftex_offset_y + 1.5 + offset_mv.row ) * 255.0) << 24) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 2.5 + offset_mv.col, reftex_offset_y + 0.5 + offset_mv.row ) * 255.0) << 16) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 1.5 + offset_mv.col, reftex_offset_y + 1.5 + offset_mv.row ) * 255.0) << 8) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 0.5 + offset_mv.col, reftex_offset_y + 0.5 + offset_mv.row ) * 255.0) ) ); sad_result = __vabsdiff4( img0, ref0 ); diff[i][TID] = sad_result; } __syncthreads(); for (i = 0; i < 16; i++) { j = i + (i / 4) * 4; diff[TID ][j] += diff[TID ][j+4]; diff[TID+32][j] += diff[TID+32][j+4]; diff[TID+64][j] += diff[TID+64][j+4]; } minpos_refin[TID] = 0; __syncthreads(); for( i = 1; i < 93; i++ ){ if ( diff[0][TID] > diff[i][TID] ) { diff[0][TID] = diff[i][TID]; minpos_refin[TID] = i; } } __syncthreads(); // salva MV della split 4x4 if ( TID < 16 ) { MVs_split_g[MBoffset*24 + TID].as_mv.row += (short)(MV_lookup_refin_tex[ minpos_refin[TID + (TID / 4) * 4] ].row * 8); MVs_split_g[MBoffset*24 + TID].as_mv.col += (short)(MV_lookup_refin_tex[ minpos_refin[TID + (TID / 4) * 4] ].col * 8); } // 2. // Ricerca di un mv per ogni blocco 8x8 // Procedura esattamente identica alla precedente: TID che elaborano stesso blocco avranno // mv impostato coerentemente. Differente accumulazione (per blocco 0: TID 0 1 4 5 8 9 12 13) // Update refpointer al miglior mv //j = (TID / 8) * 8 + 4; // Genera 4 4 4 4 4 4 4 4 12 12 12 12 12 12 12 12 20 20 20 20... j = 8 * ((TID / 2) % 2) + 4 + 16 * (TID / 16); // Genera 4 4 12 12 4 4 12 12 4 4 12 12 4 4 12 12 20 20 28 28 20 20 28 ecc.. 
reftex_offset_x = saved_reftex_x + MV_16x12_lookup_tex[minpos[j]].col + iter_mv.col; reftex_offset_y = saved_reftex_y + MV_16x12_lookup_tex[minpos[j]].row + iter_mv.row; for (i = 0; i < 93; i++) { int32_t sad_result; MV_ref offset_mv = MV_lookup_refin_tex[i]; /* uint32_t ref0 = (uint32_t) ( ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 3.5 + offset_mv.col, reftex_offset_y + 0.5 + offset_mv.row ) * 255.0) << 24) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 2.5 + offset_mv.col, reftex_offset_y + 0.5 + offset_mv.row ) * 255.0) << 16) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 1.5 + offset_mv.col, reftex_offset_y + 0.5 + offset_mv.row ) * 255.0) << 8) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 0.5 + offset_mv.col, reftex_offset_y + 0.5 + offset_mv.row ) * 255.0) ) ); uint32_t ref1 = (uint32_t) ( ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 3.5 + offset_mv.col, reftex_offset_y + 1.5 + offset_mv.row ) * 255.0) << 24) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 2.5 + offset_mv.col, reftex_offset_y + 1.5 + offset_mv.row ) * 255.0) << 16) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 1.5 + offset_mv.col, reftex_offset_y + 1.5 + offset_mv.row ) * 255.0) << 8) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 0.5 + offset_mv.col, reftex_offset_y + 1.5 + offset_mv.row ) * 255.0) ) ); sad_result = __vabsdiff4( img0, ref0 ); sad_result += __vabsdiff4( img1, ref1 ); */ //sad_result = __vvariance4( img0, ref0 ); //sad_result += __vvariance4( img1, ref1 ); uint32_t ref0 = (uint32_t) ( ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 3.5 + offset_mv.col, reftex_offset_y + 1.5 + offset_mv.row ) * 255.0) << 24) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 2.5 + offset_mv.col, reftex_offset_y + 0.5 + offset_mv.row ) * 255.0) << 16) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 1.5 + offset_mv.col, reftex_offset_y + 1.5 + offset_mv.row ) * 255.0) << 8) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 0.5 + offset_mv.col, reftex_offset_y + 0.5 + offset_mv.row ) * 255.0) ) ); sad_result = __vabsdiff4( img0, ref0 ); diff[i][TID] = sad_result; } __syncthreads(); // Sono pigro, copio e incollo la stessa manfrina for (i = 0; i < 16; i++) { j = i + (i / 4) * 4; diff[TID ][j] += diff[TID ][j+4]; diff[TID+32][j] += diff[TID+32][j+4]; diff[TID+64][j] += diff[TID+64][j+4]; } __syncthreads(); for (i = 0; i < 4; i++) { j = 2 * i + (i / 2) * 12; diff[TID ][(8 * i) + 4] = diff[TID ][j] + diff[TID ][j + 1] + diff[TID ][j + 8] + diff[TID] [j + 9]; diff[TID+32][(8 * i) + 4] = diff[TID+32][j] + diff[TID+32][j + 1] + diff[TID+32][j + 8] + diff[TID+32][j + 9]; diff[TID+64][(8 * i) + 4] = diff[TID+64][j] + diff[TID+64][j + 1] + diff[TID+64][j + 8] + diff[TID+64][j + 9]; } __syncthreads(); minpos_refin[TID] = 0; __syncthreads(); // 32 thread, ognuno ricerca il minimo lungo ogni colonna // anche se le colonne interessanti sono solo la 4, 12, 20 e 28 for( i = 1; i < 93; i++ ){ if ( diff[0][TID] > diff[i][TID] ) { diff[0][TID] = diff[i][TID]; minpos_refin[TID] = i; } } __syncthreads(); // Salva i MV della split 8x8 if ( TID < 4 ) { MVs_split_g[MBoffset*24 + 16 + TID].as_mv.row += (short)(MV_lookup_refin_tex[ minpos_refin[8 * TID + 4] ].row * 8); MVs_split_g[MBoffset*24 + 16 + TID].as_mv.col += (short)(MV_lookup_refin_tex[ minpos_refin[8 * TID + 4] ].col * 8); } // 4. // Ricerca di un mv per ogni blocco 8x16 // TODO? 
// Ho paura che diventi un'operazione un po' troppo onerosa, per ora preferisco lasciare // le 8x16 con dettaglio a due pixel (senza refining search) } // 5. // Refining search su blocco 16x16 // Update RefPointer to the best motion vector reftex_offset_x = saved_reftex_x + (MV_16x12_lookup_tex[minpos[31]]).col + iter_mv.col; reftex_offset_y = saved_reftex_y + (MV_16x12_lookup_tex[minpos[31]]).row + iter_mv.row; for (i = 0; i < 93; i++) { int32_t sad_result; MV_ref offset_mv = MV_lookup_refin_tex[i]; /* uint32_t ref0 = (uint32_t) ( ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 3.5 + offset_mv.col, reftex_offset_y + 0.5 + offset_mv.row ) * 255.0) << 24) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 2.5 + offset_mv.col, reftex_offset_y + 0.5 + offset_mv.row ) * 255.0) << 16) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 1.5 + offset_mv.col, reftex_offset_y + 0.5 + offset_mv.row ) * 255.0) << 8) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 0.5 + offset_mv.col, reftex_offset_y + 0.5 + offset_mv.row ) * 255.0) ) ); uint32_t ref1 = (uint32_t) ( ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 3.5 + offset_mv.col, reftex_offset_y + 1.5 + offset_mv.row ) * 255.0) << 24) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 2.5 + offset_mv.col, reftex_offset_y + 1.5 + offset_mv.row ) * 255.0) << 16) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 1.5 + offset_mv.col, reftex_offset_y + 1.5 + offset_mv.row ) * 255.0) << 8) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 0.5 + offset_mv.col, reftex_offset_y + 1.5 + offset_mv.row ) * 255.0) ) ); sad_result = __vabsdiff4( img0, ref0 ); sad_result += __vabsdiff4( img1, ref1 ); //sad_result = __vvariance4( img0, ref0 ); //sad_result += __vvariance4( img1, ref1 ); */ uint32_t ref0 = (uint32_t) ( ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 3.5 + offset_mv.col, reftex_offset_y + 1.5 + offset_mv.row ) * 255.0) << 24) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 2.5 + offset_mv.col, reftex_offset_y + 0.5 + offset_mv.row ) * 255.0) << 16) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 1.5 + offset_mv.col, reftex_offset_y + 1.5 + offset_mv.row ) * 255.0) << 8) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 0.5 + offset_mv.col, reftex_offset_y + 0.5 + offset_mv.row ) * 255.0) ) ); sad_result = __vabsdiff4( img0, ref0 ); diff[i][TID] = sad_result; } __syncthreads(); for (i=0; i<16; i++) { diff[TID ][i] += diff[TID ][i+16]; diff[TID+32][i] += diff[TID+32][i+16]; diff[TID+64][i] += diff[TID+64][i+16]; } __syncthreads(); for (i=0; i<8; i++) { diff[TID ][i] += diff[TID ][i+8]; diff[TID+32][i] += diff[TID+32][i+8]; diff[TID+64][i] += diff[TID+64][i+8]; } __syncthreads(); for (i=0; i<4; i++) { diff[TID ][i] += diff[TID ][i+4]; diff[TID+32][i] += diff[TID+32][i+4]; diff[TID+64][i] += diff[TID+64][i+4]; } __syncthreads(); diff[TID ][0] += (diff[TID ][1] + diff[TID ][2] + diff[TID ][3]); diff[TID+32][0] += (diff[TID+32][1] + diff[TID+32][2] + diff[TID+32][3]); diff[TID+64][0] += (diff[TID+64][1] + diff[TID+64][2] + diff[TID+64][3]); __syncthreads(); minpos_refin[TID] = 0; __syncthreads(); // 32 thread, ognuno ricerca il minimo lungo ogni colonna // anche se le colonne interessanti sono solo la 0 for( i = 1; i < 93; i++ ){ if ( diff[0][TID] > diff[i][TID] ) { diff[0][TID] = diff[i][TID]; minpos_refin[TID] = i; } } __syncthreads(); if( TID == 0 ) { MVs_g[MBoffset].as_mv.row += (short)(MV_lookup_refin_tex[ minpos_refin[0] ].row * 8); MVs_g[MBoffset].as_mv.col += 
(short)(MV_lookup_refin_tex[ minpos_refin[0] ].col * 8); } } inline void me_kernel_launch_tex( VP8_COMMON * const common, const hipTextureObject_t in_tex, const hipTextureObject_t ref_tex, int const streamID, int const split_on, int_mv * const MVs, int_mv * const MVs_split ) { #if CUDA_VERBOSE float elapsedTime; hipEvent_t start, stop; CHECK(hipEventCreate(&start)); CHECK(hipEventCreate(&stop)); CHECK(hipEventRecord(start)); #endif hipLaunchKernelGGL(( me_cuda_tex) , dim3(common->GPU.gridDim), dim3(common->GPU.blockDim), 0, common->GPU.streams.frame[streamID] , in_tex, ref_tex, streamID, common->GPU.streamSize, common->gpu_frame.stride, common->gpu_frame.width, common->gpu_frame.num_MB_width, split_on, MVs, MVs_split ); #if CUDA_VERBOSE CHECK(hipEventRecord(stop)); CHECK(hipEventSynchronize(stop)); CHECK(hipEventElapsedTime(&elapsedTime, start, stop)); printf("\n[GPU] ME elapsed time streams[%d]: %.4f ms\n",streamID,elapsedTime); CHECK(hipEventDestroy(start)); CHECK(hipEventDestroy(stop)); add_STATS((double)elapsedTime,0); #endif } void me_cuda_launch_interleaved_tex( VP8_COMMON * const cm, int fb_idx, int ref_frame_flags ) { //int MV_size_16 = 16*sizeof(int_mv); int MV_size_16 = cm->GPU.streamSize * sizeof(int_mv); // for printing informations about reference frame flags and their usage, I left a commented prinft at line 3625 // at the beginning of encode_frame_to_data_rate(..) in onyx_if.c for (int t = 0; t < cm->GPU.num_mb16th; t++) { int s = cm->GPU.streamLaunchOrder[t]; //int offset = 16*s; int offset = cm->GPU.streamSize * s; // bugfix per immagini il cui n di mb non e' divisibile per 16 // prima venivano lanciati troppi processi e hipMemcpyAsync andava a leggere oltre i limiti degli array if (offset + cm->GPU.streamSize > cm->gpu_frame.num_mv) MV_size_16 = ( offset + cm->GPU.streamSize - cm->gpu_frame.num_mv ) * sizeof( int_mv ); if ((ref_frame_flags & GPUFLAG_LAST_FRAME) && (cm->yv12_fb[cm->lst_fb_idx].flags & GPUFLAG_LAST_FRAME)) { me_kernel_launch_tex(cm, cm->gpu_frame.rawFbTex, (cm->gpu_frame.fbTex)[cm->lst_fb_idx], s, SPLITMV_ON, (cm->gpu_frame.MVs_g)[0], (cm->gpu_frame.MVs_split_g) ); CHECK(hipMemcpyAsync( &(cm->host_frame.MVs_h)[0][offset], &(cm->gpu_frame.MVs_g)[0][offset], MV_size_16, hipMemcpyDeviceToHost, cm->GPU.streams.frame[s])); CHECK(hipMemcpyAsync( &(cm->host_frame.MVs_split_h)[offset],&(cm->gpu_frame.MVs_split_g)[offset],24 * MV_size_16, hipMemcpyDeviceToHost, cm->GPU.streams.frame[s])); } // Se ref_frame_flags indica la presenza di un gold e se il flag del fb puntato da gld_fb_idx indica che e' gold, allora... if ((ref_frame_flags & GPUFLAG_GOLD_FRAME) && (cm->yv12_fb[cm->gld_fb_idx].flags & GPUFLAG_GOLD_FRAME)) { me_kernel_launch_tex(cm, cm->gpu_frame.rawFbTex, (cm->gpu_frame.fbTex)[cm->gld_fb_idx], s, SPLITMV_OFF, (cm->gpu_frame.MVs_g)[1], 0 ); CHECK(hipMemcpyAsync( &(cm->host_frame.MVs_h)[1][offset], &(cm->gpu_frame.MVs_g)[1][offset], MV_size_16, hipMemcpyDeviceToHost, cm->GPU.streams.frame[s])); //CHECK(hipMemcpyAsync( &(cm->host_frame.MVs_split_h)[1][offset],&(cm->gpu_frame.MVs_split_g)[1][offset],24 * MV_size_16, hipMemcpyDeviceToHost, cm->GPU.streams.frame[s])); } // Se ref_frame_flags indica la presenza di un altref e se il flag del fb puntato da alt_fb_idx indica che e' altref, allora... 
if ((ref_frame_flags & GPUFLAG_ALTR_FRAME) && (cm->yv12_fb[cm->alt_fb_idx].flags & GPUFLAG_ALTR_FRAME)) { me_kernel_launch_tex(cm, cm->gpu_frame.rawFbTex, (cm->gpu_frame.fbTex)[cm->alt_fb_idx], s, SPLITMV_OFF, (cm->gpu_frame.MVs_g)[2], 0 ); CHECK(hipMemcpyAsync( &(cm->host_frame.MVs_h)[2][offset], &(cm->gpu_frame.MVs_g)[2][offset], MV_size_16, hipMemcpyDeviceToHost, cm->GPU.streams.frame[s])); //CHECK(hipMemcpyAsync( &(cm->host_frame.MVs_split_h)[2][offset],&(cm->gpu_frame.MVs_split_g)[2][offset],24 * MV_size_16, hipMemcpyDeviceToHost, cm->GPU.streams.frame[s])); } } } #endif /* HAVE_CUDA_ENABLED_DEVICE */ #ifdef __cplusplus } #endif
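// --- Standalone sketch (not part of the encoder sources above) -------------------------
// Shows what the inline-PTX __vabsdiff4 helper computes: the sum of absolute differences
// of the four bytes packed into two 32-bit words, i.e. a 4-pixel SAD in a single value.
// The plain loop below is a reference formulation for illustration only; the kernel uses
// the vabsdiff4.u32.u32.u32.add instruction, which its own comment notes requires
// compute capability 3.5.
#include <cstdio>
#include <cstdint>
#include <cuda_runtime.h>

__device__ uint32_t sad4_reference(uint32_t u, uint32_t v)
{
  uint32_t sum = 0;
  for (int b = 0; b < 4; ++b) {
    int ub = (u >> (8 * b)) & 0xFF;
    int vb = (v >> (8 * b)) & 0xFF;
    sum += (ub > vb) ? (ub - vb) : (vb - ub);
  }
  return sum;
}

__global__ void sad4_demo(uint32_t u, uint32_t v, uint32_t* out)
{
  *out = sad4_reference(u, v);
}

int main()
{
  uint32_t* d_out;
  cudaMalloc(&d_out, sizeof(uint32_t));

  // Packed bytes (0x10,0x20,0x30,0x40) vs (0x12,0x1E,0x35,0x3C): SAD = 2 + 2 + 5 + 4 = 13.
  sad4_demo<<<1, 1>>>(0x40302010u, 0x3C351E12u, d_out);

  uint32_t h_out = 0;
  cudaMemcpy(&h_out, d_out, sizeof(uint32_t), cudaMemcpyDeviceToHost);
  printf("4-byte SAD = %u (expected 13)\n", h_out);
  cudaFree(d_out);
  return 0;
}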
584be07df2c00aa9bc456c47dbe1b232c9480f97.cu
/* Cuda accelerated motion estimation for VP8 libvpx encoder by Pietro Paglierani, Giuliano Grossi, Federico Pedersini and Alessandro Petrini for Italtel and Universita' degli Studi di Milano 2015-2016, Milano */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <wchar.h> #include <locale.h> #include "vpx_config.h" #include "cuda/typedef_cuda.h" #include "cuda/me_cuda.h" #ifdef __cplusplus extern "C" { #endif #if HAVE_CUDA_ENABLED_DEVICE __device__ __constant__ MV MV_16x12_lookup_tex[] = { {-12,-2}, {-12, 0}, {-12, 2}, // Unit: pixel {-10,-5}, {-10,-3}, {-10,-1}, {-10, 1}, {-10, 3}, {-10, 5}, {-8,-8}, {-8,-6}, {-8,-4}, {-8,-2}, {-8, 0}, {-8, 2}, {-8, 4}, {-8, 6}, {-8, 8}, {-6,-9}, {-6,-7}, {-6,-5}, {-6,-3}, {-6,-1}, {-6, 1}, {-6, 3}, {-6, 5}, {-6, 7}, {-6, 9}, {-4,-12}, {-4,-10}, {-4,-8}, {-4,-6}, {-4,-4}, {-4,-2}, {-4, 0}, {-4, 2}, {-4, 4}, {-4, 6}, {-4, 8}, {-4,10}, {-4,12}, {-2,-13}, {-2,-11}, {-2,-9}, {-2,-7}, {-2,-5}, {-2,-3}, {-2,-1}, {-2, 1}, {-2, 3}, {-2, 5}, {-2, 7}, {-2, 9}, {-2,11}, {-2,13}, {0,-16}, {0,-14}, {0,-12}, {0,-10}, {0,-8}, {0,-6}, {0,-4}, {0,-2}, {0, 0}, {0, 2}, {0, 4}, {0, 6}, {0, 8}, {0,10}, {0,12}, {0,14}, {0,16}, {2,-13}, {2,-11}, {2,-9}, {2,-7}, {2,-5}, {2,-3}, {2,-1}, {2, 1}, {2, 3}, {2, 5}, {2, 7}, {2, 9}, {2,11}, {2,13}, {4,-12}, {4,-10}, {4,-8}, {4,-6}, {4,-4}, {4,-2}, {4, 0}, {4, 2}, {4, 4}, {4, 6}, {4, 8}, {4,10}, {4,12}, {6,-9}, {6,-7}, {6,-5}, {6,-3}, {6,-1}, {6, 1}, {6, 3}, {6, 5}, {6, 7}, {6, 9}, {8,-8}, {8,-6}, {8,-4}, {8,-2}, {8, 0}, {8, 2}, {8, 4}, {8, 6}, {8, 8}, {10,-5}, {10,-3}, {10,-1}, {10, 1}, {10, 3}, {10, 5}, {12,-2}, {12, 0}, {12, 2}, {0, 0} }; // 127 + 1 candidati __device__ __constant__ MV_ref MV_lookup_refin_tex[] = { // Unit: pixel {-1.75,-0.25}, {-1.75, 0.25}, {-1.50,-0.50}, {-1.50, 0.00}, {-1.50, 0.50}, {-1.25,-0.75}, {-1.25,-0.25}, {-1.25, 0.25}, {-1.25, 0.75}, {-1.00,-1.00}, {-1.00,-0.50}, {-1.00, 0.00}, {-1.00, 0.50}, {-1.00, 1.00}, {-0.75,-1.25}, {-0.75,-0.75}, {-0.75,-0.25},{-0.75, 0.00},{-0.75, 0.25}, {-0.75, 0.75}, {-0.75, 1.25}, {-0.50,-1.50}, {-0.50,-1.00}, {-0.50,-0.50},{-0.50,-0.25},{-0.50, 0.00},{-0.50, 0.25},{-0.50, 0.50}, {-0.50, 1.00}, {-0.50, 1.50}, {-0.25,-1.75}, {-0.25,-1.25}, {-0.25,-0.75},{-0.25,-0.50},{-0.25,-0.25},{-0.25, 0.00},{-0.25, 0.25},{-0.25, 0.50},{-0.25, 0.75}, {-0.25, 1.25}, {-0.25, 1.75}, { 0.00,-1.50}, { 0.00,-1.00},{ 0.00,-0.75},{ 0.00,-0.50},{ 0.00,-0.25},{ 0.00, 0.00},{ 0.00, 0.25},{ 0.00, 0.50},{ 0.00, 0.75},{ 0.00, 1.00}, { 0.00, 1.50}, { 0.25,-1.75}, { 0.25,-1.25}, { 0.25,-0.75},{ 0.25,-0.50},{ 0.25,-0.25},{ 0.25, 0.00},{ 0.25, 0.25},{ 0.25, 0.50},{ 0.25, 0.75}, { 0.25, 1.25}, { 0.25, 1.75}, { 0.50,-1.50}, { 0.50,-1.00}, { 0.50,-0.50},{ 0.50,-0.25},{ 0.50, 0.00},{ 0.50, 0.25},{ 0.50, 0.50}, { 0.50, 1.00}, { 0.50, 1.50}, { 0.75,-1.25}, { 0.75,-0.75}, { 0.75,-0.25},{ 0.75, 0.00},{ 0.75, 0.25}, { 0.75, 0.75}, { 0.75, 1.25}, { 1.00,-1.00}, { 1.00,-0.50}, { 1.00, 0.00}, { 1.00, 0.50}, { 1.00, 1.00}, { 1.25,-0.75}, { 1.25,-0.25}, { 1.25, 0.25}, { 1.25, 0.75}, { 1.50,-0.50}, { 1.50, 0.00}, { 1.50, 0.50}, { 1.75,-0.25}, { 1.75, 0.25} }; // 93 candidati __inline__ __device__ uint32_t __vvariance4( uint32_t u, uint32_t v ) { uint32_t w = 0; asm volatile("{\ .reg .u32 t1;\ vabsdiff4.u32.u32.u32.sat t1, %1, %2, 0;\ vmad.u32.u32.u32.sat %0, t1.b0, t1.b0, 0;\ vmad.u32.u32.u32.sat %0, t1.b1, t1.b1, %0;\ vmad.u32.u32.u32.sat %0, t1.b2, t1.b2, %0;\ vmad.u32.u32.u32.sat %0, t1.b3, t1.b3, %0;}" : "=r"(w) : "r"(u), "r"(v)); return w; } __inline__ __device__ uint32_t __vabsdiff4( uint32_t u, 
uint32_t v ) { uint32_t w = 0; //asm volatile("vabsdiff4.u32.u32.u32.sat.add %0, %1, %2, %3;" : "=r"(w) : "r"(u), "r"(v), "r"(w)); //asm volatile("vabsdiff4.u32.u32.u32.sat.add %0, %1, %2, 0;" : "=r"(w) : "r"(u), "r"(v)); asm volatile("vabsdiff4.u32.u32.u32.add %0, %1, %2, %3;" : "=r"(w) : "r"(u), "r"(v), "r"(w)); //Solo compute 3.5 return w; } __global__ void me_cuda_tex ( const cudaTextureObject_t in_tex, const cudaTextureObject_t ref_tex, int const streamID, int const streamSize, int const stride, int const width, int const num_MB_width, int const split_on, int_mv * __restrict__ const MVs_g, int_mv * __restrict__ const MVs_split_g ) { __shared__ uint32_t diff[128][32]; __shared__ uint8_t minpos[32]; __shared__ uint8_t minpos_refin[32]; // configurazione di lancio: blocks per grid: 16 x 1 x 1 // threads per block: 4 x 8 x 1 int32_t TID = threadIdx.y * blockDim.x + threadIdx.x; // Thread Index (0..32) int32_t i, j; int32_t MBoffset = streamID * streamSize + blockIdx.x; int32_t blockX = MBoffset % num_MB_width; // colonna int32_t blockY = MBoffset / num_MB_width; // riga // Occhio: immagine di riferimento ha cornice (larghezza tot = stride) mentre immagine input no (largh tot = width) float intex_offset_x = 16 * blockX + 4 * threadIdx.x; float intex_offset_y = 16 * blockY + 2 * threadIdx.y; float reftex_offset_x = intex_offset_x + 32.0; float reftex_offset_y = intex_offset_y + 32.0; float saved_reftex_x = reftex_offset_x; float saved_reftex_y = reftex_offset_y; MV_ref iter_mv = {0.0,0.0}; /* uint32_t img0 = (uint32_t) ( ( (uint8_t)(tex2D<float>( in_tex, intex_offset_x + 3.5, intex_offset_y + 0.5 ) * 255.0) << 24) | ( (uint8_t)(tex2D<float>( in_tex, intex_offset_x + 2.5, intex_offset_y + 0.5 ) * 255.0) << 16) | ( (uint8_t)(tex2D<float>( in_tex, intex_offset_x + 1.5, intex_offset_y + 0.5 ) * 255.0) << 8) | ( (uint8_t)(tex2D<float>( in_tex, intex_offset_x + 0.5, intex_offset_y + 0.5 ) * 255.0) ) ); uint32_t img1 = (uint32_t) ( ( (uint8_t)(tex2D<float>( in_tex, intex_offset_x + 3.5, intex_offset_y + 1.5 ) * 255.0) << 24) | ( (uint8_t)(tex2D<float>( in_tex, intex_offset_x + 2.5, intex_offset_y + 1.5 ) * 255.0) << 16) | ( (uint8_t)(tex2D<float>( in_tex, intex_offset_x + 1.5, intex_offset_y + 1.5 ) * 255.0) << 8) | ( (uint8_t)(tex2D<float>( in_tex, intex_offset_x + 0.5, intex_offset_y + 1.5 ) * 255.0) ) ); */ uint32_t img0 = (uint32_t) ( ( (uint8_t)(tex2D<float>( in_tex, intex_offset_x + 3.5, intex_offset_y + 1.5 ) * 255.0) << 24) | ( (uint8_t)(tex2D<float>( in_tex, intex_offset_x + 2.5, intex_offset_y + 0.5 ) * 255.0) << 16) | ( (uint8_t)(tex2D<float>( in_tex, intex_offset_x + 1.5, intex_offset_y + 1.5 ) * 255.0) << 8) | ( (uint8_t)(tex2D<float>( in_tex, intex_offset_x + 0.5, intex_offset_y + 0.5 ) * 255.0) ) ); #if HAVE_CUDA_MV_ITER int8_t maxloops = 3; while (maxloops > 0) { #endif /* Organizzazione dei thread all'interno del macroblocco. Ogni thread considera 4 pixel e i 4 immediatamente sottostanti. Accesso a memoria globale non e' ottimale (coalescenza a gruppi di quattro), ma questo schema permette di raggruppare le sad in somme parziali per calcolare tutte le splitmv. 
╔══════════╦══════════╦══════════╦══════════╗ ║ TID 0 ║ TID 1 ║ TID 2 ║ TID 3 ║ ╟──────────╫──────────╫──────────╫──────────╢ ║ TID 4 ║ TID 5 ║ TID 6 ║ TID 7 ║ ╠══════════╬══════════╬══════════╬══════════╣ ║ TID 9 ║ TID 9 ║ TID 10 ║ TID 11 ║ ╟──────────╫──────────╫──────────╫──────────╢ ║ TID 12 ║ TID 13 ║ TID 14 ║ TID 15 ║ ╠══════════╬══════════╬══════════╬══════════╣ ║ TID 16 ║ TID 17 ║ TID 18 ║ TID 19 ║ ╟──────────╫──────────╫──────────╫──────────╢ ║ TID 20 ║ TID 21 ║ TID 22 ║ TID 23 ║ ╠══════════╬══════════╬══════════╬══════════╣ ║ TID 24 ║ TID 25 ║ TID 26 ║ TID 27 ║ ╟──────────╫──────────╫──────────╫──────────╢ ║ TID 28 ║ TID 29 ║ TID 30 ║ TID 31 ║ ╚══════════╩══════════╩══════════╩══════════╝ */ /* Calcolo delle sad, risultati memorizzati nella matrice diff. 32 32 TID = 32 sotto blocchi, ognuno contenente sad parziali / \ ┌───────────────┐ │ │ │ │ │ │ │ diff[128][32] │ 128 candidati mv │ │ │ │ │ │ └───────────────┘ Ogni thread si fa carico si un sottoblocco di 8 pixel e calcola la sad per ogni candidato mv */ for (i = 0; i < 128; i++){ int32_t sad_result; MV offset_mv = MV_16x12_lookup_tex[i]; /* uint32_t ref0 = (uint32_t) ( ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 3.5 + offset_mv.col + iter_mv.col, reftex_offset_y + 0.5 + offset_mv.row + iter_mv.row ) * 255) << 24) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 2.5 + offset_mv.col + iter_mv.col, reftex_offset_y + 0.5 + offset_mv.row + iter_mv.row ) * 255) << 16) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 1.5 + offset_mv.col + iter_mv.col, reftex_offset_y + 0.5 + offset_mv.row + iter_mv.row ) * 255) << 8) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 0.5 + offset_mv.col + iter_mv.col, reftex_offset_y + 0.5 + offset_mv.row + iter_mv.row ) * 255) ) ); uint32_t ref1 = (uint32_t) ( ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 3.5 + offset_mv.col + iter_mv.col, reftex_offset_y + 1.5 + offset_mv.row + iter_mv.row ) * 255) << 24) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 2.5 + offset_mv.col + iter_mv.col, reftex_offset_y + 1.5 + offset_mv.row + iter_mv.row ) * 255) << 16) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 1.5 + offset_mv.col + iter_mv.col, reftex_offset_y + 1.5 + offset_mv.row + iter_mv.row ) * 255) << 8) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 0.5 + offset_mv.col + iter_mv.col, reftex_offset_y + 1.5 + offset_mv.row + iter_mv.row ) * 255) ) ); sad_result = __vabsdiff4( img0, ref0 ); sad_result += __vabsdiff4( img1, ref1 ); */ //sad_result = __vvariance4( img0, ref0 ); //sad_result += __vvariance4( img1, ref1 ); uint32_t ref0 = (uint32_t) ( ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 3.5 + offset_mv.col + iter_mv.col, reftex_offset_y + 1.5 + offset_mv.row + iter_mv.row ) * 255) << 24) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 2.5 + offset_mv.col + iter_mv.col, reftex_offset_y + 0.5 + offset_mv.row + iter_mv.row ) * 255) << 16) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 1.5 + offset_mv.col + iter_mv.col, reftex_offset_y + 1.5 + offset_mv.row + iter_mv.row ) * 255) << 8) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 0.5 + offset_mv.col + iter_mv.col, reftex_offset_y + 0.5 + offset_mv.row + iter_mv.row ) * 255) ) ); sad_result = __vabsdiff4( img0, ref0 ); diff[i][TID] = sad_result; } __syncthreads(); // Accumulazione delle colonne di diff in modo da formare sad di blocchi per ogni candidato mv // Prima reduction, generazione 16 sad 4x4 // 0 1 2 3 | 8 9 10 11 | 16 17 18 19 | 24 25 26 27 <- j // ^ ^ ^ ^ | ^ 
^ ^ ^ | ^ ^ ^ ^ | ^ ^ ^ ^ // 4 5 6 7 | 12 13 14 15 | 20 21 22 23 | 28 29 30 31 <- j + 4 for (i = 0; i < 16; i++) { j = i + (i / 4) * 4; diff[TID ][j] += diff[TID ][j+4]; diff[TID+32][j] += diff[TID+32][j+4]; diff[TID+64][j] += diff[TID+64][j+4]; diff[TID+96][j] += diff[TID+96][j+4]; } __syncthreads(); // Seconda reduction, generazione 4 sad 8x8 // 4 | 12 | 20 | 28 <- (8 * i) + 4 // ^ | ^ | ^ | ^ // 0 1 8 9 | 2 3 10 11 | 16 17 24 25 | 18 19 26 27 <- [j j+1 j+8 j+9] for (i = 0; i < 4; i++) { j = 2 * i + (i / 2) * 12; // genera 0, 2, 16, 18 per i = 0 .. 3 diff[TID ][(8 * i) + 4] = diff[TID ][j] + diff[TID ][j + 1] + diff[TID ][j + 8] + diff[TID ][j + 9]; diff[TID+32][(8 * i) + 4] = diff[TID+32][j] + diff[TID+32][j + 1] + diff[TID+32][j + 8] + diff[TID+32][j + 9]; diff[TID+64][(8 * i) + 4] = diff[TID+64][j] + diff[TID+64][j + 1] + diff[TID+64][j + 8] + diff[TID+64][j + 9]; diff[TID+96][(8 * i) + 4] = diff[TID+96][j] + diff[TID+96][j + 1] + diff[TID+96][j + 8] + diff[TID+96][j + 9]; } __syncthreads(); // Terza reduction (a), generazione 2 sad 8x16 // 8x16 // 22 | 30 <- 22 + (i * 8) // ^ | ^ // 4 20 | 12 28 for (i = 0; i < 2; i++) { j = 4 + (8 * i); // genera 4, 12 per i = 0..1 diff[TID ][22 + (i * 8)] = diff[TID ][j] + diff[TID ][j + 16]; diff[TID+32][22 + (i * 8)] = diff[TID+32][j] + diff[TID+32][j + 16]; diff[TID+64][22 + (i * 8)] = diff[TID+64][j] + diff[TID+64][j + 16]; diff[TID+96][22 + (i * 8)] = diff[TID+96][j] + diff[TID+96][j + 16]; } //__syncthreads(); // potrebbe non servire! // Terza reduction (b), generazione 2 sad 16x8 // 16x8 // 6 | 14 <- 6*(i+1) + 2*i = 8 * i + 6 // ^ | ^ // 4 12 | 20 28 <- [j j+8] for (i = 0; i < 2; i++) { j = 4 + (16 * i); // genera 4, 20 per i = 0..1 diff[TID ][8 * i + 6] = diff[TID ][j] + diff[TID ][j + 8]; diff[TID+32][8 * i + 6] = diff[TID+32][j] + diff[TID+32][j + 8]; diff[TID+64][8 * i + 6] = diff[TID+64][j] + diff[TID+64][j + 8]; diff[TID+96][8 * i + 6] = diff[TID+96][j] + diff[TID+96][j + 8]; } __syncthreads(); // Quarta reduction, generazione 1 sad 16x16 // 31 // ^ // 6 14 diff[TID ][31] = diff[TID ][6] + diff[TID ][14]; diff[TID+32][31] = diff[TID+32][6] + diff[TID+32][14]; diff[TID+64][31] = diff[TID+64][6] + diff[TID+64][14]; diff[TID+96][31] = diff[TID+96][6] + diff[TID+96][14]; __syncthreads(); // Ricerca del minimo di ogni colonna. A noi interessano 25 delle 32 colonne, // ma per non creare divergenza tra i thread eseguiamo la ricerca anche dove non serve minpos[TID] = 0; __syncthreads(); // 32 thread, ognuno ricerca il minimo lungo una colonna for( i = 1; i < 128; i++ ){ if ( diff[0][TID] > diff[i][TID] ) { diff[0][TID] = diff[i][TID]; minpos[TID] = i; } } #if HAVE_CUDA_MV_ITER if (fabsf(MV_16x12_lookup_tex[ minpos[31] ].row) + fabsf(MV_16x12_lookup_tex[ minpos[31] ].col) < 14) // forse era < 12... break; iter_mv.col += MV_16x12_lookup_tex[ minpos[31] ].col * 1.75; iter_mv.row += MV_16x12_lookup_tex[ minpos[31] ].row * 1.75; maxloops -= 1; } #endif // Salva mv 16x16 // Questo potrebbe essere fatto meglio, conj 25 thread che lavorano contemporaneamente, // ma devo studiare come indicizzare l'accesso alla matrice globale. C'ho voglia? 
if ( TID == 31 ) { MVs_g[MBoffset].as_mv.row = (short)((MV_16x12_lookup_tex[ minpos[TID] ].row + iter_mv.row) * 8); MVs_g[MBoffset].as_mv.col = (short)((MV_16x12_lookup_tex[ minpos[TID] ].col + iter_mv.col) * 8); } if (split_on == SPLITMV_ON) { // salva mv 4x4 if ( TID < 16 ) { MVs_split_g[MBoffset*24 + TID].as_mv.row = (short)((MV_16x12_lookup_tex[ minpos[TID + (TID / 4) * 4] ].row + iter_mv.row) * 8); MVs_split_g[MBoffset*24 + TID].as_mv.col = (short)((MV_16x12_lookup_tex[ minpos[TID + (TID / 4) * 4] ].col + iter_mv.col) * 8); } // salva mv 8x8 if ( TID < 4 ) { MVs_split_g[MBoffset*24 + 16 + TID].as_mv.row = (short)((MV_16x12_lookup_tex[ minpos[8 * TID + 4] ].row + iter_mv.row) * 8); MVs_split_g[MBoffset*24 + 16 + TID].as_mv.col = (short)((MV_16x12_lookup_tex[ minpos[8 * TID + 4] ].col + iter_mv.col) * 8); } // salva mv 8x16 e 16x8 if ( TID < 2 ) { MVs_split_g[MBoffset*24 + 20 + TID].as_mv.row = (short)((MV_16x12_lookup_tex[ minpos[8 * TID + 22] ].row + iter_mv.row) * 8); MVs_split_g[MBoffset*24 + 20 + TID].as_mv.col = (short)((MV_16x12_lookup_tex[ minpos[8 * TID + 22] ].col + iter_mv.col) * 8); MVs_split_g[MBoffset*24 + 22 + TID].as_mv.row = (short)((MV_16x12_lookup_tex[ minpos[8 * TID + 6] ].row + iter_mv.row) * 8); MVs_split_g[MBoffset*24 + 22 + TID].as_mv.col = (short)((MV_16x12_lookup_tex[ minpos[8 * TID + 6] ].col + iter_mv.col) * 8); } } __syncthreads(); /////////////////////////////////////////////////////////////////////////////////////////// // STEP 2: pixel-scale Motion Vector Search // 1. // Ricerca di un MV per ogni blocco 4x4 // 16 blocchi, 2 thread per blocco. Stesso schema per decidere TID => thread 0 e 4 fanno 1 blocco; 1 e 5 il secondo, ecc... // Risultati sad memorizzati in diff[i][TID] con 0 < i < 15 // Questa volta non possiamo piu' sfruttare che refptr punti alla stesso indice, quindi posso // calcolare contemporaneamente ogni sad per tid e accumulare, ma posso sfruttare il // parallelismo tra mv dello stesso tipo: prima calcolo in parall tutte le 4x4, poi le 8x8, ecc... if (split_on == SPLITMV_ON) { // Update refpointer al miglior mv j = (TID % 4) + (TID / 8) * 8; // Genera 0 1 2 3 0 1 2 3 8 9 10 11 8 9 10 11 16 17... 
// perche' TID 0 e 4 vengono traslati dello stesso mv corrispondente // a quello ora presente in colonna 0 di minpos reftex_offset_x += (MV_16x12_lookup_tex[minpos[j]].col + iter_mv.col); reftex_offset_y += (MV_16x12_lookup_tex[minpos[j]].row + iter_mv.row); for (i = 0; i < 93; i++) { int32_t sad_result; MV_ref offset_mv = MV_lookup_refin_tex[i]; /*uint32_t ref0 = (uint32_t) ( ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 3.5 + offset_mv.col, reftex_offset_y + 0.5 + offset_mv.row ) * 255.0) << 24) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 2.5 + offset_mv.col, reftex_offset_y + 0.5 + offset_mv.row ) * 255.0) << 16) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 1.5 + offset_mv.col, reftex_offset_y + 0.5 + offset_mv.row ) * 255.0) << 8) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 0.5 + offset_mv.col, reftex_offset_y + 0.5 + offset_mv.row ) * 255.0) ) ); uint32_t ref1 = (uint32_t) ( ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 3.5 + offset_mv.col, reftex_offset_y + 1.5 + offset_mv.row ) * 255.0) << 24) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 2.5 + offset_mv.col, reftex_offset_y + 1.5 + offset_mv.row ) * 255.0) << 16) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 1.5 + offset_mv.col, reftex_offset_y + 1.5 + offset_mv.row ) * 255.0) << 8) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 0.5 + offset_mv.col, reftex_offset_y + 1.5 + offset_mv.row ) * 255.0) ) ); sad_result = __vabsdiff4( img0, ref0 ); sad_result += __vabsdiff4( img1, ref1 ); //sad_result = __vvariance4( img0, ref0 ); //sad_result += __vvariance4( img1, ref1 ); */ uint32_t ref0 = (uint32_t) ( ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 3.5 + offset_mv.col, reftex_offset_y + 1.5 + offset_mv.row ) * 255.0) << 24) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 2.5 + offset_mv.col, reftex_offset_y + 0.5 + offset_mv.row ) * 255.0) << 16) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 1.5 + offset_mv.col, reftex_offset_y + 1.5 + offset_mv.row ) * 255.0) << 8) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 0.5 + offset_mv.col, reftex_offset_y + 0.5 + offset_mv.row ) * 255.0) ) ); sad_result = __vabsdiff4( img0, ref0 ); diff[i][TID] = sad_result; } __syncthreads(); for (i = 0; i < 16; i++) { j = i + (i / 4) * 4; diff[TID ][j] += diff[TID ][j+4]; diff[TID+32][j] += diff[TID+32][j+4]; diff[TID+64][j] += diff[TID+64][j+4]; } minpos_refin[TID] = 0; __syncthreads(); for( i = 1; i < 93; i++ ){ if ( diff[0][TID] > diff[i][TID] ) { diff[0][TID] = diff[i][TID]; minpos_refin[TID] = i; } } __syncthreads(); // salva MV della split 4x4 if ( TID < 16 ) { MVs_split_g[MBoffset*24 + TID].as_mv.row += (short)(MV_lookup_refin_tex[ minpos_refin[TID + (TID / 4) * 4] ].row * 8); MVs_split_g[MBoffset*24 + TID].as_mv.col += (short)(MV_lookup_refin_tex[ minpos_refin[TID + (TID / 4) * 4] ].col * 8); } // 2. // Ricerca di un mv per ogni blocco 8x8 // Procedura esattamente identica alla precedente: TID che elaborano stesso blocco avranno // mv impostato coerentemente. Differente accumulazione (per blocco 0: TID 0 1 4 5 8 9 12 13) // Update refpointer al miglior mv //j = (TID / 8) * 8 + 4; // Genera 4 4 4 4 4 4 4 4 12 12 12 12 12 12 12 12 20 20 20 20... j = 8 * ((TID / 2) % 2) + 4 + 16 * (TID / 16); // Genera 4 4 12 12 4 4 12 12 4 4 12 12 4 4 12 12 20 20 28 28 20 20 28 ecc.. 
reftex_offset_x = saved_reftex_x + MV_16x12_lookup_tex[minpos[j]].col + iter_mv.col; reftex_offset_y = saved_reftex_y + MV_16x12_lookup_tex[minpos[j]].row + iter_mv.row; for (i = 0; i < 93; i++) { int32_t sad_result; MV_ref offset_mv = MV_lookup_refin_tex[i]; /* uint32_t ref0 = (uint32_t) ( ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 3.5 + offset_mv.col, reftex_offset_y + 0.5 + offset_mv.row ) * 255.0) << 24) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 2.5 + offset_mv.col, reftex_offset_y + 0.5 + offset_mv.row ) * 255.0) << 16) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 1.5 + offset_mv.col, reftex_offset_y + 0.5 + offset_mv.row ) * 255.0) << 8) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 0.5 + offset_mv.col, reftex_offset_y + 0.5 + offset_mv.row ) * 255.0) ) ); uint32_t ref1 = (uint32_t) ( ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 3.5 + offset_mv.col, reftex_offset_y + 1.5 + offset_mv.row ) * 255.0) << 24) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 2.5 + offset_mv.col, reftex_offset_y + 1.5 + offset_mv.row ) * 255.0) << 16) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 1.5 + offset_mv.col, reftex_offset_y + 1.5 + offset_mv.row ) * 255.0) << 8) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 0.5 + offset_mv.col, reftex_offset_y + 1.5 + offset_mv.row ) * 255.0) ) ); sad_result = __vabsdiff4( img0, ref0 ); sad_result += __vabsdiff4( img1, ref1 ); */ //sad_result = __vvariance4( img0, ref0 ); //sad_result += __vvariance4( img1, ref1 ); uint32_t ref0 = (uint32_t) ( ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 3.5 + offset_mv.col, reftex_offset_y + 1.5 + offset_mv.row ) * 255.0) << 24) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 2.5 + offset_mv.col, reftex_offset_y + 0.5 + offset_mv.row ) * 255.0) << 16) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 1.5 + offset_mv.col, reftex_offset_y + 1.5 + offset_mv.row ) * 255.0) << 8) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 0.5 + offset_mv.col, reftex_offset_y + 0.5 + offset_mv.row ) * 255.0) ) ); sad_result = __vabsdiff4( img0, ref0 ); diff[i][TID] = sad_result; } __syncthreads(); // Sono pigro, copio e incollo la stessa manfrina for (i = 0; i < 16; i++) { j = i + (i / 4) * 4; diff[TID ][j] += diff[TID ][j+4]; diff[TID+32][j] += diff[TID+32][j+4]; diff[TID+64][j] += diff[TID+64][j+4]; } __syncthreads(); for (i = 0; i < 4; i++) { j = 2 * i + (i / 2) * 12; diff[TID ][(8 * i) + 4] = diff[TID ][j] + diff[TID ][j + 1] + diff[TID ][j + 8] + diff[TID] [j + 9]; diff[TID+32][(8 * i) + 4] = diff[TID+32][j] + diff[TID+32][j + 1] + diff[TID+32][j + 8] + diff[TID+32][j + 9]; diff[TID+64][(8 * i) + 4] = diff[TID+64][j] + diff[TID+64][j + 1] + diff[TID+64][j + 8] + diff[TID+64][j + 9]; } __syncthreads(); minpos_refin[TID] = 0; __syncthreads(); // 32 thread, ognuno ricerca il minimo lungo ogni colonna // anche se le colonne interessanti sono solo la 4, 12, 20 e 28 for( i = 1; i < 93; i++ ){ if ( diff[0][TID] > diff[i][TID] ) { diff[0][TID] = diff[i][TID]; minpos_refin[TID] = i; } } __syncthreads(); // Salva i MV della split 8x8 if ( TID < 4 ) { MVs_split_g[MBoffset*24 + 16 + TID].as_mv.row += (short)(MV_lookup_refin_tex[ minpos_refin[8 * TID + 4] ].row * 8); MVs_split_g[MBoffset*24 + 16 + TID].as_mv.col += (short)(MV_lookup_refin_tex[ minpos_refin[8 * TID + 4] ].col * 8); } // 4. // Ricerca di un mv per ogni blocco 8x16 // TODO? 
// Ho paura che diventi un'operazione un po' troppo onerosa, per ora preferisco lasciare // le 8x16 con dettaglio a due pixel (senza refining search) } // 5. // Refining search su blocco 16x16 // Update RefPointer to the best motion vector reftex_offset_x = saved_reftex_x + (MV_16x12_lookup_tex[minpos[31]]).col + iter_mv.col; reftex_offset_y = saved_reftex_y + (MV_16x12_lookup_tex[minpos[31]]).row + iter_mv.row; for (i = 0; i < 93; i++) { int32_t sad_result; MV_ref offset_mv = MV_lookup_refin_tex[i]; /* uint32_t ref0 = (uint32_t) ( ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 3.5 + offset_mv.col, reftex_offset_y + 0.5 + offset_mv.row ) * 255.0) << 24) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 2.5 + offset_mv.col, reftex_offset_y + 0.5 + offset_mv.row ) * 255.0) << 16) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 1.5 + offset_mv.col, reftex_offset_y + 0.5 + offset_mv.row ) * 255.0) << 8) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 0.5 + offset_mv.col, reftex_offset_y + 0.5 + offset_mv.row ) * 255.0) ) ); uint32_t ref1 = (uint32_t) ( ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 3.5 + offset_mv.col, reftex_offset_y + 1.5 + offset_mv.row ) * 255.0) << 24) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 2.5 + offset_mv.col, reftex_offset_y + 1.5 + offset_mv.row ) * 255.0) << 16) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 1.5 + offset_mv.col, reftex_offset_y + 1.5 + offset_mv.row ) * 255.0) << 8) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 0.5 + offset_mv.col, reftex_offset_y + 1.5 + offset_mv.row ) * 255.0) ) ); sad_result = __vabsdiff4( img0, ref0 ); sad_result += __vabsdiff4( img1, ref1 ); //sad_result = __vvariance4( img0, ref0 ); //sad_result += __vvariance4( img1, ref1 ); */ uint32_t ref0 = (uint32_t) ( ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 3.5 + offset_mv.col, reftex_offset_y + 1.5 + offset_mv.row ) * 255.0) << 24) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 2.5 + offset_mv.col, reftex_offset_y + 0.5 + offset_mv.row ) * 255.0) << 16) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 1.5 + offset_mv.col, reftex_offset_y + 1.5 + offset_mv.row ) * 255.0) << 8) | ( (uint8_t)(tex2D<float>( ref_tex, reftex_offset_x + 0.5 + offset_mv.col, reftex_offset_y + 0.5 + offset_mv.row ) * 255.0) ) ); sad_result = __vabsdiff4( img0, ref0 ); diff[i][TID] = sad_result; } __syncthreads(); for (i=0; i<16; i++) { diff[TID ][i] += diff[TID ][i+16]; diff[TID+32][i] += diff[TID+32][i+16]; diff[TID+64][i] += diff[TID+64][i+16]; } __syncthreads(); for (i=0; i<8; i++) { diff[TID ][i] += diff[TID ][i+8]; diff[TID+32][i] += diff[TID+32][i+8]; diff[TID+64][i] += diff[TID+64][i+8]; } __syncthreads(); for (i=0; i<4; i++) { diff[TID ][i] += diff[TID ][i+4]; diff[TID+32][i] += diff[TID+32][i+4]; diff[TID+64][i] += diff[TID+64][i+4]; } __syncthreads(); diff[TID ][0] += (diff[TID ][1] + diff[TID ][2] + diff[TID ][3]); diff[TID+32][0] += (diff[TID+32][1] + diff[TID+32][2] + diff[TID+32][3]); diff[TID+64][0] += (diff[TID+64][1] + diff[TID+64][2] + diff[TID+64][3]); __syncthreads(); minpos_refin[TID] = 0; __syncthreads(); // 32 thread, ognuno ricerca il minimo lungo ogni colonna // anche se le colonne interessanti sono solo la 0 for( i = 1; i < 93; i++ ){ if ( diff[0][TID] > diff[i][TID] ) { diff[0][TID] = diff[i][TID]; minpos_refin[TID] = i; } } __syncthreads(); if( TID == 0 ) { MVs_g[MBoffset].as_mv.row += (short)(MV_lookup_refin_tex[ minpos_refin[0] ].row * 8); MVs_g[MBoffset].as_mv.col += 
            (short)(MV_lookup_refin_tex[ minpos_refin[0] ].col * 8);
    }
}

inline void me_kernel_launch_tex( VP8_COMMON * const common, const cudaTextureObject_t in_tex,
        const cudaTextureObject_t ref_tex, int const streamID, int const split_on,
        int_mv * const MVs, int_mv * const MVs_split ) {

#if CUDA_VERBOSE
    float elapsedTime;
    cudaEvent_t start, stop;
    CHECK(cudaEventCreate(&start));
    CHECK(cudaEventCreate(&stop));
    CHECK(cudaEventRecord(start));
#endif

    me_cuda_tex <<< common->GPU.gridDim, common->GPU.blockDim, 0,
            common->GPU.streams.frame[streamID] >>> (in_tex, ref_tex, streamID, common->GPU.streamSize,
            common->gpu_frame.stride, common->gpu_frame.width, common->gpu_frame.num_MB_width,
            split_on, MVs, MVs_split );

#if CUDA_VERBOSE
    CHECK(cudaEventRecord(stop));
    CHECK(cudaEventSynchronize(stop));
    CHECK(cudaEventElapsedTime(&elapsedTime, start, stop));
    printf("\n[GPU] ME elapsed time streams[%d]: %.4f ms\n",streamID,elapsedTime);
    CHECK(cudaEventDestroy(start));
    CHECK(cudaEventDestroy(stop));
    add_STATS((double)elapsedTime,0);
#endif

}

void me_cuda_launch_interleaved_tex( VP8_COMMON * const cm, int fb_idx, int ref_frame_flags ) {

    //int MV_size_16 = 16*sizeof(int_mv);
    int MV_size_16 = cm->GPU.streamSize * sizeof(int_mv);
    // for printing information about reference frame flags and their usage, I left a commented printf at line 3625
    // at the beginning of encode_frame_to_data_rate(..) in onyx_if.c

    for (int t = 0; t < cm->GPU.num_mb16th; t++) {

        int s = cm->GPU.streamLaunchOrder[t];
        //int offset = 16*s;
        int offset = cm->GPU.streamSize * s;

        // bugfix for frames whose number of MBs is not divisible by 16:
        // previously too many threads were launched and cudaMemcpyAsync read past the end of the arrays
        if (offset + cm->GPU.streamSize > cm->gpu_frame.num_mv)
            MV_size_16 = ( offset + cm->GPU.streamSize - cm->gpu_frame.num_mv ) * sizeof( int_mv );

        if ((ref_frame_flags & GPUFLAG_LAST_FRAME) && (cm->yv12_fb[cm->lst_fb_idx].flags & GPUFLAG_LAST_FRAME)) {
            me_kernel_launch_tex(cm, cm->gpu_frame.rawFbTex, (cm->gpu_frame.fbTex)[cm->lst_fb_idx], s,
                    SPLITMV_ON, (cm->gpu_frame.MVs_g)[0], (cm->gpu_frame.MVs_split_g) );
            CHECK(cudaMemcpyAsync( &(cm->host_frame.MVs_h)[0][offset], &(cm->gpu_frame.MVs_g)[0][offset],
                    MV_size_16, cudaMemcpyDeviceToHost, cm->GPU.streams.frame[s]));
            CHECK(cudaMemcpyAsync( &(cm->host_frame.MVs_split_h)[offset], &(cm->gpu_frame.MVs_split_g)[offset],
                    24 * MV_size_16, cudaMemcpyDeviceToHost, cm->GPU.streams.frame[s]));
        }

        // If ref_frame_flags signals a gold frame and the flags of the fb pointed to by gld_fb_idx mark it as gold, then...
        if ((ref_frame_flags & GPUFLAG_GOLD_FRAME) && (cm->yv12_fb[cm->gld_fb_idx].flags & GPUFLAG_GOLD_FRAME)) {
            me_kernel_launch_tex(cm, cm->gpu_frame.rawFbTex, (cm->gpu_frame.fbTex)[cm->gld_fb_idx], s,
                    SPLITMV_OFF, (cm->gpu_frame.MVs_g)[1], 0 );
            CHECK(cudaMemcpyAsync( &(cm->host_frame.MVs_h)[1][offset], &(cm->gpu_frame.MVs_g)[1][offset],
                    MV_size_16, cudaMemcpyDeviceToHost, cm->GPU.streams.frame[s]));
            //CHECK(cudaMemcpyAsync( &(cm->host_frame.MVs_split_h)[1][offset],&(cm->gpu_frame.MVs_split_g)[1][offset],24 * MV_size_16, cudaMemcpyDeviceToHost, cm->GPU.streams.frame[s]));
        }

        // If ref_frame_flags signals an altref frame and the flags of the fb pointed to by alt_fb_idx mark it as altref, then...
        if ((ref_frame_flags & GPUFLAG_ALTR_FRAME) && (cm->yv12_fb[cm->alt_fb_idx].flags & GPUFLAG_ALTR_FRAME)) {
            me_kernel_launch_tex(cm, cm->gpu_frame.rawFbTex, (cm->gpu_frame.fbTex)[cm->alt_fb_idx], s,
                    SPLITMV_OFF, (cm->gpu_frame.MVs_g)[2], 0 );
            CHECK(cudaMemcpyAsync( &(cm->host_frame.MVs_h)[2][offset], &(cm->gpu_frame.MVs_g)[2][offset],
                    MV_size_16, cudaMemcpyDeviceToHost, cm->GPU.streams.frame[s]));
            //CHECK(cudaMemcpyAsync( &(cm->host_frame.MVs_split_h)[2][offset],&(cm->gpu_frame.MVs_split_g)[2][offset],24 * MV_size_16, cudaMemcpyDeviceToHost, cm->GPU.streams.frame[s]));
        }
    }
}

#endif  /* HAVE_CUDA_ENABLED_DEVICE */

#ifdef __cplusplus
}
#endif
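The motion-estimation kernel above packs four 8-bit luma samples into one 32-bit word and compares source and reference rows with __vabsdiff4. The sketch below restates that packed-SAD idea in isolation; the helper names (pack_row4, sad_row4, sad4x1_demo) are illustrative only and are not part of the encoder source.

// Minimal CUDA sketch of the packed 4-pixel SAD used by the kernel above (assumed helpers).
__device__ __forceinline__ unsigned int pack_row4(const unsigned char *p) {
    // Pack four consecutive 8-bit pixels into one 32-bit word (pixel 0 in the low byte).
    return (unsigned int)p[0] | ((unsigned int)p[1] << 8) |
           ((unsigned int)p[2] << 16) | ((unsigned int)p[3] << 24);
}

__device__ __forceinline__ unsigned int sad_row4(unsigned int src, unsigned int ref) {
    // __vabsdiff4 returns the per-byte |src - ref| packed into one word;
    // summing its four bytes gives the SAD of a 4-pixel row.
    unsigned int d = __vabsdiff4(src, ref);
    return (d & 0xffu) + ((d >> 8) & 0xffu) + ((d >> 16) & 0xffu) + (d >> 24);
}

__global__ void sad4x1_demo(const unsigned char *src, const unsigned char *ref, unsigned int *out) {
    // One thread per 4-pixel row; purely illustrative of the packing scheme.
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    out[row] = sad_row4(pack_row4(src + 4 * row), pack_row4(ref + 4 * row));
}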
1997a49b5ffc8477f241875ab6f443cb7b17440b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#include <hip/hip_fp16.h>
#include "core/providers/cuda/cu_inc/common.cuh"
#include "contrib_ops/cuda/math/isfinite.cuh"

namespace onnxruntime {
namespace cuda {

template <typename TSrc>
__global__ void _IsFinite(const TSrc* input, bool* output, CUDA_LONG N) {
  CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
  output[id] = IsFiniteScalar(input[id]);
}

template <typename TSrc>
void IsFinite(hipStream_t stream, const TSrc* input, bool* output, size_t count) {
  int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock));
  CUDA_LONG N = static_cast<CUDA_LONG>(count);
  hipLaunchKernelGGL(( _IsFinite), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, input, output, N);
}

#define SPECIALIZE_ISFINITE_IMPL(T) \
  template void IsFinite(hipStream_t stream, const T* input, bool* output, size_t count);

SPECIALIZE_ISFINITE_IMPL(half)
SPECIALIZE_ISFINITE_IMPL(float)
SPECIALIZE_ISFINITE_IMPL(double)

}  // namespace cuda
}  // namespace onnxruntime
1997a49b5ffc8477f241875ab6f443cb7b17440b.cu
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#include <cuda_fp16.h>
#include "core/providers/cuda/cu_inc/common.cuh"
#include "contrib_ops/cuda/math/isfinite.cuh"

namespace onnxruntime {
namespace cuda {

template <typename TSrc>
__global__ void _IsFinite(const TSrc* input, bool* output, CUDA_LONG N) {
  CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
  output[id] = IsFiniteScalar(input[id]);
}

template <typename TSrc>
void IsFinite(cudaStream_t stream, const TSrc* input, bool* output, size_t count) {
  int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock));
  CUDA_LONG N = static_cast<CUDA_LONG>(count);
  _IsFinite<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(input, output, N);
}

#define SPECIALIZE_ISFINITE_IMPL(T) \
  template void IsFinite(cudaStream_t stream, const T* input, bool* output, size_t count);

SPECIALIZE_ISFINITE_IMPL(half)
SPECIALIZE_ISFINITE_IMPL(float)
SPECIALIZE_ISFINITE_IMPL(double)

}  // namespace cuda
}  // namespace onnxruntime
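For context, here is a minimal host-side sketch of how the IsFinite launcher above could be driven. It assumes the declarations from this file are visible through onnxruntime's headers; the buffer names (d_input, d_flags) and the wrapper function are illustrative, not part of onnxruntime.

// Hedged usage sketch: d_input and d_flags are assumed to be device allocations of `count` elements.
#include <cuda_runtime.h>

void run_isfinite_example(const float *d_input, bool *d_flags, size_t count, cudaStream_t stream) {
    // Launch the elementwise finiteness check on the given stream.
    onnxruntime::cuda::IsFinite<float>(stream, d_input, d_flags, count);
    // Wait for the kernel so d_flags can be copied back or inspected afterwards.
    cudaStreamSynchronize(stream);
}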
b8387fca2ecb44b237c1031ca23732132828ac64.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <string.h> #include "devicedefine.h" #define Rol(word,bits) (((word) << (bits)) | ((word) >> (32-(bits)))) #define Ror(word,bits) (((word) >> (bits)) | ((word) << (32-(bits)))) __device__ inline unsigned long f(unsigned long B,unsigned long C,unsigned long D, int t) { if (t < 20) { return ((B & C)|((~B) & D)); } else if ((t > 19) && (t < 40)) { return (B ^ C ^ D); } else if ((t > 39) && (t < 60)) { return ((B & C)|(B & D)|(C & D)); } else if (t > 59) { return (B ^ C ^ D); } else return 0; } __device__ inline void SHA1(int s[],int ln, unsigned long *H) { unsigned long K[80]; unsigned long A,B,C,D,E,TEMP; int r,k; int ln_static = ln; H[0]=0x67452301; H[1]=0xefcdab89; H[2]=0x98badcfe; H[3]=0x10325476; H[4]=0xc3d2e1f0; r = (ln+1)/64; //r la so khoi chia message(salt) thanh cac khoi co do dai 512bit hay 64byte //kiem tra neu phan du chia cho 64 lon hon 54 thi tang them mot khoi nua, neu //khong thi thoi if (((ln+1)-r*64) > 56) r=r+1; // initialize Constants for(int t=0; t<80; t++) { if (t<20) { K[t] = 0x5a827999; } if ((t>19)&&(t<40)) { K[t] = 0x6ED9EBA1; } if ((t>39)&&(t<60)) { K[t] = 0x8F1BBCDC; } if (t>59) { K[t] = 0xca62c1d6; } } //Lap lai phep xu ly cho moi khoi duoc chia for(int l=0; l <= r; l++) { unsigned long W[80]; for (int i=0; i<80; i++) W[i]=0; //Initialize Text for (int i=0; i<16; i++) { for(int j=0; j<4; j++) { if (4*i+j <= ln) { k = s[64*l+4*i+j]; } else { k = 0; } if (k<0) { k = k +256; } if (4*i+j == ln) { k = 0x80; } int temp=1; for (int z=0;z<3-j;z++) temp*=256; W[i]+=k*temp; } } if ((W[14]==0)&&(W[15]==0)) { W[15]=8*ln_static; } // Hash Cycle for (int t = 16; t <80; t++) { W[t] = Rol(W[t-3]^W[t-8]^W[t-14]^W[t-16],1); } A = H[0]; B = H[1]; C = H[2]; D = H[3]; E = H[4]; for(int t = 0; t < 80; t++) { TEMP = Rol(A,5) + f(B,C,D,t) + E + W[t] + K[t]; E = D; D = C; C = Rol(B,30); B = A; A = TEMP; } H[0] = H[0] + A; H[1] = H[1] + B; H[2] = H[2] + C; H[3] = H[3] + D; H[4] = H[4] + E; ln -= 64; } } __device__ inline void HMAC(int S[],int saltLen, int P[],int passLen,unsigned long *H) { int s[160]; unsigned long Key[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; unsigned long X[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; unsigned long Y[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; int k,i1,j1; //Process string key into sub-key //Hash key in case it is less than 64 bytes for (int i=0;i<160;i++) s[i]=0; if (passLen > 64) { SHA1(P,passLen,H); Key[0] = H[0]; Key[1] = H[1]; Key[2] = H[2]; Key[3] = H[3]; Key[4] = H[4]; } else { for(int i=0; i<16; i++) { Key[i]=s[i]=0; for(int j=0; j<4; j++) { if (4*i+j < passLen) { k = P[4*i+j]; } else { k = 0; } if (k<0) { k = k + 256; } int temp=1; for (int z=0;z<3-j;z++) temp*=256; Key[i]+=k*temp; } } } for(int i=0; i<16; i++) { X[i] = Key[i]^0x36363636; Y[i] = Key[i]^0x5c5c5c5c; } //Turn X-Array into a String unsigned long X1[16]; for (int i=0; i<16;i++) X1[i]=X[i]; i1=0; for(int i=0; i<16; i++) { for(int j=0; j<4; j++) { X[i] = X1[i]; s[i1]= (X[i] >> 8*(3-j)) % 256; i1++; } } for(j1=i1; j1<(saltLen+i1); j1++) { s[j1] = S[j1-i1]; } SHA1(s,saltLen+i1,H); for(j1=0; j1 < i1 + saltLen; j1++) { s[j1] = 0; } //Turn Y-Array into a String unsigned long Y1[16]; for (int i=0; i<16;i++) Y1[i]=Y[i]; i1=0; for(int i=0; i<16; i++) { for(int j=0; j<4; j++) { Y[i]=Y1[i]; s[i1]= (Y[i] >> 8*(3-j)) % 256; i1++; } } unsigned long H1[5]; for(int i=0; i<5; i++) H1[i]=H[i]; //Append Hashed X-Array to Y-Array in string for(int i=0; i<5; i++) { for(int j=0; j<4; j++) { H1[i]=H[i]; s[i1]= (H1[i] 
>> 8*(3-j)) % 256; i1++; } } //Hash final concatenated string SHA1(s,i1,H); } __device__ inline int PBKDF2(int S[],int saltLen,int stored_pvv[],int dkLen,int *P, int passLen, unsigned char *Key) { unsigned long T[17]={0},H[5]; int U[20]={0}; unsigned long L[5]={0,0,0,0,0}; int i1, j1; dkLen = 2*dkLen + 2; int l = (dkLen/20)+1; for(int i=1; i<=l; i++) { for(i1 =0; i1 < saltLen; i1++) U[i1] = S[i1]; U[i1++]=0x00; U[i1++]=0x00; U[i1++]=0x00; U[i1++]=i; //U chinh la S trong truong hop nay L[0] = L[1] = L[2] = L[3] = L[4] = 0; for(int j=1; j<=1000; j++) { HMAC(U,i1,P,passLen,H); L[0] = L[0]^H[0]; L[1] = L[1]^H[1]; L[2] = L[2]^H[2]; L[3] = L[3]^H[3]; L[4] = L[4]^H[4]; for(j1= 0; j1 < i1; j1 ++) U[j1] = 0; i1 =0; for(int x=0; x<5; x++) { for(int y=0; y<4; y++) { U[i1]= (H[x] >> 8*(3-y)) % 256; i1++; } } } T[5*(i-1)] = L[0]; T[5*(i-1)+1] = L[1]; T[5*(i-1)+2] = L[2]; T[5*(i-1)+3] = L[3]; T[5*(i-1)+4] = L[4]; } //Lay ra de kiem tra password verification value i1= (dkLen -2) /4; if ((stored_pvv[0] == ((T[i1] >> 24)&0x000000FF)) && (stored_pvv[1] == ((T[i1] >> 16) & 0x000000FF)) ) { for (int i=0;i<4;i++) { Key[4*i ]=(unsigned char)(T[i]>>24); Key[4*i+1]=(unsigned char)(T[i]>>16); Key[4*i+2]=(unsigned char)(T[i]>>8); Key[4*i+3]=(unsigned char)(T[i]); } return 1; } else return 0; } /*Ham xu ly sinh mat khau dung tu dien*/ __device__ int d_strlen(char *str) { int len = 0; for(int i=0; ; i++) { if(str[i] != '\0' && str[i] != '\n') len++; else break; } return len; } __device__ int d_atoi(char *str) { int a[10] = {0,1,2,3,4,5,6,7,8,9}; int result = 0; for(int i =0; i< d_strlen(str); i++) { result = result + a[str[i] - 48]*(int)__powf(10, d_strlen(str) - i - 1); } return result; } __device__ void d_strcpy(char *str1, char *str2) { for(int i=0; i< d_strlen(str2); i++) { str1[i] = str2[i]; } str1[d_strlen(str2)] = '\0'; } __device__ void d_strcat(char *str1, char *str2) { int len = d_strlen(str1); for(int i=0; i< d_strlen(str2); i++) { str1[len + i] = str2[i]; } str1[len + d_strlen(str2)] = '\0'; } __device__ int d_strcmp(char *str1, char *str2) { int len = d_strlen(str2); int result = 1; for(int i=0; i< len; i++) { if (str1[i] == str2[i]) result = 1; else { result = 0; break; } } return result; } __global__ void RunKernel(int base,char *devPass, char *devPtr, int width, int qualities,int wordCount,char *pre_terminal,int pre_terminal_len, int *salt, int saltLen, int *stored_pvv, int dkLen,unsigned char *data,unsigned int len, unsigned char *out, unsigned char extension, unsigned char *Key,int* ret,fcrypt_ctx *zcx, aes_ctx *encr_ctx,z_stream *strm,struct inflate_state FAR *d_state) { int index = 0,ok=1; int begin; int end; char part1[20]=""; char part2[20]=""; char part3[20]=""; char password[60] = ""; char number_characters[5]=""; unsigned char tmp_Key[16]; int k1,k2,k3; const unsigned int id = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; unsigned char PDF[7]={0x25,0x50,0x44,0x46,0x2d,0x31,0x2e}; unsigned char DOC[14]={0xd0,0xcf,0x11,0xe0,0xa1,0xb1,0x1a,0xe1,0x00,0x00,0x00,0x00,0x00,0x00}; //pre_terminal duoc chia thanh ba phan, trong do part2 la can duoc ghep voi //tu co nghia trong tu dien. Part1 va part3 giua nguyen. 
//Luu vi tri phan chia for(int i= pre_terminal_len -1; i>=0; i--) { if(pre_terminal[i] == 'H') { index++; if( ((index + 1)/2 == qualities) && (index % 2 == 1)) { end = i; } if( ((index/2)== qualities) && (index % 2 == 0)) { begin = i; break; } } } //Bat dau chia k1=k2=k3=0; for(int i=0; i< pre_terminal_len; i++) { if(i< begin) { part1[k1++] = pre_terminal[i]; } else if(i>= begin && i<= end) { part2[k2++] = pre_terminal[i]; } else { part3[k3++] = pre_terminal[i]; } } part1[k1] = '\0'; part2[k2] = '\0'; part3[k3] = '\0'; //Lay ra do dai xau can chen. index = 0; for(int i=0; i< k2;i++) { if(part2[i] >= '0' && part2[i] <='9') { number_characters[index++] = part2[i]; } } number_characters[index] = '\0'; if (base + id <= wordCount) { d_strcpy((char*)devPass+ id*width, ""); //Lay ve buffer, chua mot tu char *buffer = (char*)((char*)devPtr + (base + id)*width); //Chinh la row if(d_strlen(buffer) == d_atoi(number_characters)) { for(int g=0; g<60; g++) password[g] = '\0'; d_strcpy(password,part1); d_strcat(password,buffer); d_strcat(password,part3); if(qualities == 1) { int myPass[60]; for(int kk=0; kk< d_strlen(password); kk++) myPass[kk] = password[kk]; if (PBKDF2(salt,saltLen,stored_pvv,dkLen,myPass,d_strlen(password),tmp_Key)) { // for (int kk=0;kk<16;kk++) (Key+id*16)[kk]=tmp_Key[kk]; // ret[id]=id; //giai ma aes_set_encrypt_key(tmp_Key, KEY_LENGTH(1), encr_ctx+id); encr_data(data+id*CHUNK,len,zcx+id,encr_ctx+id); //giai nen (strm+id)->avail_in = 0; (strm+id)->next_in = Z_NULL; (void)inflateInit2(strm+id,-13,d_state+id); (strm+id)->avail_in = len; (strm+id)->next_in = data+id*CHUNK; (strm+id)->avail_out = CHUNK; (strm+id)->next_out = out + id*CHUNK; (void)inflate(strm+id, Z_NO_FLUSH,d_state+id); //nhan dang switch(extension) { case dotpdf: for (int i=0;i<7;i++) if (PDF[i]!=(out+id*CHUNK)[i]) ok=0; break; case dotdoc: for (int i=0;i<14;i++) if (DOC[i]!=(out+id*CHUNK)[i]) ok=0; break; case dottxt: for (int i=0;i<len;i++) if (((out+id*CHUNK)[i] < 0x20) || ((out+id*CHUNK)[i] > 0x7E)) ok=0; break; default: ok=0; break; } //lay ket qua dung if (ok) d_strcpy((char*)devPass+ ((unsigned int)id)*width,password); } } else { //giam qualities va goi de quy. } } } } /*Ket thuc phan them*/
b8387fca2ecb44b237c1031ca23732132828ac64.cu
#include <string.h> #include "devicedefine.h" #define Rol(word,bits) (((word) << (bits)) | ((word) >> (32-(bits)))) #define Ror(word,bits) (((word) >> (bits)) | ((word) << (32-(bits)))) __device__ inline unsigned long f(unsigned long B,unsigned long C,unsigned long D, int t) { if (t < 20) { return ((B & C)|((~B) & D)); } else if ((t > 19) && (t < 40)) { return (B ^ C ^ D); } else if ((t > 39) && (t < 60)) { return ((B & C)|(B & D)|(C & D)); } else if (t > 59) { return (B ^ C ^ D); } else return 0; } __device__ inline void SHA1(int s[],int ln, unsigned long *H) { unsigned long K[80]; unsigned long A,B,C,D,E,TEMP; int r,k; int ln_static = ln; H[0]=0x67452301; H[1]=0xefcdab89; H[2]=0x98badcfe; H[3]=0x10325476; H[4]=0xc3d2e1f0; r = (ln+1)/64; //r la so khoi chia message(salt) thanh cac khoi co do dai 512bit hay 64byte //kiem tra neu phan du chia cho 64 lon hon 54 thi tang them mot khoi nua, neu //khong thi thoi if (((ln+1)-r*64) > 56) r=r+1; // initialize Constants for(int t=0; t<80; t++) { if (t<20) { K[t] = 0x5a827999; } if ((t>19)&&(t<40)) { K[t] = 0x6ED9EBA1; } if ((t>39)&&(t<60)) { K[t] = 0x8F1BBCDC; } if (t>59) { K[t] = 0xca62c1d6; } } //Lap lai phep xu ly cho moi khoi duoc chia for(int l=0; l <= r; l++) { unsigned long W[80]; for (int i=0; i<80; i++) W[i]=0; //Initialize Text for (int i=0; i<16; i++) { for(int j=0; j<4; j++) { if (4*i+j <= ln) { k = s[64*l+4*i+j]; } else { k = 0; } if (k<0) { k = k +256; } if (4*i+j == ln) { k = 0x80; } int temp=1; for (int z=0;z<3-j;z++) temp*=256; W[i]+=k*temp; } } if ((W[14]==0)&&(W[15]==0)) { W[15]=8*ln_static; } // Hash Cycle for (int t = 16; t <80; t++) { W[t] = Rol(W[t-3]^W[t-8]^W[t-14]^W[t-16],1); } A = H[0]; B = H[1]; C = H[2]; D = H[3]; E = H[4]; for(int t = 0; t < 80; t++) { TEMP = Rol(A,5) + f(B,C,D,t) + E + W[t] + K[t]; E = D; D = C; C = Rol(B,30); B = A; A = TEMP; } H[0] = H[0] + A; H[1] = H[1] + B; H[2] = H[2] + C; H[3] = H[3] + D; H[4] = H[4] + E; ln -= 64; } } __device__ inline void HMAC(int S[],int saltLen, int P[],int passLen,unsigned long *H) { int s[160]; unsigned long Key[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; unsigned long X[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; unsigned long Y[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; int k,i1,j1; //Process string key into sub-key //Hash key in case it is less than 64 bytes for (int i=0;i<160;i++) s[i]=0; if (passLen > 64) { SHA1(P,passLen,H); Key[0] = H[0]; Key[1] = H[1]; Key[2] = H[2]; Key[3] = H[3]; Key[4] = H[4]; } else { for(int i=0; i<16; i++) { Key[i]=s[i]=0; for(int j=0; j<4; j++) { if (4*i+j < passLen) { k = P[4*i+j]; } else { k = 0; } if (k<0) { k = k + 256; } int temp=1; for (int z=0;z<3-j;z++) temp*=256; Key[i]+=k*temp; } } } for(int i=0; i<16; i++) { X[i] = Key[i]^0x36363636; Y[i] = Key[i]^0x5c5c5c5c; } //Turn X-Array into a String unsigned long X1[16]; for (int i=0; i<16;i++) X1[i]=X[i]; i1=0; for(int i=0; i<16; i++) { for(int j=0; j<4; j++) { X[i] = X1[i]; s[i1]= (X[i] >> 8*(3-j)) % 256; i1++; } } for(j1=i1; j1<(saltLen+i1); j1++) { s[j1] = S[j1-i1]; } SHA1(s,saltLen+i1,H); for(j1=0; j1 < i1 + saltLen; j1++) { s[j1] = 0; } //Turn Y-Array into a String unsigned long Y1[16]; for (int i=0; i<16;i++) Y1[i]=Y[i]; i1=0; for(int i=0; i<16; i++) { for(int j=0; j<4; j++) { Y[i]=Y1[i]; s[i1]= (Y[i] >> 8*(3-j)) % 256; i1++; } } unsigned long H1[5]; for(int i=0; i<5; i++) H1[i]=H[i]; //Append Hashed X-Array to Y-Array in string for(int i=0; i<5; i++) { for(int j=0; j<4; j++) { H1[i]=H[i]; s[i1]= (H1[i] >> 8*(3-j)) % 256; i1++; } } //Hash final concatenated string SHA1(s,i1,H); } __device__ 
inline int PBKDF2(int S[],int saltLen,int stored_pvv[],int dkLen,int *P, int passLen, unsigned char *Key) { unsigned long T[17]={0},H[5]; int U[20]={0}; unsigned long L[5]={0,0,0,0,0}; int i1, j1; dkLen = 2*dkLen + 2; int l = (dkLen/20)+1; for(int i=1; i<=l; i++) { for(i1 =0; i1 < saltLen; i1++) U[i1] = S[i1]; U[i1++]=0x00; U[i1++]=0x00; U[i1++]=0x00; U[i1++]=i; //U chinh la S trong truong hop nay L[0] = L[1] = L[2] = L[3] = L[4] = 0; for(int j=1; j<=1000; j++) { HMAC(U,i1,P,passLen,H); L[0] = L[0]^H[0]; L[1] = L[1]^H[1]; L[2] = L[2]^H[2]; L[3] = L[3]^H[3]; L[4] = L[4]^H[4]; for(j1= 0; j1 < i1; j1 ++) U[j1] = 0; i1 =0; for(int x=0; x<5; x++) { for(int y=0; y<4; y++) { U[i1]= (H[x] >> 8*(3-y)) % 256; i1++; } } } T[5*(i-1)] = L[0]; T[5*(i-1)+1] = L[1]; T[5*(i-1)+2] = L[2]; T[5*(i-1)+3] = L[3]; T[5*(i-1)+4] = L[4]; } //Lay ra de kiem tra password verification value i1= (dkLen -2) /4; if ((stored_pvv[0] == ((T[i1] >> 24)&0x000000FF)) && (stored_pvv[1] == ((T[i1] >> 16) & 0x000000FF)) ) { for (int i=0;i<4;i++) { Key[4*i ]=(unsigned char)(T[i]>>24); Key[4*i+1]=(unsigned char)(T[i]>>16); Key[4*i+2]=(unsigned char)(T[i]>>8); Key[4*i+3]=(unsigned char)(T[i]); } return 1; } else return 0; } /*Ham xu ly sinh mat khau dung tu dien*/ __device__ int d_strlen(char *str) { int len = 0; for(int i=0; ; i++) { if(str[i] != '\0' && str[i] != '\n') len++; else break; } return len; } __device__ int d_atoi(char *str) { int a[10] = {0,1,2,3,4,5,6,7,8,9}; int result = 0; for(int i =0; i< d_strlen(str); i++) { result = result + a[str[i] - 48]*(int)__powf(10, d_strlen(str) - i - 1); } return result; } __device__ void d_strcpy(char *str1, char *str2) { for(int i=0; i< d_strlen(str2); i++) { str1[i] = str2[i]; } str1[d_strlen(str2)] = '\0'; } __device__ void d_strcat(char *str1, char *str2) { int len = d_strlen(str1); for(int i=0; i< d_strlen(str2); i++) { str1[len + i] = str2[i]; } str1[len + d_strlen(str2)] = '\0'; } __device__ int d_strcmp(char *str1, char *str2) { int len = d_strlen(str2); int result = 1; for(int i=0; i< len; i++) { if (str1[i] == str2[i]) result = 1; else { result = 0; break; } } return result; } __global__ void RunKernel(int base,char *devPass, char *devPtr, int width, int qualities,int wordCount,char *pre_terminal,int pre_terminal_len, int *salt, int saltLen, int *stored_pvv, int dkLen,unsigned char *data,unsigned int len, unsigned char *out, unsigned char extension, unsigned char *Key,int* ret,fcrypt_ctx *zcx, aes_ctx *encr_ctx,z_stream *strm,struct inflate_state FAR *d_state) { int index = 0,ok=1; int begin; int end; char part1[20]=""; char part2[20]=""; char part3[20]=""; char password[60] = ""; char number_characters[5]=""; unsigned char tmp_Key[16]; int k1,k2,k3; const unsigned int id = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; unsigned char PDF[7]={0x25,0x50,0x44,0x46,0x2d,0x31,0x2e}; unsigned char DOC[14]={0xd0,0xcf,0x11,0xe0,0xa1,0xb1,0x1a,0xe1,0x00,0x00,0x00,0x00,0x00,0x00}; //pre_terminal duoc chia thanh ba phan, trong do part2 la can duoc ghep voi //tu co nghia trong tu dien. Part1 va part3 giua nguyen. 
//Luu vi tri phan chia for(int i= pre_terminal_len -1; i>=0; i--) { if(pre_terminal[i] == 'H') { index++; if( ((index + 1)/2 == qualities) && (index % 2 == 1)) { end = i; } if( ((index/2)== qualities) && (index % 2 == 0)) { begin = i; break; } } } //Bat dau chia k1=k2=k3=0; for(int i=0; i< pre_terminal_len; i++) { if(i< begin) { part1[k1++] = pre_terminal[i]; } else if(i>= begin && i<= end) { part2[k2++] = pre_terminal[i]; } else { part3[k3++] = pre_terminal[i]; } } part1[k1] = '\0'; part2[k2] = '\0'; part3[k3] = '\0'; //Lay ra do dai xau can chen. index = 0; for(int i=0; i< k2;i++) { if(part2[i] >= '0' && part2[i] <='9') { number_characters[index++] = part2[i]; } } number_characters[index] = '\0'; if (base + id <= wordCount) { d_strcpy((char*)devPass+ id*width, ""); //Lay ve buffer, chua mot tu char *buffer = (char*)((char*)devPtr + (base + id)*width); //Chinh la row if(d_strlen(buffer) == d_atoi(number_characters)) { for(int g=0; g<60; g++) password[g] = '\0'; d_strcpy(password,part1); d_strcat(password,buffer); d_strcat(password,part3); if(qualities == 1) { int myPass[60]; for(int kk=0; kk< d_strlen(password); kk++) myPass[kk] = password[kk]; if (PBKDF2(salt,saltLen,stored_pvv,dkLen,myPass,d_strlen(password),tmp_Key)) { // for (int kk=0;kk<16;kk++) (Key+id*16)[kk]=tmp_Key[kk]; // ret[id]=id; //giai ma aes_set_encrypt_key(tmp_Key, KEY_LENGTH(1), encr_ctx+id); encr_data(data+id*CHUNK,len,zcx+id,encr_ctx+id); //giai nen (strm+id)->avail_in = 0; (strm+id)->next_in = Z_NULL; (void)inflateInit2(strm+id,-13,d_state+id); (strm+id)->avail_in = len; (strm+id)->next_in = data+id*CHUNK; (strm+id)->avail_out = CHUNK; (strm+id)->next_out = out + id*CHUNK; (void)inflate(strm+id, Z_NO_FLUSH,d_state+id); //nhan dang switch(extension) { case dotpdf: for (int i=0;i<7;i++) if (PDF[i]!=(out+id*CHUNK)[i]) ok=0; break; case dotdoc: for (int i=0;i<14;i++) if (DOC[i]!=(out+id*CHUNK)[i]) ok=0; break; case dottxt: for (int i=0;i<len;i++) if (((out+id*CHUNK)[i] < 0x20) || ((out+id*CHUNK)[i] > 0x7E)) ok=0; break; default: ok=0; break; } //lay ket qua dung if (ok) d_strcpy((char*)devPass+ ((unsigned int)id)*width,password); } } else { //giam qualities va goi de quy. } } } } /*Ket thuc phan them*/
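RunKernel above reads its dictionary as devPtr + (base + id) * width, i.e. a flat device buffer of fixed-width, NUL-terminated rows, one candidate word per row. The host-side sketch below shows one way such a buffer might be prepared; the helper name and the plain cudaMalloc/cudaMemcpy usage are assumptions for illustration, not code from the original project.

// Hedged host-side sketch of the flat, fixed-stride dictionary layout consumed by RunKernel.
#include <cuda_runtime.h>
#include <cstring>
#include <string>
#include <vector>

static char *upload_dictionary(const std::vector<std::string> &words, int width) {
    std::vector<char> host(words.size() * width, '\0');
    for (size_t i = 0; i < words.size(); ++i) {
        // Each word occupies one fixed-width row, NUL-terminated because the buffer is pre-zeroed.
        std::strncpy(&host[i * width], words[i].c_str(), width - 1);
    }
    char *dev = nullptr;
    cudaMalloc(&dev, host.size());
    cudaMemcpy(dev, host.data(), host.size(), cudaMemcpyHostToDevice);
    return dev;  // word k lives at dev + k * width, matching the kernel's indexing
}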
0efb26a552df65731e0900c5a05ea26e6c10b964.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// You need to write a simple program to perform computation with 1D array in CPU and GPU, then compare the result.

// includes, system
#include <stdio.h>
#include <assert.h>

// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char *msg);

// Part 3 of 5: implement the kernel
__global__ void calculate1DKernel(int *d_a)
{
}

////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
    // pointer for host memory
    int *h_a;

    // pointer for device memory
    int *d_a;

    // define grid and block size
    int numBlocks = 8;
    int numThreadsPerBlock = 8;

    // Part 1 of 5: allocate host and device memory

    // Part 2 of 5: launch kernel

    // block until the device has completed
    hipDeviceSynchronize();

    // check if kernel execution generated an error
    checkCUDAError("kernel execution");

    // Part 4 of 5: device to host copy

    // Check for any CUDA errors
    checkCUDAError("hipMemcpy");

    // Part 5 of 5: verify the data returned to the host is correct

    // free device memory
    hipFree(d_a);

    // free host memory
    free(h_a);

    // If the program makes it this far, then the results are correct and
    // there are no run-time errors.  Good work!
    printf("Correct!\n");

    return 0;
}

void checkCUDAError(const char *msg)
{
    hipError_t err = hipGetLastError();
    if( hipSuccess != err)
    {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
        exit(-1);
    }
}
0efb26a552df65731e0900c5a05ea26e6c10b964.cu
// You need to write a simple program to perform computation with 1D array in CPU and GPU, then compare the result.

// includes, system
#include <stdio.h>
#include <assert.h>

// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char *msg);

// Part 3 of 5: implement the kernel
__global__ void calculate1DKernel(int *d_a)
{
}

////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
    // pointer for host memory
    int *h_a;

    // pointer for device memory
    int *d_a;

    // define grid and block size
    int numBlocks = 8;
    int numThreadsPerBlock = 8;

    // Part 1 of 5: allocate host and device memory

    // Part 2 of 5: launch kernel

    // block until the device has completed
    cudaThreadSynchronize();

    // check if kernel execution generated an error
    checkCUDAError("kernel execution");

    // Part 4 of 5: device to host copy

    // Check for any CUDA errors
    checkCUDAError("cudaMemcpy");

    // Part 5 of 5: verify the data returned to the host is correct

    // free device memory
    cudaFree(d_a);

    // free host memory
    free(h_a);

    // If the program makes it this far, then the results are correct and
    // there are no run-time errors.  Good work!
    printf("Correct!\n");

    return 0;
}

void checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if( cudaSuccess != err)
    {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
        exit(-1);
    }
}
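Both copies of this exercise skeleton leave Parts 1 through 5 blank. Below is one hedged completion for reference: the kernel simply writes each thread's global index and the host verifies it, which is an assumption about the intended computation rather than the official solution.

// Illustrative completion of the exercise (assumed kernel behavior: store the global thread index).
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

__global__ void fillGlobalIndex(int *d_a)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    d_a[idx] = idx;
}

int main(void)
{
    const int numBlocks = 8, numThreadsPerBlock = 8;
    const int n = numBlocks * numThreadsPerBlock;
    const size_t bytes = n * sizeof(int);

    int *h_a = (int *)malloc(bytes);               // Part 1: host allocation
    int *d_a = NULL;
    cudaMalloc((void **)&d_a, bytes);              // Part 1: device allocation

    fillGlobalIndex<<<numBlocks, numThreadsPerBlock>>>(d_a);   // Part 2: launch kernel
    cudaDeviceSynchronize();

    cudaMemcpy(h_a, d_a, bytes, cudaMemcpyDeviceToHost);       // Part 4: device to host copy

    for (int i = 0; i < n; ++i) {                  // Part 5: verify the returned data
        if (h_a[i] != i) {
            fprintf(stderr, "mismatch at %d: got %d\n", i, h_a[i]);
            return 1;
        }
    }

    cudaFree(d_a);
    free(h_a);
    printf("Correct!\n");
    return 0;
}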
42fc4026004dbc53fe6dbc79e6b3e6c0de66aa20.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "saber/funcs/impl/cuda/saber_argmax.h" #include "hip/hip_fp16.h" #include <cfloat> namespace anakin { namespace saber { template <typename Dtype, unsigned int blockSize> __global__ void top1(const Dtype* in_data, const int height, const int width, bool out_max_val, Dtype* out_data) { if (blockIdx.x > height) { return; } __shared__ Dtype share_data[CUDA_NUM_THREADS]; __shared__ Dtype share_index[CUDA_NUM_THREADS]; int offset = blockIdx.x * width; const Dtype* tmp_in_data = in_data + offset; Dtype minest = -1e32; int index = threadIdx.x; if (index < width) { Dtype result = tmp_in_data[index]; Dtype idx = index; for (int tid = index + blockDim.x; tid < width; tid += blockDim.x) { if (result < tmp_in_data[tid]) { result = tmp_in_data[tid]; idx = tid; } } share_data[index] = result; share_index[index] = idx; } else { share_data[index] = minest; share_index[index] = -1; } __syncthreads(); #if 0 for (int stride = blockDim.x >> 1; stride > 0; stride >>= 1) { if (index < stride) { int index2 = index + stride; if (share_data[index2] > share_data[index]) { share_data[index] = share_data[index2]; share_index[index] = share_index[index2]; } } __syncthreads(); } #else if (blockSize >= 512) { if (index < 256) { int index2 = index + 256; if (share_data[index2] > share_data[index]) { share_data[index] = share_data[index2]; share_index[index] = share_index[index2]; } } __syncthreads(); } if (blockSize >= 256) { if (index < 128) { int index2 = index + 128; if (share_data[index2] > share_data[index]) { share_data[index] = share_data[index2]; share_index[index] = share_index[index2]; } } __syncthreads(); } if (blockSize >= 128) { if (index < 64) { int index2 = index + 64; if (share_data[index2] > share_data[index]) { share_data[index] = share_data[index2]; share_index[index] = share_index[index2]; } } __syncthreads(); } if (index < 32) { volatile Dtype *vmax = share_data; volatile Dtype *vindex = share_index; if (blockSize >= 64) { int index2 = index + 64; if (vmax[index2] > vmax[index]) { vmax[index] = vmax[index2]; vindex[index] = vindex[index2]; } } if (blockSize >= 32) { int index2 = index + 16; if (vmax[index2] > vmax[index]) { vmax[index] = vmax[index2]; vindex[index] = vindex[index2]; } } if (blockSize >= 16) { int index2 = index + 8; if (vmax[index2] > vmax[index]) { vmax[index] = vmax[index2]; vindex[index] = vindex[index2]; } } if (blockSize >= 8) { int index2 = index + 4; if (vmax[index2] > vmax[index]) { vmax[index] = vmax[index2]; vindex[index] = vindex[index2]; } } if (blockSize >= 4) { int index2 = index + 2; if (vmax[index2] > vmax[index]) { vmax[index] = vmax[index2]; vindex[index] = vindex[index2]; } } if (blockSize >= 2) { int index2 = index + 1; if (vmax[index2] > vmax[index]) { vmax[index] = vmax[index2]; vindex[index] = vindex[index2]; } } } __syncthreads(); #endif if (index == 0) { if (!out_max_val) { out_data[blockIdx.x] = share_index[0]; } else { out_data[2 * blockIdx.x] = share_index[0]; out_data[2 * blockIdx.x + 1] = share_data[0]; } } } template <typename Dtype, unsigned int blockSize> __global__ void block_top1(const Dtype* in_data, const int height, const int width, Dtype* out_data, Dtype* out_index) { __shared__ Dtype share_data[CUDA_NUM_THREADS]; __shared__ Dtype share_index[CUDA_NUM_THREADS]; int offset = blockIdx.y * width + blockIdx.x * CUDA_NUM_THREADS; const Dtype* tmp_in_data = in_data + offset; Dtype minest = -1e32; int index = threadIdx.x; if (index + blockIdx.x * 
CUDA_NUM_THREADS < width) { share_data[index] = tmp_in_data[index]; share_index[index] = threadIdx.x; } else { share_data[index] = minest; share_index[index] = -1; } __syncthreads(); #if 0 for (int stride = blockDim.x >> 1; stride > 0; stride >>= 1) { if (index < stride) { int index2 = index + stride; if (share_data[index2] > share_data[index]) { share_data[index] = share_data[index2]; share_index[index] = share_index[index2]; } } __syncthreads(); } #else if (blockSize >= 512) { if (index < 256) { int index2 = index + 256; if (share_data[index2] > share_data[index]) { share_data[index] = share_data[index2]; share_index[index] = share_index[index2]; } } __syncthreads(); } if (blockSize >= 256) { if (index < 128) { int index2 = index + 128; if (share_data[index2] > share_data[index]) { share_data[index] = share_data[index2]; share_index[index] = share_index[index2]; } } __syncthreads(); } if (blockSize >= 128) { if (index < 64) { int index2 = index + 64; if (share_data[index2] > share_data[index]) { share_data[index] = share_data[index2]; share_index[index] = share_index[index2]; } } __syncthreads(); } if (index < 32) { volatile Dtype *vmax = share_data; volatile Dtype *vindex = share_index; if (blockSize >= 64) { int index2 = index + 64; if (vmax[index2] > vmax[index]) { vmax[index] = vmax[index2]; vindex[index] = vindex[index2]; } } if (blockSize >= 32) { int index2 = index + 16; if (vmax[index2] > vmax[index]) { vmax[index] = vmax[index2]; vindex[index] = vindex[index2]; } } if (blockSize >= 16) { int index2 = index + 8; if (vmax[index2] > vmax[index]) { vmax[index] = vmax[index2]; vindex[index] = vindex[index2]; } } if (blockSize >= 8) { int index2 = index + 4; if (vmax[index2] > vmax[index]) { vmax[index] = vmax[index2]; vindex[index] = vindex[index2]; } } if (blockSize >= 4) { int index2 = index + 2; if (vmax[index2] > vmax[index]) { vmax[index] = vmax[index2]; vindex[index] = vindex[index2]; } } if (blockSize >= 2) { int index2 = index + 1; if (vmax[index2] > vmax[index]) { vmax[index] = vmax[index2]; vindex[index] = vindex[index2]; } } } __syncthreads(); #endif if (index == 0) { int offset = blockIdx.y * gridDim.x + blockIdx.x; out_data[offset] = share_data[0]; out_index[offset] = share_index[0]; } } template <typename Dtype, unsigned int blockSize> __global__ void top1(const Dtype* in_data, const Dtype* in_index, const int height, const int width, bool out_max_val, Dtype* out_data) { __shared__ Dtype share_data[blockSize]; __shared__ Dtype share_index[blockSize]; int offset = blockIdx.x * width; const Dtype* tmp_in_data = in_data + offset; const Dtype* tmp_in_index = in_index + offset; Dtype minest = -1e10; int index = threadIdx.x; if (index < width) { Dtype result = tmp_in_data[index]; Dtype idx = index; for (int tid = index + blockDim.x; tid < width; tid += blockDim.x) { if (result < tmp_in_data[tid]) { result = tmp_in_data[tid]; idx = tid; } } share_data[index] = result; share_index[index] = idx; } else { share_data[index] = minest; share_index[index] = -1; } __syncthreads(); #if 0 for (int stride = blockDim.x >> 1; stride > 0; stride >>= 1) { if (index < stride) { int index2 = index + stride; if (share_data[index2] > share_data[index]) { share_data[index] = share_data[index2]; share_index[index] = share_index[index2]; } } __syncthreads(); } #else if (blockSize >= 512) { if (index < 256) { int index2 = index + 256; if (share_data[index2] > share_data[index]) { share_data[index] = share_data[index2]; share_index[index] = share_index[index2]; } } __syncthreads(); } if (blockSize >= 
256) { if (index < 128) { int index2 = index + 128; if (share_data[index2] > share_data[index]) { share_data[index] = share_data[index2]; share_index[index] = share_index[index2]; } } __syncthreads(); } if (blockSize >= 128) { if (index < 64) { int index2 = index + 64; if (share_data[index2] > share_data[index]) { share_data[index] = share_data[index2]; share_index[index] = share_index[index2]; } } __syncthreads(); } if (index < 32) { volatile Dtype *vmax = share_data; volatile Dtype *vindex = share_index; if (blockSize >= 64) { int index2 = index + 64; if (vmax[index2] > vmax[index]) { vmax[index] = vmax[index2]; vindex[index] = vindex[index2]; } } if (blockSize >= 32) { int index2 = index + 16; if (vmax[index2] > vmax[index]) { vmax[index] = vmax[index2]; vindex[index] = vindex[index2]; } } if (blockSize >= 16) { int index2 = index + 8; if (vmax[index2] > vmax[index]) { vmax[index] = vmax[index2]; vindex[index] = vindex[index2]; } } if (blockSize >= 8) { int index2 = index + 4; if (vmax[index2] > vmax[index]) { vmax[index] = vmax[index2]; vindex[index] = vindex[index2]; } } if (blockSize >= 4) { int index2 = index + 2; if (vmax[index2] > vmax[index]) { vmax[index] = vmax[index2]; vindex[index] = vindex[index2]; } } if (blockSize >= 2) { int index2 = index + 1; if (vmax[index2] > vmax[index]) { vmax[index] = vmax[index2]; vindex[index] = vindex[index2]; } } } __syncthreads(); #endif if (index == 0) { int block_id = share_index[0]; if (!out_max_val) { out_data[blockIdx.x] = block_id * CUDA_NUM_THREADS + tmp_in_index[block_id]; } else { out_data[2 * blockIdx.x] = block_id * CUDA_NUM_THREADS + tmp_in_index[block_id]; out_data[2 * blockIdx.x + 1] = share_data[0]; } } } template <typename Dtype> __global__ void top1_channel(const Dtype* in_data, const int num, const int channel, const int inner_dim, bool out_max_val, Dtype* out_data) { int thread_id = threadIdx.x + blockIdx.x * blockDim.x; if (thread_id > num * inner_dim) { return; } int num_id = thread_id / inner_dim; int inner_id = thread_id % inner_dim; // const Dtype* tmp_in_data = in_data + num_id * channel * inner_dim + inner_id; Dtype max_data = tmp_in_data[0]; Dtype max_id = 0; for (int i = 1; i < channel; i++) { Dtype data = tmp_in_data[i*inner_dim]; if (max_data < data) { max_data = data; max_id = i; } } out_data[thread_id] = out_max_val ? 
max_data : max_id; } template <typename Dtype> __device__ void adjust_small_heap_with_index_device(Dtype* tree, Dtype *index_tree,int index,int length){ while (2 * index + 1 < length) { int child_index = 2 * index + 1; if (child_index + 1 < length && tree[child_index + 1] < tree[child_index]) { child_index++; } if (tree[index] > tree[child_index]) { Dtype t = tree[index]; tree[index] = tree[child_index]; tree[child_index] = t; int t_index = index_tree[index]; index_tree[index] = index_tree[child_index]; index_tree[child_index] = t_index; index = child_index; } else { break; } } } template <typename Dtype> __device__ void adjust_small_heap_with_index_device_stride(Dtype* tree, Dtype *index_tree,int index,int length, int stride){ while (2 * index + 1 < length) { int child_index = 2 * index + 1; int off_0 = child_index * stride; int off_1 = (child_index + 1) * stride; if (child_index + 1 < length && tree[off_1] < tree[off_0]) { child_index++; } int child_off = child_index * stride; int cur_off = index * stride; if (tree[cur_off] > tree[child_off]) { Dtype t = tree[cur_off]; tree[cur_off] = tree[child_off]; tree[child_off] = t; int t_index = index_tree[cur_off]; index_tree[cur_off] = index_tree[child_off]; index_tree[child_off] = t_index; index = child_index; } else { break; } } } template <typename Dtype> __global__ void topk_channel(const Dtype* in_data, const int num, const int channel, const int inner_dim, const int top_k, bool out_max_val, Dtype* out_data) { int thread_id = threadIdx.x + blockIdx.x * blockDim.x; if (thread_id > num * inner_dim) { return; } int num_id = thread_id / inner_dim; int inner_id = thread_id % inner_dim; // const Dtype* tmp_in_data = in_data + num_id * channel * inner_dim + inner_id; extern __shared__ Dtype trees[]; Dtype* small_heap_tree = trees + thread_id * top_k; Dtype* tree_index = trees + thread_id * top_k + blockDim.x * top_k; for (int i = 0; i < top_k; i++) { small_heap_tree[i] = -FLT_MAX; tree_index[i] = -1; } for (int i = 0; i < channel; i++) { Dtype data = tmp_in_data[i*inner_dim]; if (data > small_heap_tree[0]) { small_heap_tree[0] = data; tree_index[i] = i; adjust_small_heap_with_index_device(small_heap_tree, tree_index, 0, top_k); } } Dtype* out = out_data + num_id * top_k * inner_dim + inner_id; for (int i = top_k - 1; i >= 0; i--) { out[i * inner_dim] = out_max_val ? 
small_heap_tree[0] : tree_index[0]; small_heap_tree[0] = FLT_MAX; tree_index[0] = -1; adjust_small_heap_with_index_device(small_heap_tree, tree_index, 0, top_k); } } /*trees size is k * blockDim.x*/ template <typename Dtype, int blockSize> __global__ void topk_heap_shared(Dtype *out_data, int n, int inner_dim, const int top_k, const bool out_max_val, const Dtype *in_data){ extern __shared__ Dtype trees[]; const int block_id = blockIdx.x; const int tid = threadIdx.x; Dtype *cur_tree = trees + tid * top_k; Dtype *cur_tree_index = cur_tree + top_k * blockDim.x; for (int i = 0; i < top_k; i++){ cur_tree[i] = -FLT_MAX; cur_tree_index[i] = -1; } /*build small heap for every thread in one picture*/ const Dtype* in = in_data + block_id * inner_dim; for (int i = tid; i < inner_dim; i += blockDim.x){ if (in[i] > cur_tree[0]) { cur_tree[0] = in[i]; cur_tree_index[0] = i; adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k); } } __syncthreads(); if (blockSize >= 512) { if (tid < 256) { Dtype* next_tree = cur_tree + 256 * top_k; Dtype* next_tree_index = cur_tree_index + 256 * top_k; for (int i = 0; i < top_k; i++) { if (next_tree[i] > cur_tree[0]) { cur_tree[0] = next_tree[i]; cur_tree_index[0] = next_tree_index[i]; adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k); } } } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { Dtype* next_tree = cur_tree + 128 * top_k; Dtype* next_tree_index = cur_tree_index + 128 * top_k; for (int i = 0; i < top_k; i++) { if (next_tree[i] > cur_tree[0]) { cur_tree[0] = next_tree[i]; cur_tree_index[0] = next_tree_index[i]; adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k); } } } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { Dtype* next_tree = cur_tree + 64 * top_k; Dtype* next_tree_index = cur_tree_index + 64 * top_k; for (int i = 0; i < top_k; i++) { if (next_tree[i] > cur_tree[0]) { cur_tree[0] = next_tree[i]; cur_tree_index[0] = next_tree_index[i]; adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k); } } } __syncthreads(); } if (blockSize >= 64) { if (tid < 32) { Dtype* next_tree = cur_tree + 32 * top_k; Dtype* next_tree_index = cur_tree_index + 32 * top_k; for (int i = 0; i < top_k; i++) { if (next_tree[i] > cur_tree[0]) { cur_tree[0] = next_tree[i]; cur_tree_index[0] = next_tree_index[i]; adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k); } } } __syncthreads(); } if (blockSize >= 32) { if (tid < 16) { Dtype* next_tree = cur_tree + 16 * top_k; Dtype* next_tree_index = cur_tree_index + 16 * top_k; for (int i = 0; i < top_k; i++) { if (next_tree[i] > cur_tree[0]) { cur_tree[0] = next_tree[i]; cur_tree_index[0] = next_tree_index[i]; adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k); } } } __syncthreads(); } if (blockSize >= 16) { if (tid < 8) { Dtype* next_tree = cur_tree + 8 * top_k; Dtype* next_tree_index = cur_tree_index + 8 * top_k; for (int i = 0; i < top_k; i++) { if (next_tree[i] > cur_tree[0]) { cur_tree[0] = next_tree[i]; cur_tree_index[0] = next_tree_index[i]; adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k); } } } __syncthreads(); } if (blockSize >= 8) { if (tid < 4) { Dtype* next_tree = cur_tree + 4 * top_k; Dtype* next_tree_index = cur_tree_index + 4 * top_k; for (int i = 0; i < top_k; i++) { if (next_tree[i] > cur_tree[0]) { cur_tree[0] = next_tree[i]; cur_tree_index[0] = next_tree_index[i]; adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k); } } } __syncthreads(); } if 
(blockSize >= 4) { if (tid < 2) { Dtype* next_tree = cur_tree + 2 * top_k; Dtype* next_tree_index = cur_tree_index + 2 * top_k; for (int i = 0; i < top_k; i++) { if (next_tree[i] > cur_tree[0]) { cur_tree[0] = next_tree[i]; cur_tree_index[0] = next_tree_index[i]; adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k); } } } __syncthreads(); } if (blockSize >= 2) { if (tid < 1) { Dtype* next_tree = cur_tree + 1 * top_k; Dtype* next_tree_index = cur_tree_index + 1 * top_k; for (int i = 0; i < top_k; i++) { if (next_tree[i] > cur_tree[0]) { cur_tree[0] = next_tree[i]; cur_tree_index[0] = next_tree_index[i]; adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k); } } } __syncthreads(); } // if (tid < 32) { // if (blockSize >= 64) { // volatile Dtype* cur = cur_tree; // volatile Dtype* cur_index = cur_tree_index; // volatile Dtype* next = cur + 32 * top_k; // volatile Dtype* next_index = cur_index + 32 * top_k; // for (int i = 0; i < top_k; i++) { // if (next[i] > cur[0]) { // cur[0] = next[i]; // cur_index[0] = next_index[i]; // adjust_small_heap_with_index_device(cur, cur_index, 0, top_k); // } // } // } // if (blockSize >= 32) { // volatile Dtype* cur = cur_tree; // volatile Dtype* cur_index = cur_tree_index; // volatile Dtype* next = cur + 16 * top_k; // volatile Dtype* next_index = cur_index + 16 * top_k; // for (int i = 0; i < top_k; i++) { // if (next[i] > cur[0]) { // cur[0] = next[i]; // cur_index[0] = next_index[i]; // adjust_small_heap_with_index_device(cur, cur_index, 0, top_k); // } // } // } // if (blockSize >= 16) { // volatile Dtype* cur = cur_tree; // volatile Dtype* cur_index = cur_tree_index; // volatile Dtype* next = cur + 8 * top_k; // volatile Dtype* next_index = cur_index + 8 * top_k; // for (int i = 0; i < top_k; i++) { // if (next[i] > cur[0]) { // cur[0] = next[i]; // cur_index[0] = next_index[i]; // adjust_small_heap_with_index_device(cur, cur_index, 0, top_k); // } // } // if (tid < 8) { // for(int i = 0; i < top_k; i++) { // printf("block_id:%d, tid:%d, i:%d, cur_tree:%f, \n", block_id, tid, i, cur[i]); // } // } // } // if (blockSize >= 8) { // volatile Dtype* cur = cur_tree; // volatile Dtype* cur_index = cur_tree_index; // volatile Dtype* next = cur + 4 * top_k; // volatile Dtype* next_index = cur_index + 4 * top_k; // for (int i = 0; i < top_k; i++) { // if (next[i] > cur[0]) { // cur[0] = next[i]; // cur_index[0] = next_index[i]; // adjust_small_heap_with_index_device(cur, cur_index, 0, top_k); // if (block_id == 0 && tid < 1) { // for(int m = 0; m < top_k; m++) { // printf("block_id:%d, tid:%d, i:%d, m:%d, cur_tree:%f, next:%f\n", block_id, tid, i, m, cur[m], next[m]); // } // } // } // } // } // if (blockSize >= 4) { // volatile Dtype* cur = cur_tree; // volatile Dtype* cur_index = cur_tree_index; // volatile Dtype* next = cur + 2 * top_k; // volatile Dtype* next_index = cur_index + 2 * top_k; // for (int i = 0; i < top_k; i++) { // if (next[i] > cur[0]) { // cur[0] = next[i]; // cur_index[0] = next_index[i]; // adjust_small_heap_with_index_device(cur, cur_index, 0, top_k); // } // } // } // if (blockSize >= 2) { // volatile Dtype* cur = cur_tree; // volatile Dtype* cur_index = cur_tree_index; // volatile Dtype* next = cur + 1 * top_k; // volatile Dtype* next_index = cur_index + 1 * top_k; // if (tid == 0) { // for (int i = 0; i < top_k; i++) { // printf("block_id:%d, i:%d, cur_val:%f, cur_index:%f, next_val:%f, next_val:%f\n", block_id, i, cur[i], cur_index[i], next[i], next_index[i]); // //if (next[i] > cur[0]) { // // 
cur[0] = next[i]; // // cur_index[0] = next_index[i]; // // adjust_small_heap_with_index_device(cur, cur_index, 0, top_k); // //} // } // } // } // } if (tid == 0) { int stride = out_max_val ? block_id * top_k * 2 : block_id * top_k; Dtype* out = out_data + stride; for (int i = top_k - 1; i >= 0; i--) { if (!out_max_val) { out[i] = cur_tree_index[0]; } else { out[i] = cur_tree[0]; out[i + top_k] = cur_tree_index[0]; } cur_tree[0] = FLT_MAX; cur_tree_index[0] = -1; adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k); } } } template <typename Dtype, int blockSize> __global__ void topk_heap_shared_no_bank(Dtype *out_data, int n, int inner_dim, const int top_k, const bool out_max_val, const Dtype *in_data){ extern __shared__ Dtype trees[]; const int block_id = blockIdx.x; const int tid = threadIdx.x; Dtype *cur_tree = trees + tid ; Dtype *cur_tree_index = cur_tree + top_k * blockDim.x; for (int i = 0; i < top_k; i++){ cur_tree[i*blockDim.x] = -FLT_MAX; cur_tree_index[i * blockDim.x] = -1; } int stride = blockDim.x; /*build small heap for every thread in one picture*/ const Dtype* in = in_data + block_id * inner_dim; for (int i = tid; i < inner_dim; i += blockDim.x){ if (in[i] > cur_tree[0]) { cur_tree[0] = in[i]; cur_tree_index[0] = i; adjust_small_heap_with_index_device_stride(cur_tree, cur_tree_index, 0, top_k, stride); } } __syncthreads(); if (blockSize >= 512) { if (tid < 256) { Dtype* next_tree = cur_tree + 256; Dtype* next_tree_index = cur_tree_index + 256; for (int i = 0; i < top_k; i++) { int off = i*stride; if (next_tree[off] > cur_tree[0]) { cur_tree[0] = next_tree[off]; cur_tree_index[0] = next_tree_index[off]; adjust_small_heap_with_index_device_stride(cur_tree, cur_tree_index, 0, top_k, stride); } } } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { Dtype* next_tree = cur_tree + 128; Dtype* next_tree_index = cur_tree_index + 128; for (int i = 0; i < top_k; i++) { int off = i*stride; if (next_tree[off] > cur_tree[0]) { cur_tree[0] = next_tree[off]; cur_tree_index[0] = next_tree_index[off]; adjust_small_heap_with_index_device_stride(cur_tree, cur_tree_index, 0, top_k, stride); } } } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { Dtype* next_tree = cur_tree + 64; Dtype* next_tree_index = cur_tree_index + 64; for (int i = 0; i < top_k; i++) { int off = i*stride; if (next_tree[off] > cur_tree[0]) { cur_tree[0] = next_tree[off]; cur_tree_index[0] = next_tree_index[off]; adjust_small_heap_with_index_device_stride(cur_tree, cur_tree_index, 0, top_k, stride); } } } __syncthreads(); } if (blockSize >= 64) { if (tid < 32) { Dtype* next_tree = cur_tree + 32; Dtype* next_tree_index = cur_tree_index + 32; for (int i = 0; i < top_k; i++) { int off = i*stride; if (next_tree[off] > cur_tree[0]) { cur_tree[0] = next_tree[off]; cur_tree_index[0] = next_tree_index[off]; adjust_small_heap_with_index_device_stride(cur_tree, cur_tree_index, 0, top_k, stride); } } } __syncthreads(); } if (blockSize >= 32) { if (tid < 16) { Dtype* next_tree = cur_tree + 16; Dtype* next_tree_index = cur_tree_index + 16; for (int i = 0; i < top_k; i++) { int off = i*stride; if (next_tree[off] > cur_tree[0]) { cur_tree[0] = next_tree[off]; cur_tree_index[0] = next_tree_index[off]; adjust_small_heap_with_index_device_stride(cur_tree, cur_tree_index, 0, top_k, stride); } } } __syncthreads(); } if (blockSize >= 16) { if (tid < 8) { Dtype* next_tree = cur_tree + 8; Dtype* next_tree_index = cur_tree_index + 8; for (int i = 0; i < top_k; i++) { int off = i*stride; if (next_tree[off] > 
cur_tree[0]) { cur_tree[0] = next_tree[off]; cur_tree_index[0] = next_tree_index[off]; adjust_small_heap_with_index_device_stride(cur_tree, cur_tree_index, 0, top_k, stride); } } } __syncthreads(); } if (blockSize >= 8) { if (tid < 4) { Dtype* next_tree = cur_tree + 4; Dtype* next_tree_index = cur_tree_index + 4; for (int i = 0; i < top_k; i++) { int off = i*stride; if (next_tree[off] > cur_tree[0]) { cur_tree[0] = next_tree[off]; cur_tree_index[0] = next_tree_index[off]; adjust_small_heap_with_index_device_stride(cur_tree, cur_tree_index, 0, top_k, stride); } } } __syncthreads(); } if (blockSize >= 4) { if (tid < 2) { Dtype* next_tree = cur_tree + 2; Dtype* next_tree_index = cur_tree_index + 2; for (int i = 0; i < top_k; i++) { int off = i*stride; if (next_tree[off] > cur_tree[0]) { cur_tree[0] = next_tree[off]; cur_tree_index[0] = next_tree_index[off]; adjust_small_heap_with_index_device_stride(cur_tree, cur_tree_index, 0, top_k, stride); } } } __syncthreads(); } if (blockSize >= 2) { if (tid < 1) { Dtype* next_tree = cur_tree + 1; Dtype* next_tree_index = cur_tree_index + 1; for (int i = 0; i < top_k; i++) { int off = i*stride; if (next_tree[off] > cur_tree[0]) { cur_tree[0] = next_tree[off]; cur_tree_index[0] = next_tree_index[off]; adjust_small_heap_with_index_device_stride(cur_tree, cur_tree_index, 0, top_k, stride); } } } __syncthreads(); } if (tid == 0) { int stride = out_max_val ? block_id * top_k * 2 : block_id * top_k; Dtype* out = out_data + stride; for (int i = top_k - 1; i >= 0; i--) { if (!out_max_val) { out[i] = cur_tree_index[0]; } else { out[i] = cur_tree[0]; out[i + top_k] = cur_tree_index[0]; } cur_tree[0] = FLT_MAX; cur_tree_index[0] = -1; adjust_small_heap_with_index_device_stride(cur_tree, cur_tree_index, 0, top_k, stride); } } } /* template<DataType dataType, typename LayOutType> SaberStatus SaberArgmax<dataType, LayOutType>::dispatch( const std::vector<DataTensor *> inputs, std::vector<DataTensor *> outputs, ArgmaxParam<void> &param) { */ template <DataType OpDtype, DataType inDtype, DataType outDtype, typename LayOutType_op, typename LayOutType_in, typename LayOutType_out> SaberStatus SaberArgmax<NV, OpDtype, inDtype, outDtype,\ LayOutType_op, LayOutType_in, LayOutType_out>::dispatch(const std::vector<DataTensor_in *>& inputs, std::vector<DataTensor_out *>& outputs, ArgmaxParam<OpTensor>& param) { hipStream_t cuda_stream = this->_ctx.get_compute_stream(); outputs[0]->set_seq_offset(inputs[0]->get_seq_offset()); const InDataType * in_data = inputs[0]->data(); OutDataType * out_data = outputs[0]->mutable_data(); int outer_dim = inputs[0]->count(0, param.axis); if (param.has_axis) { int count = inputs[0]->count(0, inputs[0]->dims()); int dim = inputs[0]->shape()[param.axis]; int inner_dim = inputs[0]->count(param.axis + 1, inputs[0]->dims()); int total_threads = count / dim; if (param.top_k == 1) { hipLaunchKernelGGL(( top1_channel<InDataType>), dim3(CUDA_GET_BLOCKS(total_threads)), dim3(CUDA_NUM_THREADS), 0, cuda_stream, in_data, outer_dim, dim, inner_dim, param.out_max_val, out_data); } else { hipLaunchKernelGGL(( topk_channel<InDataType>), dim3(CUDA_GET_BLOCKS(total_threads)), dim3(CUDA_NUM_THREADS), sizeof(InDataType) * CUDA_NUM_THREADS * param.top_k, cuda_stream, in_data, outer_dim, dim, inner_dim, param.top_k, param.out_max_val, out_data); } } else { int inner_dim = inputs[0]->count(1, inputs[0]->dims()); int outer_dim = inputs[0]->num(); if (param.top_k == 1) { if (inner_dim / CUDA_NUM_THREADS < 10) { int block_size = pow(2, ceil(log(inner_dim) / log(2))); 
block_size = block_size > CUDA_NUM_THREADS ? CUDA_NUM_THREADS : block_size; hipLaunchKernelGGL(( top1<InDataType, CUDA_NUM_THREADS>), dim3(outer_dim), dim3(CUDA_NUM_THREADS), 0, cuda_stream, in_data, outer_dim, inner_dim, param.out_max_val, out_data); } else { int block_num = CUDA_GET_BLOCKS(inner_dim); dim3 grid(block_num, outer_dim); hipLaunchKernelGGL(( block_top1<InDataType, CUDA_NUM_THREADS>), dim3(grid), dim3(CUDA_NUM_THREADS), 0, cuda_stream, in_data, outer_dim, inner_dim, _block_max_value.mutable_data(), _block_max_index.mutable_data()); hipLaunchKernelGGL(( top1<InDataType, CUDA_NUM_THREADS>), dim3(outer_dim), dim3(CUDA_NUM_THREADS), 0, cuda_stream, _block_max_value.data(), _block_max_index.data(), outer_dim, block_num, param.out_max_val, out_data); } } else { //topk_heap_shared<InDataType, CUDA_NUM_THREADS><<<outer_dim, CUDA_NUM_THREADS, sizeof(InDataType) * CUDA_NUM_THREADS * param.top_k * 2, cuda_stream>>>(out_data, outer_dim, inner_dim, param.top_k, param.out_max_val, in_data); hipLaunchKernelGGL(( topk_heap_shared_no_bank<InDataType, CUDA_NUM_THREADS>), dim3(outer_dim), dim3(CUDA_NUM_THREADS), sizeof(InDataType) * CUDA_NUM_THREADS * param.top_k * 2, cuda_stream, out_data, outer_dim, inner_dim, param.top_k, param.out_max_val, in_data); } } return SaberSuccess; } } }
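The top-k kernels above all rely on the same selection trick: each thread keeps a k-element min-heap of (value, index) pairs and replaces the root whenever a larger value arrives, so the heap always holds the k largest values seen so far. The host-side C++ sketch below restates that pattern in isolation; the function names are illustrative and it is not part of the saber sources.

// Hedged host-side sketch of the "small heap" top-k selection used by the argmax kernels.
#include <cfloat>
#include <utility>
#include <vector>

static void sift_down(std::vector<std::pair<float, int> > &h, size_t i) {
    // Restore the min-heap property (smallest of the current top-k at h[0]).
    for (;;) {
        size_t c = 2 * i + 1;
        if (c >= h.size()) break;
        if (c + 1 < h.size() && h[c + 1].first < h[c].first) ++c;
        if (h[i].first <= h[c].first) break;
        std::swap(h[i], h[c]);
        i = c;
    }
}

static std::vector<std::pair<float, int> > top_k(const float *data, int n, int k) {
    std::vector<std::pair<float, int> > heap(k, std::make_pair(-FLT_MAX, -1));
    for (int i = 0; i < n; ++i) {
        // Only values larger than the current k-th largest displace the root.
        if (data[i] > heap[0].first) {
            heap[0] = std::make_pair(data[i], i);
            sift_down(heap, 0);
        }
    }
    return heap;  // the k largest (value, index) pairs, root = smallest of them
}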
42fc4026004dbc53fe6dbc79e6b3e6c0de66aa20.cu
#include "saber/funcs/impl/cuda/saber_argmax.h" #include "cuda_fp16.h" #include <cfloat> namespace anakin { namespace saber { template <typename Dtype, unsigned int blockSize> __global__ void top1(const Dtype* in_data, const int height, const int width, bool out_max_val, Dtype* out_data) { if (blockIdx.x > height) { return; } __shared__ Dtype share_data[CUDA_NUM_THREADS]; __shared__ Dtype share_index[CUDA_NUM_THREADS]; int offset = blockIdx.x * width; const Dtype* tmp_in_data = in_data + offset; Dtype minest = -1e32; int index = threadIdx.x; if (index < width) { Dtype result = tmp_in_data[index]; Dtype idx = index; for (int tid = index + blockDim.x; tid < width; tid += blockDim.x) { if (result < tmp_in_data[tid]) { result = tmp_in_data[tid]; idx = tid; } } share_data[index] = result; share_index[index] = idx; } else { share_data[index] = minest; share_index[index] = -1; } __syncthreads(); #if 0 for (int stride = blockDim.x >> 1; stride > 0; stride >>= 1) { if (index < stride) { int index2 = index + stride; if (share_data[index2] > share_data[index]) { share_data[index] = share_data[index2]; share_index[index] = share_index[index2]; } } __syncthreads(); } #else if (blockSize >= 512) { if (index < 256) { int index2 = index + 256; if (share_data[index2] > share_data[index]) { share_data[index] = share_data[index2]; share_index[index] = share_index[index2]; } } __syncthreads(); } if (blockSize >= 256) { if (index < 128) { int index2 = index + 128; if (share_data[index2] > share_data[index]) { share_data[index] = share_data[index2]; share_index[index] = share_index[index2]; } } __syncthreads(); } if (blockSize >= 128) { if (index < 64) { int index2 = index + 64; if (share_data[index2] > share_data[index]) { share_data[index] = share_data[index2]; share_index[index] = share_index[index2]; } } __syncthreads(); } if (index < 32) { volatile Dtype *vmax = share_data; volatile Dtype *vindex = share_index; if (blockSize >= 64) { int index2 = index + 64; if (vmax[index2] > vmax[index]) { vmax[index] = vmax[index2]; vindex[index] = vindex[index2]; } } if (blockSize >= 32) { int index2 = index + 16; if (vmax[index2] > vmax[index]) { vmax[index] = vmax[index2]; vindex[index] = vindex[index2]; } } if (blockSize >= 16) { int index2 = index + 8; if (vmax[index2] > vmax[index]) { vmax[index] = vmax[index2]; vindex[index] = vindex[index2]; } } if (blockSize >= 8) { int index2 = index + 4; if (vmax[index2] > vmax[index]) { vmax[index] = vmax[index2]; vindex[index] = vindex[index2]; } } if (blockSize >= 4) { int index2 = index + 2; if (vmax[index2] > vmax[index]) { vmax[index] = vmax[index2]; vindex[index] = vindex[index2]; } } if (blockSize >= 2) { int index2 = index + 1; if (vmax[index2] > vmax[index]) { vmax[index] = vmax[index2]; vindex[index] = vindex[index2]; } } } __syncthreads(); #endif if (index == 0) { if (!out_max_val) { out_data[blockIdx.x] = share_index[0]; } else { out_data[2 * blockIdx.x] = share_index[0]; out_data[2 * blockIdx.x + 1] = share_data[0]; } } } template <typename Dtype, unsigned int blockSize> __global__ void block_top1(const Dtype* in_data, const int height, const int width, Dtype* out_data, Dtype* out_index) { __shared__ Dtype share_data[CUDA_NUM_THREADS]; __shared__ Dtype share_index[CUDA_NUM_THREADS]; int offset = blockIdx.y * width + blockIdx.x * CUDA_NUM_THREADS; const Dtype* tmp_in_data = in_data + offset; Dtype minest = -1e32; int index = threadIdx.x; if (index + blockIdx.x * CUDA_NUM_THREADS < width) { share_data[index] = tmp_in_data[index]; share_index[index] = threadIdx.x; 
} else { share_data[index] = minest; share_index[index] = -1; } __syncthreads(); #if 0 for (int stride = blockDim.x >> 1; stride > 0; stride >>= 1) { if (index < stride) { int index2 = index + stride; if (share_data[index2] > share_data[index]) { share_data[index] = share_data[index2]; share_index[index] = share_index[index2]; } } __syncthreads(); } #else if (blockSize >= 512) { if (index < 256) { int index2 = index + 256; if (share_data[index2] > share_data[index]) { share_data[index] = share_data[index2]; share_index[index] = share_index[index2]; } } __syncthreads(); } if (blockSize >= 256) { if (index < 128) { int index2 = index + 128; if (share_data[index2] > share_data[index]) { share_data[index] = share_data[index2]; share_index[index] = share_index[index2]; } } __syncthreads(); } if (blockSize >= 128) { if (index < 64) { int index2 = index + 64; if (share_data[index2] > share_data[index]) { share_data[index] = share_data[index2]; share_index[index] = share_index[index2]; } } __syncthreads(); } if (index < 32) { volatile Dtype *vmax = share_data; volatile Dtype *vindex = share_index; if (blockSize >= 64) { int index2 = index + 64; if (vmax[index2] > vmax[index]) { vmax[index] = vmax[index2]; vindex[index] = vindex[index2]; } } if (blockSize >= 32) { int index2 = index + 16; if (vmax[index2] > vmax[index]) { vmax[index] = vmax[index2]; vindex[index] = vindex[index2]; } } if (blockSize >= 16) { int index2 = index + 8; if (vmax[index2] > vmax[index]) { vmax[index] = vmax[index2]; vindex[index] = vindex[index2]; } } if (blockSize >= 8) { int index2 = index + 4; if (vmax[index2] > vmax[index]) { vmax[index] = vmax[index2]; vindex[index] = vindex[index2]; } } if (blockSize >= 4) { int index2 = index + 2; if (vmax[index2] > vmax[index]) { vmax[index] = vmax[index2]; vindex[index] = vindex[index2]; } } if (blockSize >= 2) { int index2 = index + 1; if (vmax[index2] > vmax[index]) { vmax[index] = vmax[index2]; vindex[index] = vindex[index2]; } } } __syncthreads(); #endif if (index == 0) { int offset = blockIdx.y * gridDim.x + blockIdx.x; out_data[offset] = share_data[0]; out_index[offset] = share_index[0]; } } template <typename Dtype, unsigned int blockSize> __global__ void top1(const Dtype* in_data, const Dtype* in_index, const int height, const int width, bool out_max_val, Dtype* out_data) { __shared__ Dtype share_data[blockSize]; __shared__ Dtype share_index[blockSize]; int offset = blockIdx.x * width; const Dtype* tmp_in_data = in_data + offset; const Dtype* tmp_in_index = in_index + offset; Dtype minest = -1e10; int index = threadIdx.x; if (index < width) { Dtype result = tmp_in_data[index]; Dtype idx = index; for (int tid = index + blockDim.x; tid < width; tid += blockDim.x) { if (result < tmp_in_data[tid]) { result = tmp_in_data[tid]; idx = tid; } } share_data[index] = result; share_index[index] = idx; } else { share_data[index] = minest; share_index[index] = -1; } __syncthreads(); #if 0 for (int stride = blockDim.x >> 1; stride > 0; stride >>= 1) { if (index < stride) { int index2 = index + stride; if (share_data[index2] > share_data[index]) { share_data[index] = share_data[index2]; share_index[index] = share_index[index2]; } } __syncthreads(); } #else if (blockSize >= 512) { if (index < 256) { int index2 = index + 256; if (share_data[index2] > share_data[index]) { share_data[index] = share_data[index2]; share_index[index] = share_index[index2]; } } __syncthreads(); } if (blockSize >= 256) { if (index < 128) { int index2 = index + 128; if (share_data[index2] > share_data[index]) { 
share_data[index] = share_data[index2]; share_index[index] = share_index[index2]; } } __syncthreads(); } if (blockSize >= 128) { if (index < 64) { int index2 = index + 64; if (share_data[index2] > share_data[index]) { share_data[index] = share_data[index2]; share_index[index] = share_index[index2]; } } __syncthreads(); } if (index < 32) { volatile Dtype *vmax = share_data; volatile Dtype *vindex = share_index; if (blockSize >= 64) { int index2 = index + 64; if (vmax[index2] > vmax[index]) { vmax[index] = vmax[index2]; vindex[index] = vindex[index2]; } } if (blockSize >= 32) { int index2 = index + 16; if (vmax[index2] > vmax[index]) { vmax[index] = vmax[index2]; vindex[index] = vindex[index2]; } } if (blockSize >= 16) { int index2 = index + 8; if (vmax[index2] > vmax[index]) { vmax[index] = vmax[index2]; vindex[index] = vindex[index2]; } } if (blockSize >= 8) { int index2 = index + 4; if (vmax[index2] > vmax[index]) { vmax[index] = vmax[index2]; vindex[index] = vindex[index2]; } } if (blockSize >= 4) { int index2 = index + 2; if (vmax[index2] > vmax[index]) { vmax[index] = vmax[index2]; vindex[index] = vindex[index2]; } } if (blockSize >= 2) { int index2 = index + 1; if (vmax[index2] > vmax[index]) { vmax[index] = vmax[index2]; vindex[index] = vindex[index2]; } } } __syncthreads(); #endif if (index == 0) { int block_id = share_index[0]; if (!out_max_val) { out_data[blockIdx.x] = block_id * CUDA_NUM_THREADS + tmp_in_index[block_id]; } else { out_data[2 * blockIdx.x] = block_id * CUDA_NUM_THREADS + tmp_in_index[block_id]; out_data[2 * blockIdx.x + 1] = share_data[0]; } } } template <typename Dtype> __global__ void top1_channel(const Dtype* in_data, const int num, const int channel, const int inner_dim, bool out_max_val, Dtype* out_data) { int thread_id = threadIdx.x + blockIdx.x * blockDim.x; if (thread_id > num * inner_dim) { return; } int num_id = thread_id / inner_dim; int inner_id = thread_id % inner_dim; // const Dtype* tmp_in_data = in_data + num_id * channel * inner_dim + inner_id; Dtype max_data = tmp_in_data[0]; Dtype max_id = 0; for (int i = 1; i < channel; i++) { Dtype data = tmp_in_data[i*inner_dim]; if (max_data < data) { max_data = data; max_id = i; } } out_data[thread_id] = out_max_val ? 
max_data : max_id; } template <typename Dtype> __device__ void adjust_small_heap_with_index_device(Dtype* tree, Dtype *index_tree,int index,int length){ while (2 * index + 1 < length) { int child_index = 2 * index + 1; if (child_index + 1 < length && tree[child_index + 1] < tree[child_index]) { child_index++; } if (tree[index] > tree[child_index]) { Dtype t = tree[index]; tree[index] = tree[child_index]; tree[child_index] = t; int t_index = index_tree[index]; index_tree[index] = index_tree[child_index]; index_tree[child_index] = t_index; index = child_index; } else { break; } } } template <typename Dtype> __device__ void adjust_small_heap_with_index_device_stride(Dtype* tree, Dtype *index_tree,int index,int length, int stride){ while (2 * index + 1 < length) { int child_index = 2 * index + 1; int off_0 = child_index * stride; int off_1 = (child_index + 1) * stride; if (child_index + 1 < length && tree[off_1] < tree[off_0]) { child_index++; } int child_off = child_index * stride; int cur_off = index * stride; if (tree[cur_off] > tree[child_off]) { Dtype t = tree[cur_off]; tree[cur_off] = tree[child_off]; tree[child_off] = t; int t_index = index_tree[cur_off]; index_tree[cur_off] = index_tree[child_off]; index_tree[child_off] = t_index; index = child_index; } else { break; } } } template <typename Dtype> __global__ void topk_channel(const Dtype* in_data, const int num, const int channel, const int inner_dim, const int top_k, bool out_max_val, Dtype* out_data) { int thread_id = threadIdx.x + blockIdx.x * blockDim.x; if (thread_id > num * inner_dim) { return; } int num_id = thread_id / inner_dim; int inner_id = thread_id % inner_dim; // const Dtype* tmp_in_data = in_data + num_id * channel * inner_dim + inner_id; extern __shared__ Dtype trees[]; Dtype* small_heap_tree = trees + thread_id * top_k; Dtype* tree_index = trees + thread_id * top_k + blockDim.x * top_k; for (int i = 0; i < top_k; i++) { small_heap_tree[i] = -FLT_MAX; tree_index[i] = -1; } for (int i = 0; i < channel; i++) { Dtype data = tmp_in_data[i*inner_dim]; if (data > small_heap_tree[0]) { small_heap_tree[0] = data; tree_index[i] = i; adjust_small_heap_with_index_device(small_heap_tree, tree_index, 0, top_k); } } Dtype* out = out_data + num_id * top_k * inner_dim + inner_id; for (int i = top_k - 1; i >= 0; i--) { out[i * inner_dim] = out_max_val ? 
small_heap_tree[0] : tree_index[0]; small_heap_tree[0] = FLT_MAX; tree_index[0] = -1; adjust_small_heap_with_index_device(small_heap_tree, tree_index, 0, top_k); } } /*trees size is k * blockDim.x*/ template <typename Dtype, int blockSize> __global__ void topk_heap_shared(Dtype *out_data, int n, int inner_dim, const int top_k, const bool out_max_val, const Dtype *in_data){ extern __shared__ Dtype trees[]; const int block_id = blockIdx.x; const int tid = threadIdx.x; Dtype *cur_tree = trees + tid * top_k; Dtype *cur_tree_index = cur_tree + top_k * blockDim.x; for (int i = 0; i < top_k; i++){ cur_tree[i] = -FLT_MAX; cur_tree_index[i] = -1; } /*build small heap for every thread in one picture*/ const Dtype* in = in_data + block_id * inner_dim; for (int i = tid; i < inner_dim; i += blockDim.x){ if (in[i] > cur_tree[0]) { cur_tree[0] = in[i]; cur_tree_index[0] = i; adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k); } } __syncthreads(); if (blockSize >= 512) { if (tid < 256) { Dtype* next_tree = cur_tree + 256 * top_k; Dtype* next_tree_index = cur_tree_index + 256 * top_k; for (int i = 0; i < top_k; i++) { if (next_tree[i] > cur_tree[0]) { cur_tree[0] = next_tree[i]; cur_tree_index[0] = next_tree_index[i]; adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k); } } } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { Dtype* next_tree = cur_tree + 128 * top_k; Dtype* next_tree_index = cur_tree_index + 128 * top_k; for (int i = 0; i < top_k; i++) { if (next_tree[i] > cur_tree[0]) { cur_tree[0] = next_tree[i]; cur_tree_index[0] = next_tree_index[i]; adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k); } } } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { Dtype* next_tree = cur_tree + 64 * top_k; Dtype* next_tree_index = cur_tree_index + 64 * top_k; for (int i = 0; i < top_k; i++) { if (next_tree[i] > cur_tree[0]) { cur_tree[0] = next_tree[i]; cur_tree_index[0] = next_tree_index[i]; adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k); } } } __syncthreads(); } if (blockSize >= 64) { if (tid < 32) { Dtype* next_tree = cur_tree + 32 * top_k; Dtype* next_tree_index = cur_tree_index + 32 * top_k; for (int i = 0; i < top_k; i++) { if (next_tree[i] > cur_tree[0]) { cur_tree[0] = next_tree[i]; cur_tree_index[0] = next_tree_index[i]; adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k); } } } __syncthreads(); } if (blockSize >= 32) { if (tid < 16) { Dtype* next_tree = cur_tree + 16 * top_k; Dtype* next_tree_index = cur_tree_index + 16 * top_k; for (int i = 0; i < top_k; i++) { if (next_tree[i] > cur_tree[0]) { cur_tree[0] = next_tree[i]; cur_tree_index[0] = next_tree_index[i]; adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k); } } } __syncthreads(); } if (blockSize >= 16) { if (tid < 8) { Dtype* next_tree = cur_tree + 8 * top_k; Dtype* next_tree_index = cur_tree_index + 8 * top_k; for (int i = 0; i < top_k; i++) { if (next_tree[i] > cur_tree[0]) { cur_tree[0] = next_tree[i]; cur_tree_index[0] = next_tree_index[i]; adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k); } } } __syncthreads(); } if (blockSize >= 8) { if (tid < 4) { Dtype* next_tree = cur_tree + 4 * top_k; Dtype* next_tree_index = cur_tree_index + 4 * top_k; for (int i = 0; i < top_k; i++) { if (next_tree[i] > cur_tree[0]) { cur_tree[0] = next_tree[i]; cur_tree_index[0] = next_tree_index[i]; adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k); } } } __syncthreads(); } if 
(blockSize >= 4) { if (tid < 2) { Dtype* next_tree = cur_tree + 2 * top_k; Dtype* next_tree_index = cur_tree_index + 2 * top_k; for (int i = 0; i < top_k; i++) { if (next_tree[i] > cur_tree[0]) { cur_tree[0] = next_tree[i]; cur_tree_index[0] = next_tree_index[i]; adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k); } } } __syncthreads(); } if (blockSize >= 2) { if (tid < 1) { Dtype* next_tree = cur_tree + 1 * top_k; Dtype* next_tree_index = cur_tree_index + 1 * top_k; for (int i = 0; i < top_k; i++) { if (next_tree[i] > cur_tree[0]) { cur_tree[0] = next_tree[i]; cur_tree_index[0] = next_tree_index[i]; adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k); } } } __syncthreads(); } // if (tid < 32) { // if (blockSize >= 64) { // volatile Dtype* cur = cur_tree; // volatile Dtype* cur_index = cur_tree_index; // volatile Dtype* next = cur + 32 * top_k; // volatile Dtype* next_index = cur_index + 32 * top_k; // for (int i = 0; i < top_k; i++) { // if (next[i] > cur[0]) { // cur[0] = next[i]; // cur_index[0] = next_index[i]; // adjust_small_heap_with_index_device(cur, cur_index, 0, top_k); // } // } // } // if (blockSize >= 32) { // volatile Dtype* cur = cur_tree; // volatile Dtype* cur_index = cur_tree_index; // volatile Dtype* next = cur + 16 * top_k; // volatile Dtype* next_index = cur_index + 16 * top_k; // for (int i = 0; i < top_k; i++) { // if (next[i] > cur[0]) { // cur[0] = next[i]; // cur_index[0] = next_index[i]; // adjust_small_heap_with_index_device(cur, cur_index, 0, top_k); // } // } // } // if (blockSize >= 16) { // volatile Dtype* cur = cur_tree; // volatile Dtype* cur_index = cur_tree_index; // volatile Dtype* next = cur + 8 * top_k; // volatile Dtype* next_index = cur_index + 8 * top_k; // for (int i = 0; i < top_k; i++) { // if (next[i] > cur[0]) { // cur[0] = next[i]; // cur_index[0] = next_index[i]; // adjust_small_heap_with_index_device(cur, cur_index, 0, top_k); // } // } // if (tid < 8) { // for(int i = 0; i < top_k; i++) { // printf("block_id:%d, tid:%d, i:%d, cur_tree:%f, \n", block_id, tid, i, cur[i]); // } // } // } // if (blockSize >= 8) { // volatile Dtype* cur = cur_tree; // volatile Dtype* cur_index = cur_tree_index; // volatile Dtype* next = cur + 4 * top_k; // volatile Dtype* next_index = cur_index + 4 * top_k; // for (int i = 0; i < top_k; i++) { // if (next[i] > cur[0]) { // cur[0] = next[i]; // cur_index[0] = next_index[i]; // adjust_small_heap_with_index_device(cur, cur_index, 0, top_k); // if (block_id == 0 && tid < 1) { // for(int m = 0; m < top_k; m++) { // printf("block_id:%d, tid:%d, i:%d, m:%d, cur_tree:%f, next:%f\n", block_id, tid, i, m, cur[m], next[m]); // } // } // } // } // } // if (blockSize >= 4) { // volatile Dtype* cur = cur_tree; // volatile Dtype* cur_index = cur_tree_index; // volatile Dtype* next = cur + 2 * top_k; // volatile Dtype* next_index = cur_index + 2 * top_k; // for (int i = 0; i < top_k; i++) { // if (next[i] > cur[0]) { // cur[0] = next[i]; // cur_index[0] = next_index[i]; // adjust_small_heap_with_index_device(cur, cur_index, 0, top_k); // } // } // } // if (blockSize >= 2) { // volatile Dtype* cur = cur_tree; // volatile Dtype* cur_index = cur_tree_index; // volatile Dtype* next = cur + 1 * top_k; // volatile Dtype* next_index = cur_index + 1 * top_k; // if (tid == 0) { // for (int i = 0; i < top_k; i++) { // printf("block_id:%d, i:%d, cur_val:%f, cur_index:%f, next_val:%f, next_val:%f\n", block_id, i, cur[i], cur_index[i], next[i], next_index[i]); // //if (next[i] > cur[0]) { // // 
cur[0] = next[i]; // // cur_index[0] = next_index[i]; // // adjust_small_heap_with_index_device(cur, cur_index, 0, top_k); // //} // } // } // } // } if (tid == 0) { int stride = out_max_val ? block_id * top_k * 2 : block_id * top_k; Dtype* out = out_data + stride; for (int i = top_k - 1; i >= 0; i--) { if (!out_max_val) { out[i] = cur_tree_index[0]; } else { out[i] = cur_tree[0]; out[i + top_k] = cur_tree_index[0]; } cur_tree[0] = FLT_MAX; cur_tree_index[0] = -1; adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k); } } } template <typename Dtype, int blockSize> __global__ void topk_heap_shared_no_bank(Dtype *out_data, int n, int inner_dim, const int top_k, const bool out_max_val, const Dtype *in_data){ extern __shared__ Dtype trees[]; const int block_id = blockIdx.x; const int tid = threadIdx.x; Dtype *cur_tree = trees + tid ; Dtype *cur_tree_index = cur_tree + top_k * blockDim.x; for (int i = 0; i < top_k; i++){ cur_tree[i*blockDim.x] = -FLT_MAX; cur_tree_index[i * blockDim.x] = -1; } int stride = blockDim.x; /*build small heap for every thread in one picture*/ const Dtype* in = in_data + block_id * inner_dim; for (int i = tid; i < inner_dim; i += blockDim.x){ if (in[i] > cur_tree[0]) { cur_tree[0] = in[i]; cur_tree_index[0] = i; adjust_small_heap_with_index_device_stride(cur_tree, cur_tree_index, 0, top_k, stride); } } __syncthreads(); if (blockSize >= 512) { if (tid < 256) { Dtype* next_tree = cur_tree + 256; Dtype* next_tree_index = cur_tree_index + 256; for (int i = 0; i < top_k; i++) { int off = i*stride; if (next_tree[off] > cur_tree[0]) { cur_tree[0] = next_tree[off]; cur_tree_index[0] = next_tree_index[off]; adjust_small_heap_with_index_device_stride(cur_tree, cur_tree_index, 0, top_k, stride); } } } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { Dtype* next_tree = cur_tree + 128; Dtype* next_tree_index = cur_tree_index + 128; for (int i = 0; i < top_k; i++) { int off = i*stride; if (next_tree[off] > cur_tree[0]) { cur_tree[0] = next_tree[off]; cur_tree_index[0] = next_tree_index[off]; adjust_small_heap_with_index_device_stride(cur_tree, cur_tree_index, 0, top_k, stride); } } } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { Dtype* next_tree = cur_tree + 64; Dtype* next_tree_index = cur_tree_index + 64; for (int i = 0; i < top_k; i++) { int off = i*stride; if (next_tree[off] > cur_tree[0]) { cur_tree[0] = next_tree[off]; cur_tree_index[0] = next_tree_index[off]; adjust_small_heap_with_index_device_stride(cur_tree, cur_tree_index, 0, top_k, stride); } } } __syncthreads(); } if (blockSize >= 64) { if (tid < 32) { Dtype* next_tree = cur_tree + 32; Dtype* next_tree_index = cur_tree_index + 32; for (int i = 0; i < top_k; i++) { int off = i*stride; if (next_tree[off] > cur_tree[0]) { cur_tree[0] = next_tree[off]; cur_tree_index[0] = next_tree_index[off]; adjust_small_heap_with_index_device_stride(cur_tree, cur_tree_index, 0, top_k, stride); } } } __syncthreads(); } if (blockSize >= 32) { if (tid < 16) { Dtype* next_tree = cur_tree + 16; Dtype* next_tree_index = cur_tree_index + 16; for (int i = 0; i < top_k; i++) { int off = i*stride; if (next_tree[off] > cur_tree[0]) { cur_tree[0] = next_tree[off]; cur_tree_index[0] = next_tree_index[off]; adjust_small_heap_with_index_device_stride(cur_tree, cur_tree_index, 0, top_k, stride); } } } __syncthreads(); } if (blockSize >= 16) { if (tid < 8) { Dtype* next_tree = cur_tree + 8; Dtype* next_tree_index = cur_tree_index + 8; for (int i = 0; i < top_k; i++) { int off = i*stride; if (next_tree[off] > 
cur_tree[0]) { cur_tree[0] = next_tree[off]; cur_tree_index[0] = next_tree_index[off]; adjust_small_heap_with_index_device_stride(cur_tree, cur_tree_index, 0, top_k, stride); } } } __syncthreads(); } if (blockSize >= 8) { if (tid < 4) { Dtype* next_tree = cur_tree + 4; Dtype* next_tree_index = cur_tree_index + 4; for (int i = 0; i < top_k; i++) { int off = i*stride; if (next_tree[off] > cur_tree[0]) { cur_tree[0] = next_tree[off]; cur_tree_index[0] = next_tree_index[off]; adjust_small_heap_with_index_device_stride(cur_tree, cur_tree_index, 0, top_k, stride); } } } __syncthreads(); } if (blockSize >= 4) { if (tid < 2) { Dtype* next_tree = cur_tree + 2; Dtype* next_tree_index = cur_tree_index + 2; for (int i = 0; i < top_k; i++) { int off = i*stride; if (next_tree[off] > cur_tree[0]) { cur_tree[0] = next_tree[off]; cur_tree_index[0] = next_tree_index[off]; adjust_small_heap_with_index_device_stride(cur_tree, cur_tree_index, 0, top_k, stride); } } } __syncthreads(); } if (blockSize >= 2) { if (tid < 1) { Dtype* next_tree = cur_tree + 1; Dtype* next_tree_index = cur_tree_index + 1; for (int i = 0; i < top_k; i++) { int off = i*stride; if (next_tree[off] > cur_tree[0]) { cur_tree[0] = next_tree[off]; cur_tree_index[0] = next_tree_index[off]; adjust_small_heap_with_index_device_stride(cur_tree, cur_tree_index, 0, top_k, stride); } } } __syncthreads(); } if (tid == 0) { int stride = out_max_val ? block_id * top_k * 2 : block_id * top_k; Dtype* out = out_data + stride; for (int i = top_k - 1; i >= 0; i--) { if (!out_max_val) { out[i] = cur_tree_index[0]; } else { out[i] = cur_tree[0]; out[i + top_k] = cur_tree_index[0]; } cur_tree[0] = FLT_MAX; cur_tree_index[0] = -1; adjust_small_heap_with_index_device_stride(cur_tree, cur_tree_index, 0, top_k, stride); } } } /* template<DataType dataType, typename LayOutType> SaberStatus SaberArgmax<dataType, LayOutType>::dispatch( const std::vector<DataTensor *> inputs, std::vector<DataTensor *> outputs, ArgmaxParam<void> &param) { */ template <DataType OpDtype, DataType inDtype, DataType outDtype, typename LayOutType_op, typename LayOutType_in, typename LayOutType_out> SaberStatus SaberArgmax<NV, OpDtype, inDtype, outDtype,\ LayOutType_op, LayOutType_in, LayOutType_out>::dispatch(const std::vector<DataTensor_in *>& inputs, std::vector<DataTensor_out *>& outputs, ArgmaxParam<OpTensor>& param) { cudaStream_t cuda_stream = this->_ctx.get_compute_stream(); outputs[0]->set_seq_offset(inputs[0]->get_seq_offset()); const InDataType * in_data = inputs[0]->data(); OutDataType * out_data = outputs[0]->mutable_data(); int outer_dim = inputs[0]->count(0, param.axis); if (param.has_axis) { int count = inputs[0]->count(0, inputs[0]->dims()); int dim = inputs[0]->shape()[param.axis]; int inner_dim = inputs[0]->count(param.axis + 1, inputs[0]->dims()); int total_threads = count / dim; if (param.top_k == 1) { top1_channel<InDataType><<<CUDA_GET_BLOCKS(total_threads), CUDA_NUM_THREADS, 0, cuda_stream>>>(in_data, outer_dim, dim, inner_dim, param.out_max_val, out_data); } else { topk_channel<InDataType><<<CUDA_GET_BLOCKS(total_threads), CUDA_NUM_THREADS, sizeof(InDataType) * CUDA_NUM_THREADS * param.top_k, cuda_stream>>>(in_data, outer_dim, dim, inner_dim, param.top_k, param.out_max_val, out_data); } } else { int inner_dim = inputs[0]->count(1, inputs[0]->dims()); int outer_dim = inputs[0]->num(); if (param.top_k == 1) { if (inner_dim / CUDA_NUM_THREADS < 10) { int block_size = pow(2, ceil(log(inner_dim) / log(2))); block_size = block_size > CUDA_NUM_THREADS ? 
CUDA_NUM_THREADS : block_size; top1<InDataType, CUDA_NUM_THREADS><<<outer_dim, CUDA_NUM_THREADS, 0, cuda_stream>>>(in_data, outer_dim, inner_dim, param.out_max_val, out_data); } else { int block_num = CUDA_GET_BLOCKS(inner_dim); dim3 grid(block_num, outer_dim); block_top1<InDataType, CUDA_NUM_THREADS><<<grid, CUDA_NUM_THREADS, 0, cuda_stream>>>(in_data, outer_dim, inner_dim, _block_max_value.mutable_data(), _block_max_index.mutable_data()); top1<InDataType, CUDA_NUM_THREADS><<<outer_dim, CUDA_NUM_THREADS, 0, cuda_stream>>>(_block_max_value.data(), _block_max_index.data(), outer_dim, block_num, param.out_max_val, out_data); } } else { //topk_heap_shared<InDataType, CUDA_NUM_THREADS><<<outer_dim, CUDA_NUM_THREADS, sizeof(InDataType) * CUDA_NUM_THREADS * param.top_k * 2, cuda_stream>>>(out_data, outer_dim, inner_dim, param.top_k, param.out_max_val, in_data); topk_heap_shared_no_bank<InDataType, CUDA_NUM_THREADS><<<outer_dim, CUDA_NUM_THREADS, sizeof(InDataType) * CUDA_NUM_THREADS * param.top_k * 2, cuda_stream>>>(out_data, outer_dim, inner_dim, param.top_k, param.out_max_val, in_data); } } return SaberSuccess; } } }
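The topk_heap_shared and topk_heap_shared_no_bank kernels in the pair above keep one size-top_k min-heap per thread: the heap root always holds the smallest of the k best values seen so far, a new candidate only ever replaces the root, and adjust_small_heap_with_index* sifts it back down. A minimal host-side sketch of that selection idea, with made-up names (sift_down, heap, idx) rather than the kernels' own helpers:

#include <cstdio>
#include <cfloat>

// Restore the min-heap property after the root of a size-k heap was replaced.
static void sift_down(float* heap, int* idx, int node, int k) {
    while (2 * node + 1 < k) {
        int child = 2 * node + 1;
        if (child + 1 < k && heap[child + 1] < heap[child]) child++;
        if (heap[node] <= heap[child]) break;
        float tv = heap[node]; heap[node] = heap[child]; heap[child] = tv;
        int ti = idx[node]; idx[node] = idx[child]; idx[child] = ti;
        node = child;
    }
}

int main() {
    const int k = 3;
    float data[] = {0.2f, 1.5f, -0.7f, 3.1f, 0.9f, 2.4f};
    float heap[k]; int idx[k];
    for (int i = 0; i < k; ++i) { heap[i] = -FLT_MAX; idx[i] = -1; }
    for (int i = 0; i < 6; ++i) {
        if (data[i] > heap[0]) {            // better than the worst of the current top-k
            heap[0] = data[i]; idx[0] = i;  // overwrite the root
            sift_down(heap, idx, 0, k);     // push it down to its place
        }
    }
    for (int i = 0; i < k; ++i) printf("top value %f at index %d\n", heap[i], idx[i]);
    return 0;
}

The _no_bank variant stores each thread's heap with a blockDim.x stride so neighbouring threads touch different shared-memory banks; the selection logic itself is the same.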
9a6c266641e16c0dee76e9625e7cba378505a9db.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "backend/kernel_compiler/gpu/cuda_impl/uniform_candidate_sampler_impl.cuh" template <typename S> __global__ void AssignToOutput(const int64_t size, const S prob_val, S *output_array) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) { output_array[pos] = prob_val; } } template <typename S> void CalUniformCandidateSampler(const int64_t true_size, const int64_t num_sampled, const S prob_val, S *true_expected_count, S *sampled_expected_count, hipStream_t cuda_stream) { hipLaunchKernelGGL(( AssignToOutput), dim3(GET_BLOCKS(true_size)), dim3(GET_THREADS), 0, cuda_stream, true_size, prob_val, true_expected_count); hipLaunchKernelGGL(( AssignToOutput), dim3(GET_BLOCKS(num_sampled)), dim3(GET_THREADS), 0, cuda_stream, num_sampled, prob_val, sampled_expected_count); } template void CalUniformCandidateSampler<float>(const int64_t true_size, const int64_t num_sampled, const float prob_val, float *true_expected_count, float *sampled_expected_count, hipStream_t cuda_stream);
9a6c266641e16c0dee76e9625e7cba378505a9db.cu
/** * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "backend/kernel_compiler/gpu/cuda_impl/uniform_candidate_sampler_impl.cuh" template <typename S> __global__ void AssignToOutput(const int64_t size, const S prob_val, S *output_array) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) { output_array[pos] = prob_val; } } template <typename S> void CalUniformCandidateSampler(const int64_t true_size, const int64_t num_sampled, const S prob_val, S *true_expected_count, S *sampled_expected_count, cudaStream_t cuda_stream) { AssignToOutput<<<GET_BLOCKS(true_size), GET_THREADS, 0, cuda_stream>>>(true_size, prob_val, true_expected_count); AssignToOutput<<<GET_BLOCKS(num_sampled), GET_THREADS, 0, cuda_stream>>>(num_sampled, prob_val, sampled_expected_count); } template void CalUniformCandidateSampler<float>(const int64_t true_size, const int64_t num_sampled, const float prob_val, float *true_expected_count, float *sampled_expected_count, cudaStream_t cuda_stream);
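AssignToOutput in the pair above is a grid-stride fill: each thread starts at its global index and advances by blockDim.x * gridDim.x, so any grid size covers the whole array. A self-contained sketch of the same pattern with explicit launch parameters instead of the GET_BLOCKS/GET_THREADS macros (the kernel name fill_value and the sizes are illustrative, not part of the original):

#include <cstdio>

__global__ void fill_value(const size_t size, const float value, float* out) {
    // Stride by the total number of launched threads until the end of the array.
    for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size;
         pos += (size_t)blockDim.x * gridDim.x) {
        out[pos] = value;
    }
}

int main() {
    const size_t n = 1 << 20;
    float* d_out = nullptr;
    cudaMalloc((void**)&d_out, n * sizeof(float));
    // Deliberately launch far fewer threads than elements; the loop still covers all of them.
    fill_value<<<128, 256>>>(n, 0.25f, d_out);
    cudaDeviceSynchronize();
    cudaFree(d_out);
    return 0;
}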
c64f03025b1ce1a74168e7fb591cb4999cbfea3e.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (c) 2013-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <eigensolvers/eigensolver.h> #include <eigensolvers/eigenvector_solver.h> #include <norm.h> #include <amgx_timer.h> #include <blas.h> #include <memory_info.h> #include "amgx_types/util.h" #include "amgx_types/rand.h" #include "amgx_types/io.h" #include <sstream> #include <iomanip> namespace amgx { template <class TConfig> EigenSolver<TConfig>::EigenSolver(AMG_Config &cfg, const std::string &cfg_scope) : m_A(0), m_converged(false), m_curr_iter(0), m_num_iters(0), m_max_iters(0), m_ref_count(1) { m_want_eigenvectors = cfg.getParameter<int>("eig_eigenvector", cfg_scope); m_tolerance = cfg.getParameter<double>("eig_tolerance", cfg_scope); m_shift = types::util<ValueTypeVec>::get_one() * cfg.getParameter<double>("eig_shift", cfg_scope); m_damping_factor = cfg.getParameter<double>("eig_damping_factor", cfg_scope); m_max_iters = cfg.getParameter<int>("eig_max_iters", cfg_scope); m_verbosity_level = cfg.getParameter<int>("verbosity_level", cfg_scope); m_eigenvector_solver_name = cfg.getParameter<std::string>("eig_eigenvector_solver", cfg_scope); m_norm_type = cfg.getParameter<NormType>("norm", cfg_scope); std::string which = cfg.getParameter<std::string>("eig_which", cfg_scope); if (which == "smallest") { m_which = EIG_SMALLEST; } else if (which == "largest") { m_which = EIG_LARGEST; } else if (which == "pagerank") { m_which = EIG_PAGERANK; } else if (which == "shift") { m_which = EIG_SHIFT; } else { FatalError("EigenSolver: invalid target spectrum.", AMGX_ERR_CONFIGURATION); } // Allocate events. 
hipEventCreate(&m_setup_start); hipEventCreate(&m_setup_stop); hipEventCreate(&m_solve_start); hipEventCreate(&m_solve_stop); hipEventCreate(&m_iter_start); hipEventCreate(&m_iter_stop); m_setup_time = 0.0f; m_solve_time = 0.0f; } template <class TConfig> EigenSolver<TConfig>::~EigenSolver() { m_eigenvalues.clear(); m_eigenvectors.clear(); hipEventDestroy(m_setup_start); hipEventDestroy(m_setup_stop); hipEventDestroy(m_solve_start); hipEventDestroy(m_solve_stop); hipEventDestroy(m_iter_start); hipEventDestroy(m_iter_stop); } template <class TConfig> int EigenSolver<TConfig>::get_num_iters() const { return m_num_iters; } template <class TConfig> void EigenSolver<TConfig>::set_max_iters(int max_iters) { m_max_iters = max_iters; } template <class TConfig> void EigenSolver<TConfig>::set_tolerance(double tol) { m_tolerance = tol; } template <class TConfig> void EigenSolver<TConfig>::set_shift(ValueTypeVec shift) { m_shift = shift; } template <class TConfig> bool EigenSolver<TConfig>::converged() const { return m_converged; } template <class TConfig> void EigenSolver<TConfig>::setup(Operator<TConfig> &A) { m_A = &A; m_converged = false; #ifdef AMGX_WITH_MPI #ifdef MPI_SOLVE_PROFILE MPI_Barrier(MPI_COMM_WORLD); #endif #endif hipEventRecord(m_setup_start); solver_setup(); #ifdef AMGX_WITH_MPI #ifdef MPI_SOLVE_PROFILE MPI_Barrier(MPI_COMM_WORLD); #endif #endif hipEventRecord(m_setup_stop); hipEventSynchronize(m_setup_stop); hipEventElapsedTime(&m_setup_time, m_setup_start, m_setup_stop); m_setup_time *= 1e-3f; } template<class TConfig> void EigenSolver<TConfig>::exchangeSolveResultsConsolidation(AMGX_STATUS &status) { std::vector<PODVector_h> m_res_history; PODVector_h res(1); for (int i = 0; i < m_residuals.size(); i++) { res[0] = m_residuals[i]; m_res_history.push_back(res); } this->m_A->getManager()->exchangeSolveResultsConsolidation(m_num_iters, m_res_history, status, true /*looks like we always store residual history*/); } template<class TConfig> AMGX_ERROR EigenSolver<TConfig>::solve_no_throw(VVector &x, AMGX_STATUS &status) { AMGX_ERROR rc = AMGX_OK; try { // Check if fine level is consolidated and not a root partition if ( !(this->m_A->getManager() != NULL && this->m_A->getManager()->isFineLevelConsolidated() && !this->m_A->getManager()->isFineLevelRootPartition() )) { // If matrix is consolidated on fine level and not a root partition if (x.tag == -1) { x.tag = 4242 * 100 + 1; } status = this->solve(x); } // Exchange residual history, number of iterations, solve status if fine level consoildation was used if (this->m_A->getManager() != NULL && this->m_A->getManager()->isFineLevelConsolidated()) { this->exchangeSolveResultsConsolidation(status); } } AMGX_CATCHES(rc) return rc; } template <class TConfig> AMGX_STATUS EigenSolver<TConfig>::solve(VVector &x) { // initial vector is empty, initialize it with random values. if (x.empty()) { Operator<TConfig> &A = *this->m_A; ViewType oldView = A.currentView(); A.setViewExterior(); int N = A.get_num_cols(); Vector_h h_x(N); for (int i = 0; i < N; ++i) { h_x[i] = types::get_rand<ValueTypeVec>(); } x = h_x; A.setView(oldView); } // This code is needed for MPI implementation of eigensolvers. 
x.set_block_dimx(1); x.set_block_dimy(m_A->get_block_dimx()); if (x.tag == -1) { x.tag = 1; } x.dirtybit = 1; x.delayed_send = 1; m_eigenvectors.clear(); m_eigenvalues.clear(); #ifdef AMGX_WITH_MPI #ifdef MPI_SOLVE_PROFILE MPI_Barrier(MPI_COMM_WORLD); #endif #endif hipEventRecord(m_solve_start); solve_init(x); bool done = false; for (m_curr_iter = 0; m_curr_iter < m_max_iters && !done; ++m_curr_iter) { done = solve_iteration(x); // solve_iteration did not update the residuals, add an undefined norm. if (m_residuals.size() == m_curr_iter) { m_residuals.push_back( types::util<PODValueB>::get_minus_one()); } if (m_verbosity_level == 3) { print_iter_stats(); } } print_final_stats(); if (done) { m_converged = true; } m_num_iters = m_curr_iter; solve_finalize(); if (m_want_eigenvectors && m_eigenvectors.empty()) { std::string str = "Eigenvectors requested but not provided by solver.\n"; amgx_output(str.c_str(), str.length()); if (m_eigenvector_solver_name.empty()) { FatalError("Eigenvectors requested but no eigenvector solver provided", AMGX_ERR_CONFIGURATION); } EigenVectorSolver<TConfig> *eigenvector_solver = EigenVectorSolverFactory<TConfig>::create(m_eigenvector_solver_name); ValueTypeVec eigenvalue = m_eigenvalues.front(); eigenvector_solver->setup(*m_A); m_eigenvectors.resize(m_eigenvalues.size()); eigenvector_solver->solve(eigenvalue, m_eigenvectors[0]); delete eigenvector_solver; } #ifdef AMGX_WITH_MPI #ifdef MPI_SOLVE_PROFILE MPI_Barrier(MPI_COMM_WORLD); #endif #endif hipEventRecord(m_solve_stop); hipEventSynchronize(m_solve_stop); hipEventElapsedTime(&m_solve_time, m_solve_start, m_solve_stop); m_solve_time *= 1e-3f; if (m_verbosity_level == 3) { std::stringstream ss; if (m_converged) { ss << "Eigensolver converged after " << get_num_iters() << " iterations." << std::endl; std::vector<ValueTypeVec> eigenvalues = get_eigenvalues(); ss << "Eigenvalue: "; for (int i = 0; i < eigenvalues.size(); ++i) { ss << eigenvalues[i] << " "; } ss << std::endl; } else { ss << "Eigensolver did not converge after " << this->get_num_iters() << " iterations." << std::endl; } amgx_output(ss.str().c_str(), static_cast<int>(ss.str().length())); print_timings(); } return m_converged ? AMGX_ST_CONVERGED : AMGX_ST_NOT_CONVERGED; } template<class TConfig> void EigenSolver<TConfig>::postprocess_eigenpairs() { // If the smallest eigenvalues were computed (with A^-1), // we need to invert the eigenvalue. if (m_which == EIG_SMALLEST) { for (int i = 0; i < m_eigenvalues.size(); ++i) { ValueTypeVec eigenvalue = m_eigenvalues[i]; ValueTypeVec inv_eigenvalue = types::util<ValueTypeVec>::get_one() / eigenvalue; m_eigenvalues[i] = inv_eigenvalue + m_shift; } } } template<class TConfig> void EigenSolver<TConfig>::print_timings() { std::stringstream ss; ss << "Total Time: " << m_setup_time + m_solve_time << std::endl; ss << " setup: " << m_setup_time << " s\n"; ss << " solve: " << m_solve_time << " s\n"; ss << " solve(per iteration): " << ((m_num_iters == 0) ? 
m_num_iters : m_solve_time / m_num_iters) << " s\n"; amgx_output(ss.str().c_str(), static_cast<int>(ss.str().length())); } template <typename TConfig> void EigenSolver<TConfig>::print_iter_stats() { if (m_curr_iter == 0) { std::stringstream ss; ss << std::setw(15) << "iter" << std::setw(20) << " Mem Usage (GB)" << std::setw(15) << "residual"; ss << std::setw(15) << "rate"; ss << std::endl; ss << " --------------------------------------------------------------"; ss << std::endl; amgx_output(ss.str().c_str(), static_cast<int>(ss.str().length())); } std::stringstream ss; ss << std::setw(15) << m_curr_iter; MemoryInfo::updateMaxMemoryUsage(); ss << std::setw(20) << MemoryInfo::getMaxMemoryUsage(); PODValueB iter_residual = m_residuals[m_curr_iter]; if (iter_residual >= 0) { ss << std::scientific << std::setprecision(6) << std::setw(15) << iter_residual; // Compute convergence rate. if (m_curr_iter > 0) { PODValueB prev_residual = m_residuals[m_curr_iter - 1]; if (prev_residual > 0) { ss << std::setw(15); ss << std::fixed << std::setprecision(4) << iter_residual / prev_residual; } } } ss << std::endl; amgx_output(ss.str().c_str(), static_cast<int>(ss.str().length())); } template <typename TConfig> void EigenSolver<TConfig>::print_final_stats() { std::stringstream ss; ss << " --------------------------------------------------------------"; ss << std::endl; amgx_output(ss.str().c_str(), static_cast<int>(ss.str().length())); } template<class TConfig> typename EigenSolverFactory<TConfig>::EigenSolverFactoryMap & EigenSolverFactory<TConfig>::getFactories() { static EigenSolverFactoryMap factories; return factories; } template<class TConfig> void EigenSolverFactory<TConfig>::registerFactory(const std::string &name, EigenSolverFactory<TConfig> *f) { EigenSolverFactoryMap &factories = getFactories(); typename EigenSolverFactoryMap::const_iterator it = factories.find(name); if (it != factories.end()) { std::string error = "EigenSolverFactory '" + name + "' has already been registered\n"; FatalError(error.c_str(), AMGX_ERR_CORE); } factories[name] = f; } template<class TConfig> void EigenSolverFactory<TConfig>::unregisterFactory(const std::string &name) { EigenSolverFactoryMap &factories = getFactories(); typename EigenSolverFactoryMap::iterator it = factories.find(name); if (it == factories.end()) { std::string error = "EigenSolverFactory '" + name + "' has not been registered\n"; FatalError(error.c_str(), AMGX_ERR_CORE); } EigenSolverFactory<TConfig> *factory = it->second; assert(factory != NULL); delete factory; factories.erase(it); } template<class TConfig> void EigenSolverFactory<TConfig>::unregisterFactories() { EigenSolverFactoryMap &factories = getFactories(); typename EigenSolverFactoryMap::iterator it = factories.begin(); for (; it != factories.end();) { EigenSolverFactory<TConfig> *factory = it->second; assert(factory != NULL); it++; delete factory; } factories.clear(); } template<class TConfig> EigenSolver<TConfig> *EigenSolverFactory<TConfig>::allocate(AMG_Config &cfg, const std::string &current_scope, const std::string &solverType, ThreadManager *tmng) { std::string solverName, new_scope; cfg.getParameter<std::string>(solverType, solverName, current_scope, new_scope); EigenSolverFactoryMap &factories = getFactories(); typename EigenSolverFactoryMap::const_iterator it = factories.find(solverName); if (it == factories.end()) { std::string error = "EigenSolverFactory '" + solverName + "' has not been registered\n"; FatalError(error.c_str( ), AMGX_ERR_CORE); } EigenSolver<TConfig> *solver = 
it->second->create(cfg, new_scope, tmng); solver->setName(solverName); return solver; } ; template<class TConfig> EigenSolver<TConfig> *EigenSolverFactory<TConfig>::allocate(AMG_Config &cfg, const std::string &solverType, ThreadManager *tmng) { return EigenSolverFactory<TConfig>::allocate(cfg, "default", solverType, tmng); } // Explicit template instantiation. #define AMGX_CASE_LINE(CASE) template class EigenSolver<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE #define AMGX_CASE_LINE(CASE) template class EigenSolverFactory<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE };
c64f03025b1ce1a74168e7fb591cb4999cbfea3e.cu
/* Copyright (c) 2013-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <eigensolvers/eigensolver.h> #include <eigensolvers/eigenvector_solver.h> #include <norm.h> #include <amgx_timer.h> #include <blas.h> #include <memory_info.h> #include "amgx_types/util.h" #include "amgx_types/rand.h" #include "amgx_types/io.h" #include <sstream> #include <iomanip> namespace amgx { template <class TConfig> EigenSolver<TConfig>::EigenSolver(AMG_Config &cfg, const std::string &cfg_scope) : m_A(0), m_converged(false), m_curr_iter(0), m_num_iters(0), m_max_iters(0), m_ref_count(1) { m_want_eigenvectors = cfg.getParameter<int>("eig_eigenvector", cfg_scope); m_tolerance = cfg.getParameter<double>("eig_tolerance", cfg_scope); m_shift = types::util<ValueTypeVec>::get_one() * cfg.getParameter<double>("eig_shift", cfg_scope); m_damping_factor = cfg.getParameter<double>("eig_damping_factor", cfg_scope); m_max_iters = cfg.getParameter<int>("eig_max_iters", cfg_scope); m_verbosity_level = cfg.getParameter<int>("verbosity_level", cfg_scope); m_eigenvector_solver_name = cfg.getParameter<std::string>("eig_eigenvector_solver", cfg_scope); m_norm_type = cfg.getParameter<NormType>("norm", cfg_scope); std::string which = cfg.getParameter<std::string>("eig_which", cfg_scope); if (which == "smallest") { m_which = EIG_SMALLEST; } else if (which == "largest") { m_which = EIG_LARGEST; } else if (which == "pagerank") { m_which = EIG_PAGERANK; } else if (which == "shift") { m_which = EIG_SHIFT; } else { FatalError("EigenSolver: invalid target spectrum.", AMGX_ERR_CONFIGURATION); } // Allocate events. 
cudaEventCreate(&m_setup_start); cudaEventCreate(&m_setup_stop); cudaEventCreate(&m_solve_start); cudaEventCreate(&m_solve_stop); cudaEventCreate(&m_iter_start); cudaEventCreate(&m_iter_stop); m_setup_time = 0.0f; m_solve_time = 0.0f; } template <class TConfig> EigenSolver<TConfig>::~EigenSolver() { m_eigenvalues.clear(); m_eigenvectors.clear(); cudaEventDestroy(m_setup_start); cudaEventDestroy(m_setup_stop); cudaEventDestroy(m_solve_start); cudaEventDestroy(m_solve_stop); cudaEventDestroy(m_iter_start); cudaEventDestroy(m_iter_stop); } template <class TConfig> int EigenSolver<TConfig>::get_num_iters() const { return m_num_iters; } template <class TConfig> void EigenSolver<TConfig>::set_max_iters(int max_iters) { m_max_iters = max_iters; } template <class TConfig> void EigenSolver<TConfig>::set_tolerance(double tol) { m_tolerance = tol; } template <class TConfig> void EigenSolver<TConfig>::set_shift(ValueTypeVec shift) { m_shift = shift; } template <class TConfig> bool EigenSolver<TConfig>::converged() const { return m_converged; } template <class TConfig> void EigenSolver<TConfig>::setup(Operator<TConfig> &A) { m_A = &A; m_converged = false; #ifdef AMGX_WITH_MPI #ifdef MPI_SOLVE_PROFILE MPI_Barrier(MPI_COMM_WORLD); #endif #endif cudaEventRecord(m_setup_start); solver_setup(); #ifdef AMGX_WITH_MPI #ifdef MPI_SOLVE_PROFILE MPI_Barrier(MPI_COMM_WORLD); #endif #endif cudaEventRecord(m_setup_stop); cudaEventSynchronize(m_setup_stop); cudaEventElapsedTime(&m_setup_time, m_setup_start, m_setup_stop); m_setup_time *= 1e-3f; } template<class TConfig> void EigenSolver<TConfig>::exchangeSolveResultsConsolidation(AMGX_STATUS &status) { std::vector<PODVector_h> m_res_history; PODVector_h res(1); for (int i = 0; i < m_residuals.size(); i++) { res[0] = m_residuals[i]; m_res_history.push_back(res); } this->m_A->getManager()->exchangeSolveResultsConsolidation(m_num_iters, m_res_history, status, true /*looks like we always store residual history*/); } template<class TConfig> AMGX_ERROR EigenSolver<TConfig>::solve_no_throw(VVector &x, AMGX_STATUS &status) { AMGX_ERROR rc = AMGX_OK; try { // Check if fine level is consolidated and not a root partition if ( !(this->m_A->getManager() != NULL && this->m_A->getManager()->isFineLevelConsolidated() && !this->m_A->getManager()->isFineLevelRootPartition() )) { // If matrix is consolidated on fine level and not a root partition if (x.tag == -1) { x.tag = 4242 * 100 + 1; } status = this->solve(x); } // Exchange residual history, number of iterations, solve status if fine level consoildation was used if (this->m_A->getManager() != NULL && this->m_A->getManager()->isFineLevelConsolidated()) { this->exchangeSolveResultsConsolidation(status); } } AMGX_CATCHES(rc) return rc; } template <class TConfig> AMGX_STATUS EigenSolver<TConfig>::solve(VVector &x) { // initial vector is empty, initialize it with random values. if (x.empty()) { Operator<TConfig> &A = *this->m_A; ViewType oldView = A.currentView(); A.setViewExterior(); int N = A.get_num_cols(); Vector_h h_x(N); for (int i = 0; i < N; ++i) { h_x[i] = types::get_rand<ValueTypeVec>(); } x = h_x; A.setView(oldView); } // This code is needed for MPI implementation of eigensolvers. 
x.set_block_dimx(1); x.set_block_dimy(m_A->get_block_dimx()); if (x.tag == -1) { x.tag = 1; } x.dirtybit = 1; x.delayed_send = 1; m_eigenvectors.clear(); m_eigenvalues.clear(); #ifdef AMGX_WITH_MPI #ifdef MPI_SOLVE_PROFILE MPI_Barrier(MPI_COMM_WORLD); #endif #endif cudaEventRecord(m_solve_start); solve_init(x); bool done = false; for (m_curr_iter = 0; m_curr_iter < m_max_iters && !done; ++m_curr_iter) { done = solve_iteration(x); // solve_iteration did not update the residuals, add an undefined norm. if (m_residuals.size() == m_curr_iter) { m_residuals.push_back( types::util<PODValueB>::get_minus_one()); } if (m_verbosity_level == 3) { print_iter_stats(); } } print_final_stats(); if (done) { m_converged = true; } m_num_iters = m_curr_iter; solve_finalize(); if (m_want_eigenvectors && m_eigenvectors.empty()) { std::string str = "Eigenvectors requested but not provided by solver.\n"; amgx_output(str.c_str(), str.length()); if (m_eigenvector_solver_name.empty()) { FatalError("Eigenvectors requested but no eigenvector solver provided", AMGX_ERR_CONFIGURATION); } EigenVectorSolver<TConfig> *eigenvector_solver = EigenVectorSolverFactory<TConfig>::create(m_eigenvector_solver_name); ValueTypeVec eigenvalue = m_eigenvalues.front(); eigenvector_solver->setup(*m_A); m_eigenvectors.resize(m_eigenvalues.size()); eigenvector_solver->solve(eigenvalue, m_eigenvectors[0]); delete eigenvector_solver; } #ifdef AMGX_WITH_MPI #ifdef MPI_SOLVE_PROFILE MPI_Barrier(MPI_COMM_WORLD); #endif #endif cudaEventRecord(m_solve_stop); cudaEventSynchronize(m_solve_stop); cudaEventElapsedTime(&m_solve_time, m_solve_start, m_solve_stop); m_solve_time *= 1e-3f; if (m_verbosity_level == 3) { std::stringstream ss; if (m_converged) { ss << "Eigensolver converged after " << get_num_iters() << " iterations." << std::endl; std::vector<ValueTypeVec> eigenvalues = get_eigenvalues(); ss << "Eigenvalue: "; for (int i = 0; i < eigenvalues.size(); ++i) { ss << eigenvalues[i] << " "; } ss << std::endl; } else { ss << "Eigensolver did not converge after " << this->get_num_iters() << " iterations." << std::endl; } amgx_output(ss.str().c_str(), static_cast<int>(ss.str().length())); print_timings(); } return m_converged ? AMGX_ST_CONVERGED : AMGX_ST_NOT_CONVERGED; } template<class TConfig> void EigenSolver<TConfig>::postprocess_eigenpairs() { // If the smallest eigenvalues were computed (with A^-1), // we need to invert the eigenvalue. if (m_which == EIG_SMALLEST) { for (int i = 0; i < m_eigenvalues.size(); ++i) { ValueTypeVec eigenvalue = m_eigenvalues[i]; ValueTypeVec inv_eigenvalue = types::util<ValueTypeVec>::get_one() / eigenvalue; m_eigenvalues[i] = inv_eigenvalue + m_shift; } } } template<class TConfig> void EigenSolver<TConfig>::print_timings() { std::stringstream ss; ss << "Total Time: " << m_setup_time + m_solve_time << std::endl; ss << " setup: " << m_setup_time << " s\n"; ss << " solve: " << m_solve_time << " s\n"; ss << " solve(per iteration): " << ((m_num_iters == 0) ? 
m_num_iters : m_solve_time / m_num_iters) << " s\n"; amgx_output(ss.str().c_str(), static_cast<int>(ss.str().length())); } template <typename TConfig> void EigenSolver<TConfig>::print_iter_stats() { if (m_curr_iter == 0) { std::stringstream ss; ss << std::setw(15) << "iter" << std::setw(20) << " Mem Usage (GB)" << std::setw(15) << "residual"; ss << std::setw(15) << "rate"; ss << std::endl; ss << " --------------------------------------------------------------"; ss << std::endl; amgx_output(ss.str().c_str(), static_cast<int>(ss.str().length())); } std::stringstream ss; ss << std::setw(15) << m_curr_iter; MemoryInfo::updateMaxMemoryUsage(); ss << std::setw(20) << MemoryInfo::getMaxMemoryUsage(); PODValueB iter_residual = m_residuals[m_curr_iter]; if (iter_residual >= 0) { ss << std::scientific << std::setprecision(6) << std::setw(15) << iter_residual; // Compute convergence rate. if (m_curr_iter > 0) { PODValueB prev_residual = m_residuals[m_curr_iter - 1]; if (prev_residual > 0) { ss << std::setw(15); ss << std::fixed << std::setprecision(4) << iter_residual / prev_residual; } } } ss << std::endl; amgx_output(ss.str().c_str(), static_cast<int>(ss.str().length())); } template <typename TConfig> void EigenSolver<TConfig>::print_final_stats() { std::stringstream ss; ss << " --------------------------------------------------------------"; ss << std::endl; amgx_output(ss.str().c_str(), static_cast<int>(ss.str().length())); } template<class TConfig> typename EigenSolverFactory<TConfig>::EigenSolverFactoryMap & EigenSolverFactory<TConfig>::getFactories() { static EigenSolverFactoryMap factories; return factories; } template<class TConfig> void EigenSolverFactory<TConfig>::registerFactory(const std::string &name, EigenSolverFactory<TConfig> *f) { EigenSolverFactoryMap &factories = getFactories(); typename EigenSolverFactoryMap::const_iterator it = factories.find(name); if (it != factories.end()) { std::string error = "EigenSolverFactory '" + name + "' has already been registered\n"; FatalError(error.c_str(), AMGX_ERR_CORE); } factories[name] = f; } template<class TConfig> void EigenSolverFactory<TConfig>::unregisterFactory(const std::string &name) { EigenSolverFactoryMap &factories = getFactories(); typename EigenSolverFactoryMap::iterator it = factories.find(name); if (it == factories.end()) { std::string error = "EigenSolverFactory '" + name + "' has not been registered\n"; FatalError(error.c_str(), AMGX_ERR_CORE); } EigenSolverFactory<TConfig> *factory = it->second; assert(factory != NULL); delete factory; factories.erase(it); } template<class TConfig> void EigenSolverFactory<TConfig>::unregisterFactories() { EigenSolverFactoryMap &factories = getFactories(); typename EigenSolverFactoryMap::iterator it = factories.begin(); for (; it != factories.end();) { EigenSolverFactory<TConfig> *factory = it->second; assert(factory != NULL); it++; delete factory; } factories.clear(); } template<class TConfig> EigenSolver<TConfig> *EigenSolverFactory<TConfig>::allocate(AMG_Config &cfg, const std::string &current_scope, const std::string &solverType, ThreadManager *tmng) { std::string solverName, new_scope; cfg.getParameter<std::string>(solverType, solverName, current_scope, new_scope); EigenSolverFactoryMap &factories = getFactories(); typename EigenSolverFactoryMap::const_iterator it = factories.find(solverName); if (it == factories.end()) { std::string error = "EigenSolverFactory '" + solverName + "' has not been registered\n"; FatalError(error.c_str( ), AMGX_ERR_CORE); } EigenSolver<TConfig> *solver = 
it->second->create(cfg, new_scope, tmng); solver->setName(solverName); return solver; } ; template<class TConfig> EigenSolver<TConfig> *EigenSolverFactory<TConfig>::allocate(AMG_Config &cfg, const std::string &solverType, ThreadManager *tmng) { return EigenSolverFactory<TConfig>::allocate(cfg, "default", solverType, tmng); } // Explicit template instantiation. #define AMGX_CASE_LINE(CASE) template class EigenSolver<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE #define AMGX_CASE_LINE(CASE) template class EigenSolverFactory<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE };
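A self-contained sketch of the registry pattern used by EigenSolverFactory above, with the AMGX types replaced by minimal stand-ins so it compiles on its own. The Solver/PowerSolver names are invented for illustration and are not AMGX classes; only the map-based register/allocate structure mirrors the code in this file.

#include <cassert>
#include <iostream>
#include <map>
#include <string>

struct Solver { virtual ~Solver() {} virtual void solve() = 0; };

struct SolverFactory
{
    virtual ~SolverFactory() {}
    virtual Solver *create() = 0;

    typedef std::map<std::string, SolverFactory *> FactoryMap;

    // Same idea as getFactories() above: a function-local static registry.
    static FactoryMap &getFactories() { static FactoryMap factories; return factories; }

    static void registerFactory(const std::string &name, SolverFactory *f)
    {
        FactoryMap &factories = getFactories();
        assert(factories.find(name) == factories.end()); // duplicate registration is an error
        factories[name] = f;
    }

    static Solver *allocate(const std::string &name)
    {
        FactoryMap &factories = getFactories();
        FactoryMap::const_iterator it = factories.find(name);
        assert(it != factories.end());                    // unknown solver name is an error
        return it->second->create();
    }
};

struct PowerSolver : Solver { void solve() { std::cout << "solving\n"; } };
struct PowerSolverFactory : SolverFactory { Solver *create() { return new PowerSolver(); } };

int main()
{
    SolverFactory::registerFactory("POWER", new PowerSolverFactory());
    Solver *s = SolverFactory::allocate("POWER");
    s->solve();
    delete s;
    return 0;
}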
c277895f63570761823c0057ea02d87ebf4b321d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> __global__ void vector_add(int *a, int *b, int *c) { /* insert code to calculate the index properly using blockIdx.x, blockDim.x, threadIdx.x */ int index = blockIdx.x * blockDim.x + threadIdx.x; c[index] = a[index] + b[index]; } /* experiment with N */ /* how large can it be? */ #define N (2048*2048) #define THREADS_PER_BLOCK 512 int main() { int *a, *b, *c; int *d_a, *d_b, *d_c; int size = N * sizeof( int ); /* allocate space for device copies of a, b, c */ hipMalloc( (void **) &d_a, size ); hipMalloc( (void **) &d_b, size ); hipMalloc( (void **) &d_c, size ); /* allocate space for host copies of a, b, c and setup input values */ a = (int *)malloc( size ); b = (int *)malloc( size ); c = (int *)malloc( size ); for( int i = 0; i < N; i++ ) { a[i] = b[i] = i; c[i] = 0; } /* copy inputs to device */ /* destination first, then source */ hipMemcpy( d_a, a, size, hipMemcpyHostToDevice ); hipMemcpy( d_b, b, size, hipMemcpyHostToDevice ); /* launch the kernel on the GPU */ /* insert the launch parameters to launch the kernel properly using blocks and threads */ hipLaunchKernelGGL(( vector_add), dim3(N / THREADS_PER_BLOCK) , dim3(THREADS_PER_BLOCK) , 0, 0, d_a, d_b, d_c ); /* copy result back to host */ /* destination first, then source */ hipMemcpy( c, d_c, size, hipMemcpyDeviceToHost ); printf( "c[%d] = %d\n",0,c[0] ); printf( "c[%d] = %d\n",N-1, c[N-1] ); /* clean up */ free(a); free(b); free(c); hipFree( d_a ); hipFree( d_b ); hipFree( d_c ); return 0; } /* end main */
c277895f63570761823c0057ea02d87ebf4b321d.cu
#include <stdio.h> __global__ void vector_add(int *a, int *b, int *c) { /* insert code to calculate the index properly using blockIdx.x, blockDim.x, threadIdx.x */ int index = blockIdx.x * blockDim.x + threadIdx.x; c[index] = a[index] + b[index]; } /* experiment with N */ /* how large can it be? */ #define N (2048*2048) #define THREADS_PER_BLOCK 512 int main() { int *a, *b, *c; int *d_a, *d_b, *d_c; int size = N * sizeof( int ); /* allocate space for device copies of a, b, c */ cudaMalloc( (void **) &d_a, size ); cudaMalloc( (void **) &d_b, size ); cudaMalloc( (void **) &d_c, size ); /* allocate space for host copies of a, b, c and setup input values */ a = (int *)malloc( size ); b = (int *)malloc( size ); c = (int *)malloc( size ); for( int i = 0; i < N; i++ ) { a[i] = b[i] = i; c[i] = 0; } /* copy inputs to device */ /* destination first, then source */ cudaMemcpy( d_a, a, size, cudaMemcpyHostToDevice ); cudaMemcpy( d_b, b, size, cudaMemcpyHostToDevice ); /* launch the kernel on the GPU */ /* insert the launch parameters to launch the kernel properly using blocks and threads */ vector_add<<< N / THREADS_PER_BLOCK , THREADS_PER_BLOCK >>>( d_a, d_b, d_c ); /* copy result back to host */ /* destination first, then source */ cudaMemcpy( c, d_c, size, cudaMemcpyDeviceToHost ); printf( "c[%d] = %d\n",0,c[0] ); printf( "c[%d] = %d\n",N-1, c[N-1] ); /* clean up */ free(a); free(b); free(c); cudaFree( d_a ); cudaFree( d_b ); cudaFree( d_c ); return 0; } /* end main */
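A minimal companion sketch for the exercise above: the same vector addition, but with a bounds check in the kernel and a rounded-up grid so the launch stays correct when N is not a multiple of THREADS_PER_BLOCK. The altered N, the extra kernel parameter, and the error check are additions for illustration only.

#include <stdio.h>
#include <cuda_runtime.h>

#define N (2048*2048 + 3)            /* deliberately not a multiple of the block size */
#define THREADS_PER_BLOCK 512

__global__ void vector_add(const int *a, const int *b, int *c, int n)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < n)                    /* guard the partially filled last block */
        c[index] = a[index] + b[index];
}

int main()
{
    size_t size = N * sizeof(int);
    int *a = (int *)malloc(size), *b = (int *)malloc(size), *c = (int *)malloc(size);
    for (int i = 0; i < N; i++) { a[i] = b[i] = i; c[i] = 0; }

    int *d_a, *d_b, *d_c;
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);

    /* destination first, then source */
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);

    int blocks = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;   /* round up */
    vector_add<<<blocks, THREADS_PER_BLOCK>>>(d_a, d_b, d_c, N);

    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) printf("launch failed: %s\n", cudaGetErrorString(err));

    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    printf("c[%d] = %d\n", N - 1, c[N - 1]);

    free(a); free(b); free(c);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    return 0;
}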
9845da13a474e9c7400e420d939a651fa5f48728.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) 2021 by Contributors * @file array/cuda/rowwise_sampling.cu * @brief uniform rowwise sampling */ #include <hiprand/hiprand_kernel.h> #include <dgl/random.h> #include <dgl/runtime/device_api.h> #include <numeric> #include "../../array/cuda/atomic.cuh" #include "../../runtime/cuda/cuda_common.h" #include "./dgl_cub.cuh" #include "./utils.h" namespace dgl { using namespace cuda; using namespace aten::cuda; namespace aten { namespace impl { namespace { constexpr int BLOCK_SIZE = 128; /** * @brief Compute the size of each row in the sampled CSR, without replacement. * * @tparam IdType The type of node and edge indexes. * @param num_picks The number of non-zero entries to pick per row. * @param num_rows The number of rows to pick. * @param in_rows The set of rows to pick. * @param in_ptr The index where each row's edges start. * @param out_deg The size of each row in the sampled matrix, as indexed by * `in_rows` (output). */ template <typename IdType> __global__ void _CSRRowWiseSampleDegreeKernel( const int64_t num_picks, const int64_t num_rows, const IdType* const in_rows, const IdType* const in_ptr, IdType* const out_deg) { const int tIdx = threadIdx.x + blockIdx.x * blockDim.x; if (tIdx < num_rows) { const int in_row = in_rows[tIdx]; const int out_row = tIdx; out_deg[out_row] = min( static_cast<IdType>(num_picks), in_ptr[in_row + 1] - in_ptr[in_row]); if (out_row == num_rows - 1) { // make the prefixsum work out_deg[num_rows] = 0; } } } /** * @brief Compute the size of each row in the sampled CSR, with replacement. * * @tparam IdType The type of node and edge indexes. * @param num_picks The number of non-zero entries to pick per row. * @param num_rows The number of rows to pick. * @param in_rows The set of rows to pick. * @param in_ptr The index where each row's edges start. * @param out_deg The size of each row in the sampled matrix, as indexed by * `in_rows` (output). */ template <typename IdType> __global__ void _CSRRowWiseSampleDegreeReplaceKernel( const int64_t num_picks, const int64_t num_rows, const IdType* const in_rows, const IdType* const in_ptr, IdType* const out_deg) { const int tIdx = threadIdx.x + blockIdx.x * blockDim.x; if (tIdx < num_rows) { const int64_t in_row = in_rows[tIdx]; const int64_t out_row = tIdx; if (in_ptr[in_row + 1] - in_ptr[in_row] == 0) { out_deg[out_row] = 0; } else { out_deg[out_row] = static_cast<IdType>(num_picks); } if (out_row == num_rows - 1) { // make the prefixsum work out_deg[num_rows] = 0; } } } /** * @brief Perform row-wise uniform sampling on a CSR matrix, * and generate a COO matrix, without replacement. * * @tparam IdType The ID type used for matrices. * @tparam TILE_SIZE The number of rows covered by each threadblock. * @param rand_seed The random seed to use. * @param num_picks The number of non-zeros to pick per row. * @param num_rows The number of rows to pick. * @param in_rows The set of rows to pick. * @param in_ptr The indptr array of the input CSR. * @param in_index The indices array of the input CSR. * @param data The data array of the input CSR. * @param out_ptr The offset to write each row to in the output COO. * @param out_rows The rows of the output COO (output). * @param out_cols The columns of the output COO (output). * @param out_idxs The data array of the output COO (output). 
*/ template <typename IdType, int TILE_SIZE> __global__ void _CSRRowWiseSampleUniformKernel( const uint64_t rand_seed, const int64_t num_picks, const int64_t num_rows, const IdType* const in_rows, const IdType* const in_ptr, const IdType* const in_index, const IdType* const data, const IdType* const out_ptr, IdType* const out_rows, IdType* const out_cols, IdType* const out_idxs) { // we assign one warp per row assert(blockDim.x == BLOCK_SIZE); int64_t out_row = blockIdx.x * TILE_SIZE; const int64_t last_row = min(static_cast<int64_t>(blockIdx.x + 1) * TILE_SIZE, num_rows); hiprandStatePhilox4_32_10_t rng; hiprand_init(rand_seed * gridDim.x + blockIdx.x, threadIdx.x, 0, &rng); while (out_row < last_row) { const int64_t row = in_rows[out_row]; const int64_t in_row_start = in_ptr[row]; const int64_t deg = in_ptr[row + 1] - in_row_start; const int64_t out_row_start = out_ptr[out_row]; if (deg <= num_picks) { // just copy row when there is not enough nodes to sample. for (int idx = threadIdx.x; idx < deg; idx += BLOCK_SIZE) { const IdType in_idx = in_row_start + idx; out_rows[out_row_start + idx] = row; out_cols[out_row_start + idx] = in_index[in_idx]; out_idxs[out_row_start + idx] = data ? data[in_idx] : in_idx; } } else { // generate permutation list via reservoir algorithm for (int idx = threadIdx.x; idx < num_picks; idx += BLOCK_SIZE) { out_idxs[out_row_start + idx] = idx; } __syncthreads(); for (int idx = num_picks + threadIdx.x; idx < deg; idx += BLOCK_SIZE) { const int num = hiprand(&rng) % (idx + 1); if (num < num_picks) { // use max so as to achieve the replacement order the serial // algorithm would have AtomicMax(out_idxs + out_row_start + num, idx); } } __syncthreads(); // copy permutation over for (int idx = threadIdx.x; idx < num_picks; idx += BLOCK_SIZE) { const IdType perm_idx = out_idxs[out_row_start + idx] + in_row_start; out_rows[out_row_start + idx] = row; out_cols[out_row_start + idx] = in_index[perm_idx]; out_idxs[out_row_start + idx] = data ? data[perm_idx] : perm_idx; } } out_row += 1; } } /** * @brief Perform row-wise uniform sampling on a CSR matrix, * and generate a COO matrix, with replacement. * * @tparam IdType The ID type used for matrices. * @tparam TILE_SIZE The number of rows covered by each threadblock. * @param rand_seed The random seed to use. * @param num_picks The number of non-zeros to pick per row. * @param num_rows The number of rows to pick. * @param in_rows The set of rows to pick. * @param in_ptr The indptr array of the input CSR. * @param in_index The indices array of the input CSR. * @param data The data array of the input CSR. * @param out_ptr The offset to write each row to in the output COO. * @param out_rows The rows of the output COO (output). * @param out_cols The columns of the output COO (output). * @param out_idxs The data array of the output COO (output). 
*/ template <typename IdType, int TILE_SIZE> __global__ void _CSRRowWiseSampleUniformReplaceKernel( const uint64_t rand_seed, const int64_t num_picks, const int64_t num_rows, const IdType* const in_rows, const IdType* const in_ptr, const IdType* const in_index, const IdType* const data, const IdType* const out_ptr, IdType* const out_rows, IdType* const out_cols, IdType* const out_idxs) { // we assign one warp per row assert(blockDim.x == BLOCK_SIZE); int64_t out_row = blockIdx.x * TILE_SIZE; const int64_t last_row = min(static_cast<int64_t>(blockIdx.x + 1) * TILE_SIZE, num_rows); hiprandStatePhilox4_32_10_t rng; hiprand_init(rand_seed * gridDim.x + blockIdx.x, threadIdx.x, 0, &rng); while (out_row < last_row) { const int64_t row = in_rows[out_row]; const int64_t in_row_start = in_ptr[row]; const int64_t out_row_start = out_ptr[out_row]; const int64_t deg = in_ptr[row + 1] - in_row_start; if (deg > 0) { // each thread then blindly copies in rows only if deg > 0. for (int idx = threadIdx.x; idx < num_picks; idx += BLOCK_SIZE) { const int64_t edge = hiprand(&rng) % deg; const int64_t out_idx = out_row_start + idx; out_rows[out_idx] = row; out_cols[out_idx] = in_index[in_row_start + edge]; out_idxs[out_idx] = data ? data[in_row_start + edge] : in_row_start + edge; } } out_row += 1; } } } // namespace ///////////////////////////// CSR sampling ////////////////////////// template <DGLDeviceType XPU, typename IdType> COOMatrix _CSRRowWiseSamplingUniform( CSRMatrix mat, IdArray rows, const int64_t num_picks, const bool replace) { const auto& ctx = rows->ctx; auto device = runtime::DeviceAPI::Get(ctx); hipStream_t stream = runtime::getCurrentHIPStreamMasqueradingAsCUDA(); const int64_t num_rows = rows->shape[0]; const IdType* const slice_rows = static_cast<const IdType*>(rows->data); IdArray picked_row = NewIdArray(num_rows * num_picks, ctx, sizeof(IdType) * 8); IdArray picked_col = NewIdArray(num_rows * num_picks, ctx, sizeof(IdType) * 8); IdArray picked_idx = NewIdArray(num_rows * num_picks, ctx, sizeof(IdType) * 8); IdType* const out_rows = static_cast<IdType*>(picked_row->data); IdType* const out_cols = static_cast<IdType*>(picked_col->data); IdType* const out_idxs = static_cast<IdType*>(picked_idx->data); const IdType* in_ptr = static_cast<IdType*>(GetDevicePointer(mat.indptr)); const IdType* in_cols = static_cast<IdType*>(GetDevicePointer(mat.indices)); const IdType* data = CSRHasData(mat) ? 
static_cast<IdType*>(GetDevicePointer(mat.data)) : nullptr; // compute degree IdType* out_deg = static_cast<IdType*>( device->AllocWorkspace(ctx, (num_rows + 1) * sizeof(IdType))); if (replace) { const dim3 block(512); const dim3 grid((num_rows + block.x - 1) / block.x); CUDA_KERNEL_CALL( _CSRRowWiseSampleDegreeReplaceKernel, grid, block, 0, stream, num_picks, num_rows, slice_rows, in_ptr, out_deg); } else { const dim3 block(512); const dim3 grid((num_rows + block.x - 1) / block.x); CUDA_KERNEL_CALL( _CSRRowWiseSampleDegreeKernel, grid, block, 0, stream, num_picks, num_rows, slice_rows, in_ptr, out_deg); } // fill out_ptr IdType* out_ptr = static_cast<IdType*>( device->AllocWorkspace(ctx, (num_rows + 1) * sizeof(IdType))); size_t prefix_temp_size = 0; CUDA_CALL(hipcub::DeviceScan::ExclusiveSum( nullptr, prefix_temp_size, out_deg, out_ptr, num_rows + 1, stream)); void* prefix_temp = device->AllocWorkspace(ctx, prefix_temp_size); CUDA_CALL(hipcub::DeviceScan::ExclusiveSum( prefix_temp, prefix_temp_size, out_deg, out_ptr, num_rows + 1, stream)); device->FreeWorkspace(ctx, prefix_temp); device->FreeWorkspace(ctx, out_deg); hipEvent_t copyEvent; CUDA_CALL(hipEventCreate(&copyEvent)); // TODO(dlasalle): use pinned memory to overlap with the actual sampling, and // wait on a cudaevent IdType new_len; // copy using the internal current stream device->CopyDataFromTo( out_ptr, num_rows * sizeof(new_len), &new_len, 0, sizeof(new_len), ctx, DGLContext{kDGLCPU, 0}, mat.indptr->dtype); CUDA_CALL(hipEventRecord(copyEvent, stream)); const uint64_t random_seed = RandomEngine::ThreadLocal()->RandInt(1000000000); // select edges // the number of rows each thread block will cover constexpr int TILE_SIZE = 128 / BLOCK_SIZE; if (replace) { // with replacement const dim3 block(BLOCK_SIZE); const dim3 grid((num_rows + TILE_SIZE - 1) / TILE_SIZE); CUDA_KERNEL_CALL( (_CSRRowWiseSampleUniformReplaceKernel<IdType, TILE_SIZE>), grid, block, 0, stream, random_seed, num_picks, num_rows, slice_rows, in_ptr, in_cols, data, out_ptr, out_rows, out_cols, out_idxs); } else { // without replacement const dim3 block(BLOCK_SIZE); const dim3 grid((num_rows + TILE_SIZE - 1) / TILE_SIZE); CUDA_KERNEL_CALL( (_CSRRowWiseSampleUniformKernel<IdType, TILE_SIZE>), grid, block, 0, stream, random_seed, num_picks, num_rows, slice_rows, in_ptr, in_cols, data, out_ptr, out_rows, out_cols, out_idxs); } device->FreeWorkspace(ctx, out_ptr); // wait for copying `new_len` to finish CUDA_CALL(hipEventSynchronize(copyEvent)); CUDA_CALL(hipEventDestroy(copyEvent)); picked_row = picked_row.CreateView({new_len}, picked_row->dtype); picked_col = picked_col.CreateView({new_len}, picked_col->dtype); picked_idx = picked_idx.CreateView({new_len}, picked_idx->dtype); return COOMatrix( mat.num_rows, mat.num_cols, picked_row, picked_col, picked_idx); } template <DGLDeviceType XPU, typename IdType> COOMatrix CSRRowWiseSamplingUniform( CSRMatrix mat, IdArray rows, const int64_t num_picks, const bool replace) { if (num_picks == -1) { // Basically this is UnitGraph::InEdges(). 
COOMatrix coo = CSRToCOO(CSRSliceRows(mat, rows), false); IdArray sliced_rows = IndexSelect(rows, coo.row); return COOMatrix( mat.num_rows, mat.num_cols, sliced_rows, coo.col, coo.data); } else { return _CSRRowWiseSamplingUniform<XPU, IdType>( mat, rows, num_picks, replace); } } template COOMatrix CSRRowWiseSamplingUniform<kDGLCUDA, int32_t>( CSRMatrix, IdArray, int64_t, bool); template COOMatrix CSRRowWiseSamplingUniform<kDGLCUDA, int64_t>( CSRMatrix, IdArray, int64_t, bool); } // namespace impl } // namespace aten } // namespace dgl
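A host-side sketch of the per-row selection rule that _CSRRowWiseSampleUniformKernel applies when deg > num_picks (the serial form of its reservoir-style replacement step): keep indices 0..num_picks-1, then for each later index draw a slot uniformly in [0, idx] and overwrite if the slot falls inside the reservoir. This is only for reasoning about the kernel; it is not DGL code, and rand() stands in for the Philox generator used on the device.

#include <cstdio>
#include <cstdlib>
#include <vector>

std::vector<int> reservoir_pick(int deg, int num_picks)
{
    std::vector<int> picks(num_picks);
    for (int i = 0; i < num_picks; ++i) picks[i] = i;       // initial identity permutation
    for (int idx = num_picks; idx < deg; ++idx) {
        int slot = rand() % (idx + 1);
        if (slot < num_picks) picks[slot] = idx;            // replace, as in the kernel
    }
    return picks;
}

int main()
{
    std::vector<int> p = reservoir_pick(/*deg=*/20, /*num_picks=*/5);
    for (size_t i = 0; i < p.size(); ++i) printf("%d ", p[i]);
    printf("\n");
    return 0;
}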
9845da13a474e9c7400e420d939a651fa5f48728.cu
/** * Copyright (c) 2021 by Contributors * @file array/cuda/rowwise_sampling.cu * @brief uniform rowwise sampling */ #include <curand_kernel.h> #include <dgl/random.h> #include <dgl/runtime/device_api.h> #include <numeric> #include "../../array/cuda/atomic.cuh" #include "../../runtime/cuda/cuda_common.h" #include "./dgl_cub.cuh" #include "./utils.h" namespace dgl { using namespace cuda; using namespace aten::cuda; namespace aten { namespace impl { namespace { constexpr int BLOCK_SIZE = 128; /** * @brief Compute the size of each row in the sampled CSR, without replacement. * * @tparam IdType The type of node and edge indexes. * @param num_picks The number of non-zero entries to pick per row. * @param num_rows The number of rows to pick. * @param in_rows The set of rows to pick. * @param in_ptr The index where each row's edges start. * @param out_deg The size of each row in the sampled matrix, as indexed by * `in_rows` (output). */ template <typename IdType> __global__ void _CSRRowWiseSampleDegreeKernel( const int64_t num_picks, const int64_t num_rows, const IdType* const in_rows, const IdType* const in_ptr, IdType* const out_deg) { const int tIdx = threadIdx.x + blockIdx.x * blockDim.x; if (tIdx < num_rows) { const int in_row = in_rows[tIdx]; const int out_row = tIdx; out_deg[out_row] = min( static_cast<IdType>(num_picks), in_ptr[in_row + 1] - in_ptr[in_row]); if (out_row == num_rows - 1) { // make the prefixsum work out_deg[num_rows] = 0; } } } /** * @brief Compute the size of each row in the sampled CSR, with replacement. * * @tparam IdType The type of node and edge indexes. * @param num_picks The number of non-zero entries to pick per row. * @param num_rows The number of rows to pick. * @param in_rows The set of rows to pick. * @param in_ptr The index where each row's edges start. * @param out_deg The size of each row in the sampled matrix, as indexed by * `in_rows` (output). */ template <typename IdType> __global__ void _CSRRowWiseSampleDegreeReplaceKernel( const int64_t num_picks, const int64_t num_rows, const IdType* const in_rows, const IdType* const in_ptr, IdType* const out_deg) { const int tIdx = threadIdx.x + blockIdx.x * blockDim.x; if (tIdx < num_rows) { const int64_t in_row = in_rows[tIdx]; const int64_t out_row = tIdx; if (in_ptr[in_row + 1] - in_ptr[in_row] == 0) { out_deg[out_row] = 0; } else { out_deg[out_row] = static_cast<IdType>(num_picks); } if (out_row == num_rows - 1) { // make the prefixsum work out_deg[num_rows] = 0; } } } /** * @brief Perform row-wise uniform sampling on a CSR matrix, * and generate a COO matrix, without replacement. * * @tparam IdType The ID type used for matrices. * @tparam TILE_SIZE The number of rows covered by each threadblock. * @param rand_seed The random seed to use. * @param num_picks The number of non-zeros to pick per row. * @param num_rows The number of rows to pick. * @param in_rows The set of rows to pick. * @param in_ptr The indptr array of the input CSR. * @param in_index The indices array of the input CSR. * @param data The data array of the input CSR. * @param out_ptr The offset to write each row to in the output COO. * @param out_rows The rows of the output COO (output). * @param out_cols The columns of the output COO (output). * @param out_idxs The data array of the output COO (output). 
*/ template <typename IdType, int TILE_SIZE> __global__ void _CSRRowWiseSampleUniformKernel( const uint64_t rand_seed, const int64_t num_picks, const int64_t num_rows, const IdType* const in_rows, const IdType* const in_ptr, const IdType* const in_index, const IdType* const data, const IdType* const out_ptr, IdType* const out_rows, IdType* const out_cols, IdType* const out_idxs) { // we assign one warp per row assert(blockDim.x == BLOCK_SIZE); int64_t out_row = blockIdx.x * TILE_SIZE; const int64_t last_row = min(static_cast<int64_t>(blockIdx.x + 1) * TILE_SIZE, num_rows); curandStatePhilox4_32_10_t rng; curand_init(rand_seed * gridDim.x + blockIdx.x, threadIdx.x, 0, &rng); while (out_row < last_row) { const int64_t row = in_rows[out_row]; const int64_t in_row_start = in_ptr[row]; const int64_t deg = in_ptr[row + 1] - in_row_start; const int64_t out_row_start = out_ptr[out_row]; if (deg <= num_picks) { // just copy row when there is not enough nodes to sample. for (int idx = threadIdx.x; idx < deg; idx += BLOCK_SIZE) { const IdType in_idx = in_row_start + idx; out_rows[out_row_start + idx] = row; out_cols[out_row_start + idx] = in_index[in_idx]; out_idxs[out_row_start + idx] = data ? data[in_idx] : in_idx; } } else { // generate permutation list via reservoir algorithm for (int idx = threadIdx.x; idx < num_picks; idx += BLOCK_SIZE) { out_idxs[out_row_start + idx] = idx; } __syncthreads(); for (int idx = num_picks + threadIdx.x; idx < deg; idx += BLOCK_SIZE) { const int num = curand(&rng) % (idx + 1); if (num < num_picks) { // use max so as to achieve the replacement order the serial // algorithm would have AtomicMax(out_idxs + out_row_start + num, idx); } } __syncthreads(); // copy permutation over for (int idx = threadIdx.x; idx < num_picks; idx += BLOCK_SIZE) { const IdType perm_idx = out_idxs[out_row_start + idx] + in_row_start; out_rows[out_row_start + idx] = row; out_cols[out_row_start + idx] = in_index[perm_idx]; out_idxs[out_row_start + idx] = data ? data[perm_idx] : perm_idx; } } out_row += 1; } } /** * @brief Perform row-wise uniform sampling on a CSR matrix, * and generate a COO matrix, with replacement. * * @tparam IdType The ID type used for matrices. * @tparam TILE_SIZE The number of rows covered by each threadblock. * @param rand_seed The random seed to use. * @param num_picks The number of non-zeros to pick per row. * @param num_rows The number of rows to pick. * @param in_rows The set of rows to pick. * @param in_ptr The indptr array of the input CSR. * @param in_index The indices array of the input CSR. * @param data The data array of the input CSR. * @param out_ptr The offset to write each row to in the output COO. * @param out_rows The rows of the output COO (output). * @param out_cols The columns of the output COO (output). * @param out_idxs The data array of the output COO (output). 
*/ template <typename IdType, int TILE_SIZE> __global__ void _CSRRowWiseSampleUniformReplaceKernel( const uint64_t rand_seed, const int64_t num_picks, const int64_t num_rows, const IdType* const in_rows, const IdType* const in_ptr, const IdType* const in_index, const IdType* const data, const IdType* const out_ptr, IdType* const out_rows, IdType* const out_cols, IdType* const out_idxs) { // we assign one warp per row assert(blockDim.x == BLOCK_SIZE); int64_t out_row = blockIdx.x * TILE_SIZE; const int64_t last_row = min(static_cast<int64_t>(blockIdx.x + 1) * TILE_SIZE, num_rows); curandStatePhilox4_32_10_t rng; curand_init(rand_seed * gridDim.x + blockIdx.x, threadIdx.x, 0, &rng); while (out_row < last_row) { const int64_t row = in_rows[out_row]; const int64_t in_row_start = in_ptr[row]; const int64_t out_row_start = out_ptr[out_row]; const int64_t deg = in_ptr[row + 1] - in_row_start; if (deg > 0) { // each thread then blindly copies in rows only if deg > 0. for (int idx = threadIdx.x; idx < num_picks; idx += BLOCK_SIZE) { const int64_t edge = curand(&rng) % deg; const int64_t out_idx = out_row_start + idx; out_rows[out_idx] = row; out_cols[out_idx] = in_index[in_row_start + edge]; out_idxs[out_idx] = data ? data[in_row_start + edge] : in_row_start + edge; } } out_row += 1; } } } // namespace ///////////////////////////// CSR sampling ////////////////////////// template <DGLDeviceType XPU, typename IdType> COOMatrix _CSRRowWiseSamplingUniform( CSRMatrix mat, IdArray rows, const int64_t num_picks, const bool replace) { const auto& ctx = rows->ctx; auto device = runtime::DeviceAPI::Get(ctx); cudaStream_t stream = runtime::getCurrentCUDAStream(); const int64_t num_rows = rows->shape[0]; const IdType* const slice_rows = static_cast<const IdType*>(rows->data); IdArray picked_row = NewIdArray(num_rows * num_picks, ctx, sizeof(IdType) * 8); IdArray picked_col = NewIdArray(num_rows * num_picks, ctx, sizeof(IdType) * 8); IdArray picked_idx = NewIdArray(num_rows * num_picks, ctx, sizeof(IdType) * 8); IdType* const out_rows = static_cast<IdType*>(picked_row->data); IdType* const out_cols = static_cast<IdType*>(picked_col->data); IdType* const out_idxs = static_cast<IdType*>(picked_idx->data); const IdType* in_ptr = static_cast<IdType*>(GetDevicePointer(mat.indptr)); const IdType* in_cols = static_cast<IdType*>(GetDevicePointer(mat.indices)); const IdType* data = CSRHasData(mat) ? 
static_cast<IdType*>(GetDevicePointer(mat.data)) : nullptr; // compute degree IdType* out_deg = static_cast<IdType*>( device->AllocWorkspace(ctx, (num_rows + 1) * sizeof(IdType))); if (replace) { const dim3 block(512); const dim3 grid((num_rows + block.x - 1) / block.x); CUDA_KERNEL_CALL( _CSRRowWiseSampleDegreeReplaceKernel, grid, block, 0, stream, num_picks, num_rows, slice_rows, in_ptr, out_deg); } else { const dim3 block(512); const dim3 grid((num_rows + block.x - 1) / block.x); CUDA_KERNEL_CALL( _CSRRowWiseSampleDegreeKernel, grid, block, 0, stream, num_picks, num_rows, slice_rows, in_ptr, out_deg); } // fill out_ptr IdType* out_ptr = static_cast<IdType*>( device->AllocWorkspace(ctx, (num_rows + 1) * sizeof(IdType))); size_t prefix_temp_size = 0; CUDA_CALL(cub::DeviceScan::ExclusiveSum( nullptr, prefix_temp_size, out_deg, out_ptr, num_rows + 1, stream)); void* prefix_temp = device->AllocWorkspace(ctx, prefix_temp_size); CUDA_CALL(cub::DeviceScan::ExclusiveSum( prefix_temp, prefix_temp_size, out_deg, out_ptr, num_rows + 1, stream)); device->FreeWorkspace(ctx, prefix_temp); device->FreeWorkspace(ctx, out_deg); cudaEvent_t copyEvent; CUDA_CALL(cudaEventCreate(&copyEvent)); // TODO(dlasalle): use pinned memory to overlap with the actual sampling, and // wait on a cudaevent IdType new_len; // copy using the internal current stream device->CopyDataFromTo( out_ptr, num_rows * sizeof(new_len), &new_len, 0, sizeof(new_len), ctx, DGLContext{kDGLCPU, 0}, mat.indptr->dtype); CUDA_CALL(cudaEventRecord(copyEvent, stream)); const uint64_t random_seed = RandomEngine::ThreadLocal()->RandInt(1000000000); // select edges // the number of rows each thread block will cover constexpr int TILE_SIZE = 128 / BLOCK_SIZE; if (replace) { // with replacement const dim3 block(BLOCK_SIZE); const dim3 grid((num_rows + TILE_SIZE - 1) / TILE_SIZE); CUDA_KERNEL_CALL( (_CSRRowWiseSampleUniformReplaceKernel<IdType, TILE_SIZE>), grid, block, 0, stream, random_seed, num_picks, num_rows, slice_rows, in_ptr, in_cols, data, out_ptr, out_rows, out_cols, out_idxs); } else { // without replacement const dim3 block(BLOCK_SIZE); const dim3 grid((num_rows + TILE_SIZE - 1) / TILE_SIZE); CUDA_KERNEL_CALL( (_CSRRowWiseSampleUniformKernel<IdType, TILE_SIZE>), grid, block, 0, stream, random_seed, num_picks, num_rows, slice_rows, in_ptr, in_cols, data, out_ptr, out_rows, out_cols, out_idxs); } device->FreeWorkspace(ctx, out_ptr); // wait for copying `new_len` to finish CUDA_CALL(cudaEventSynchronize(copyEvent)); CUDA_CALL(cudaEventDestroy(copyEvent)); picked_row = picked_row.CreateView({new_len}, picked_row->dtype); picked_col = picked_col.CreateView({new_len}, picked_col->dtype); picked_idx = picked_idx.CreateView({new_len}, picked_idx->dtype); return COOMatrix( mat.num_rows, mat.num_cols, picked_row, picked_col, picked_idx); } template <DGLDeviceType XPU, typename IdType> COOMatrix CSRRowWiseSamplingUniform( CSRMatrix mat, IdArray rows, const int64_t num_picks, const bool replace) { if (num_picks == -1) { // Basically this is UnitGraph::InEdges(). 
COOMatrix coo = CSRToCOO(CSRSliceRows(mat, rows), false); IdArray sliced_rows = IndexSelect(rows, coo.row); return COOMatrix( mat.num_rows, mat.num_cols, sliced_rows, coo.col, coo.data); } else { return _CSRRowWiseSamplingUniform<XPU, IdType>( mat, rows, num_picks, replace); } } template COOMatrix CSRRowWiseSamplingUniform<kDGLCUDA, int32_t>( CSRMatrix, IdArray, int64_t, bool); template COOMatrix CSRRowWiseSamplingUniform<kDGLCUDA, int64_t>( CSRMatrix, IdArray, int64_t, bool); } // namespace impl } // namespace aten } // namespace dgl
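The out_ptr construction above uses CUB's two-phase pattern: call cub::DeviceScan::ExclusiveSum once with a null temporary buffer to query the workspace size, allocate it, then call again to run the scan. A standalone usage sketch with made-up degree values (sizes and numbers are illustrative only):

#include <cstdio>
#include <cuda_runtime.h>
#include <cub/cub.cuh>

int main()
{
    const int n = 8;
    int h_deg[n + 1] = {3, 1, 4, 1, 5, 9, 2, 6, 0};   // trailing 0 so h_ptr[n] holds the total

    int *d_deg, *d_ptr;
    cudaMalloc((void **)&d_deg, (n + 1) * sizeof(int));
    cudaMalloc((void **)&d_ptr, (n + 1) * sizeof(int));
    cudaMemcpy(d_deg, h_deg, (n + 1) * sizeof(int), cudaMemcpyHostToDevice);

    void  *d_temp = nullptr;
    size_t temp_bytes = 0;
    cub::DeviceScan::ExclusiveSum(nullptr, temp_bytes, d_deg, d_ptr, n + 1);   // size query
    cudaMalloc(&d_temp, temp_bytes);
    cub::DeviceScan::ExclusiveSum(d_temp, temp_bytes, d_deg, d_ptr, n + 1);    // actual scan

    int h_ptr[n + 1];
    cudaMemcpy(h_ptr, d_ptr, (n + 1) * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i <= n; ++i) printf("%d ", h_ptr[i]);   // prints: 0 3 4 8 9 14 23 25 31
    printf("\n");

    cudaFree(d_temp); cudaFree(d_deg); cudaFree(d_ptr);
    return 0;
}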
58b1263cde63a533b64072c3a0197f0ebaefdf7f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "lodepng.h" #include "gputimer.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #define POOL_BLOCK_SIZE 2 #define BYTES_PER_PIXEL 4 #define MAX_THREADS_PER_BLOCK 1024 __device__ unsigned get_block_offset(unsigned k, unsigned image_width){ return BYTES_PER_PIXEL * POOL_BLOCK_SIZE * ( image_width * (k / (image_width / 2)) + (k % (image_width / 2))); } /** * CUDA kernal to perform pooling for one output pixel */ __global__ void max_pool(unsigned char *d_image_buffer, unsigned char *d_out_buffer, unsigned image_width, unsigned index_offset) { int blockId = blockIdx.y * gridDim.x + blockIdx.x; int threadId = blockId * blockDim.x + threadIdx.x; int index = threadId + index_offset; unsigned char* c; unsigned max, val; // pool on RGBA channels for (int rgba = 0; rgba < BYTES_PER_PIXEL; rgba++){ if (rgba < 3){ // for RGB channels max = 0; for (int i = 0; i < POOL_BLOCK_SIZE; i++){ c = d_image_buffer + get_block_offset(index, image_width) + BYTES_PER_PIXEL * image_width * i; for (int j = 0; j < POOL_BLOCK_SIZE; j++){ c += BYTES_PER_PIXEL * j; val = (int)c[rgba]; max = (max < val) ? val : max; } } val = (unsigned char)max; } else { // for alpha channel val = (unsigned char)255; } d_out_buffer[BYTES_PER_PIXEL*index + rgba] = val; } } int main(int argc, char *argv[]) { // get arguments from command line if(argc<3) { printf("Not enough arguments.\n"); return -1; } char *argv1 = argv[1]; char *argv2 = argv[2]; int len1 = strlen(argv1) + 1; int len2 = strlen(argv2) + 1; const char* input_filename = (char*) malloc (len1*sizeof(char)); strcpy((char *) input_filename, argv[1]); const char* output_filename = (char*) malloc (len2*sizeof(char)); strcpy((char *) output_filename, argv[2]); // vars for rectifying unsigned char *h_image_in, *h_image_out; unsigned width_in, height_in; unsigned total_pixels, total_out_pixels; int error1 = lodepng_decode32_file(&h_image_in, &width_in, &height_in, input_filename); if(error1) { printf("error %u: %s\n", error1, lodepng_error_text(error1)); return -2; } unsigned width_in_halved, height_in_halved; width_in_halved = width_in / 2; height_in_halved = height_in / 2; total_pixels = width_in * height_in; total_out_pixels = width_in_halved * height_in_halved; h_image_out = (unsigned char*) malloc (BYTES_PER_PIXEL * total_out_pixels * sizeof(char)); // declare GPU memory pointer unsigned char *d_image_in, *d_image_out; // allocate GPU memory hipMalloc((void**)(&d_image_in), BYTES_PER_PIXEL * total_pixels * sizeof(char)); hipMalloc((void**)(&d_image_out), BYTES_PER_PIXEL * total_out_pixels * sizeof(char)); // transfer the array to the GPU hipMemcpy(d_image_in, h_image_in, BYTES_PER_PIXEL * total_pixels, hipMemcpyHostToDevice); // setup threads int threads_x, threads_y, blocks_x, blocks_y; int threads_per_block; #ifndef DEBUG threads_per_block = MAX_THREADS_PER_BLOCK; #else threads_per_block = 1; while (threads_per_block <= MAX_THREADS_PER_BLOCK){ #endif /* DEBUG */ // setup threads threads_x = threads_per_block; threads_y = 1; // setup blocks blocks_x = total_out_pixels / threads_per_block; blocks_y = 1; while (blocks_x > MAX_THREADS_PER_BLOCK){ blocks_x /= 2; blocks_y *= 2; } dim3 numThreadsPerBlock(threads_x, threads_y, 1); // 1024 threads dim3 numBlocks(blocks_x, blocks_y, 1); printf("Spawning {%d,%d} blocks, {%d,%d} threads each.\n", blocks_x, blocks_y, threads_x, threads_y); GpuTimer timer; // start kernel timer.Start(); hipLaunchKernelGGL(( max_pool), dim3(numBlocks), 
dim3(numThreadsPerBlock), 0, 0, d_image_in, d_image_out, width_in, 0); timer.Stop(); printf("{tpb:%d} Time elapsed = %g ms\n", threads_per_block, timer.Elapsed()); #ifdef DEBUG threads_per_block *= 2; } #endif /* DEBUG */ // pull leftover int leftover = total_out_pixels - (threads_x * threads_y * blocks_x * blocks_y); // this will necessarily be less than 1024 printf("Leftover %d.\n",leftover); int blocks = 1; while (leftover > MAX_THREADS_PER_BLOCK){ leftover /= 2; blocks *= 2; } hipLaunchKernelGGL(( max_pool), dim3(blocks), dim3(leftover), 0, 0, d_image_in, d_image_out, width_in, total_out_pixels - leftover); // copy back the result array to the CPU hipMemcpy(h_image_out, d_image_out, BYTES_PER_PIXEL * total_out_pixels, hipMemcpyDeviceToHost); // save rectified pixel data to file lodepng_encode32_file(output_filename, h_image_out, width_in_halved, height_in_halved); free(h_image_in); free(h_image_out); free((char*)input_filename); free((char*)output_filename); hipFree(d_image_in); hipFree(d_image_out); return 0; }
58b1263cde63a533b64072c3a0197f0ebaefdf7f.cu
#include "lodepng.h" #include "gputimer.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #define POOL_BLOCK_SIZE 2 #define BYTES_PER_PIXEL 4 #define MAX_THREADS_PER_BLOCK 1024 __device__ unsigned get_block_offset(unsigned k, unsigned image_width){ return BYTES_PER_PIXEL * POOL_BLOCK_SIZE * ( image_width * (k / (image_width / 2)) + (k % (image_width / 2))); } /** * CUDA kernal to perform pooling for one output pixel */ __global__ void max_pool(unsigned char *d_image_buffer, unsigned char *d_out_buffer, unsigned image_width, unsigned index_offset) { int blockId = blockIdx.y * gridDim.x + blockIdx.x; int threadId = blockId * blockDim.x + threadIdx.x; int index = threadId + index_offset; unsigned char* c; unsigned max, val; // pool on RGBA channels for (int rgba = 0; rgba < BYTES_PER_PIXEL; rgba++){ if (rgba < 3){ // for RGB channels max = 0; for (int i = 0; i < POOL_BLOCK_SIZE; i++){ c = d_image_buffer + get_block_offset(index, image_width) + BYTES_PER_PIXEL * image_width * i; for (int j = 0; j < POOL_BLOCK_SIZE; j++){ c += BYTES_PER_PIXEL * j; val = (int)c[rgba]; max = (max < val) ? val : max; } } val = (unsigned char)max; } else { // for alpha channel val = (unsigned char)255; } d_out_buffer[BYTES_PER_PIXEL*index + rgba] = val; } } int main(int argc, char *argv[]) { // get arguments from command line if(argc<3) { printf("Not enough arguments.\n"); return -1; } char *argv1 = argv[1]; char *argv2 = argv[2]; int len1 = strlen(argv1) + 1; int len2 = strlen(argv2) + 1; const char* input_filename = (char*) malloc (len1*sizeof(char)); strcpy((char *) input_filename, argv[1]); const char* output_filename = (char*) malloc (len2*sizeof(char)); strcpy((char *) output_filename, argv[2]); // vars for rectifying unsigned char *h_image_in, *h_image_out; unsigned width_in, height_in; unsigned total_pixels, total_out_pixels; int error1 = lodepng_decode32_file(&h_image_in, &width_in, &height_in, input_filename); if(error1) { printf("error %u: %s\n", error1, lodepng_error_text(error1)); return -2; } unsigned width_in_halved, height_in_halved; width_in_halved = width_in / 2; height_in_halved = height_in / 2; total_pixels = width_in * height_in; total_out_pixels = width_in_halved * height_in_halved; h_image_out = (unsigned char*) malloc (BYTES_PER_PIXEL * total_out_pixels * sizeof(char)); // declare GPU memory pointer unsigned char *d_image_in, *d_image_out; // allocate GPU memory cudaMalloc((void**)(&d_image_in), BYTES_PER_PIXEL * total_pixels * sizeof(char)); cudaMalloc((void**)(&d_image_out), BYTES_PER_PIXEL * total_out_pixels * sizeof(char)); // transfer the array to the GPU cudaMemcpy(d_image_in, h_image_in, BYTES_PER_PIXEL * total_pixels, cudaMemcpyHostToDevice); // setup threads int threads_x, threads_y, blocks_x, blocks_y; int threads_per_block; #ifndef DEBUG threads_per_block = MAX_THREADS_PER_BLOCK; #else threads_per_block = 1; while (threads_per_block <= MAX_THREADS_PER_BLOCK){ #endif /* DEBUG */ // setup threads threads_x = threads_per_block; threads_y = 1; // setup blocks blocks_x = total_out_pixels / threads_per_block; blocks_y = 1; while (blocks_x > MAX_THREADS_PER_BLOCK){ blocks_x /= 2; blocks_y *= 2; } dim3 numThreadsPerBlock(threads_x, threads_y, 1); // 1024 threads dim3 numBlocks(blocks_x, blocks_y, 1); printf("Spawning {%d,%d} blocks, {%d,%d} threads each.\n", blocks_x, blocks_y, threads_x, threads_y); GpuTimer timer; // start kernel timer.Start(); max_pool<<<numBlocks, numThreadsPerBlock>>> (d_image_in, d_image_out, width_in, 0); timer.Stop(); printf("{tpb:%d} Time elapsed = %g 
ms\n", threads_per_block, timer.Elapsed()); #ifdef DEBUG threads_per_block *= 2; } #endif /* DEBUG */ // pull leftover int leftover = total_out_pixels - (threads_x * threads_y * blocks_x * blocks_y); // this will necessarily be less than 1024 printf("Leftover %d.\n",leftover); int blocks = 1; while (leftover > MAX_THREADS_PER_BLOCK){ leftover /= 2; blocks *= 2; } max_pool<<<blocks, leftover>>> (d_image_in, d_image_out, width_in, total_out_pixels - leftover); // copy back the result array to the CPU cudaMemcpy(h_image_out, d_image_out, BYTES_PER_PIXEL * total_out_pixels, cudaMemcpyDeviceToHost); // save rectified pixel data to file lodepng_encode32_file(output_filename, h_image_out, width_in_halved, height_in_halved); free(h_image_in); free(h_image_out); free((char*)input_filename); free((char*)output_filename); cudaFree(d_image_in); cudaFree(d_image_out); return 0; }
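A host-side reference for the 2x2 max pool above can be useful for spot-checking the kernel output. The sketch below follows the kernel's conventions (maximum over the R, G, B channels; alpha forced to 255; even input dimensions assumed) but is otherwise independent of the CUDA code; the 4x4 test image in main is made up.

#include <stdio.h>
#include <stdlib.h>

void max_pool_cpu(const unsigned char *in, unsigned char *out,
                  unsigned width, unsigned height)
{
    unsigned out_w = width / 2, out_h = height / 2;
    for (unsigned y = 0; y < out_h; ++y)
        for (unsigned x = 0; x < out_w; ++x)
            for (int ch = 0; ch < 4; ++ch) {
                unsigned char v = 255;                      // alpha channel
                if (ch < 3) {                               // RGB: max over the 2x2 block
                    v = 0;
                    for (int dy = 0; dy < 2; ++dy)
                        for (int dx = 0; dx < 2; ++dx) {
                            unsigned char p =
                                in[4 * ((2 * y + dy) * width + (2 * x + dx)) + ch];
                            if (p > v) v = p;
                        }
                }
                out[4 * (y * out_w + x) + ch] = v;
            }
}

int main()
{
    unsigned char in[4 * 4 * 4];                            // 4x4 RGBA test image
    for (int i = 0; i < 4 * 4 * 4; ++i) in[i] = (unsigned char)(rand() % 256);
    unsigned char out[4 * 2 * 2];
    max_pool_cpu(in, out, 4, 4);
    printf("out[0..3] = %u %u %u %u\n", out[0], out[1], out[2], out[3]);
    return 0;
}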
aee2a8967580be4e2ed6ea687686a870786ffe08.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "common.h" #include "common_cuda.h" #include "chemistry.h" #include "equilibrium_solver/minimizer_options.h" #include "equilibrium_solver/equilibrium_state.h" #include "equilibrium_solver/equilibrium_solver.h" namespace equilibrium_solver { // make variables on the device visible #ifdef __CUDA_ARCH__ using namespace common_device; #else using namespace common; #endif using chemistry::ThermodynamicProperties; __global__ void minimize_kernel(ThermodynamicProperties thermo_props, size_t ncells, numeric_t* bs_raw, Vector<numeric_t, common::num_species>* xs, Vector<numeric_t, common::num_components>* ys, Vector<numeric_t, common::num_species>* zs, MinimizerOptions options) { constexpr size_t m = formula_matrix_t::RowsAtCompileTime; constexpr size_t n = formula_matrix_t::ColsAtCompileTime; constexpr size_t p = m + n; constexpr size_t t = m + 2 * n; int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < ncells) { auto& x = xs[idx]; auto& y = ys[idx]; auto& z = zs[idx]; const auto& A = formula_matrix; Eigen::Map<component_amounts_t> b(bs_raw+num_components*idx); const auto imax = options.imax; const auto mu = options.mu; const auto tau = options.tau; const auto tol = options.tol; size_t it; numeric_t error; for (it=0; it<imax; ++it) { Vector<numeric_t, t> F; F.setConstant(0); Matrix<numeric_t, t, t> J; J.setConstant(0); // f - scalar // g - vector // H - matrix chemistry::ObjectiveResult obj_res = gibbs_energy_optimized(thermo_props, x); auto& f = obj_res.f; auto& g = obj_res.g; auto& H = obj_res.H; // Assemble the negative of the residual vector -F // [g(x) - tr(A)*y - z] // F = [ A*x - b ] // [ X*Z*e - mu ] // 2*num_species subs, num_species*(num_components-1 adds and num_components muls) F.head(n) = g - A.transpose()*y - z; F.segment(n, m) = A*x - b; F.tail(n) = (x.array() * z.array()).matrix() - mu*Vector<numeric_t, common::num_species>::Ones(); // Calculate the current total error numeric_t error = F.template lpNorm<Infinity>(); // Check if the calculation has converged if (error < tol) break; // Assemble the Jacoabian matrix J // [H -tr(A) -I] // J = [A 0 0] // [Z 0 X] J.setConstant(0); J.block(0, 0, n, n) = H; J.block(0, n, n, p-n) = -A.transpose(); J.block(0, t-n, n, n).diagonal().setConstant(-1); J.block(n, 0, p-n, n) = A; J.block(t-n, 0, n, n).diagonal() = z; J.block(t-n, t-n, n, n).diagonal() = x; // solve lse Vector<numeric_t, t> delta; F = -F; gauss<numeric_t, t>(&J(0, 0), &F[0], &delta[0]); Eigen::Ref<Vector<numeric_t, n>> dx = delta.head(n); Eigen::Ref<Vector<numeric_t, p-n>> dy = delta.segment(n, p-n); Eigen::Ref<Vector<numeric_t, n>> dz = delta.tail(n); // Calculate the new values for x and z for (size_t i=0; i<n; ++i) { x[i] += (x[i] + dx[i] > 0.0) ? dx[i] : (-tau * x[i]); z[i] += (z[i] + dz[i] > 0.0) ? dz[i] : (-tau * z[i]); } // Calculate the new values for y y += dy; } atomicAdd(&common_device::minimization_kernel_num_iterations, it); } } }
aee2a8967580be4e2ed6ea687686a870786ffe08.cu
#include "common.h" #include "common_cuda.h" #include "chemistry.h" #include "equilibrium_solver/minimizer_options.h" #include "equilibrium_solver/equilibrium_state.h" #include "equilibrium_solver/equilibrium_solver.h" namespace equilibrium_solver { // make variables on the device visible #ifdef __CUDA_ARCH__ using namespace common_device; #else using namespace common; #endif using chemistry::ThermodynamicProperties; __global__ void minimize_kernel(ThermodynamicProperties thermo_props, size_t ncells, numeric_t* bs_raw, Vector<numeric_t, common::num_species>* xs, Vector<numeric_t, common::num_components>* ys, Vector<numeric_t, common::num_species>* zs, MinimizerOptions options) { constexpr size_t m = formula_matrix_t::RowsAtCompileTime; constexpr size_t n = formula_matrix_t::ColsAtCompileTime; constexpr size_t p = m + n; constexpr size_t t = m + 2 * n; int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < ncells) { auto& x = xs[idx]; auto& y = ys[idx]; auto& z = zs[idx]; const auto& A = formula_matrix; Eigen::Map<component_amounts_t> b(bs_raw+num_components*idx); const auto imax = options.imax; const auto mu = options.mu; const auto tau = options.tau; const auto tol = options.tol; size_t it; numeric_t error; for (it=0; it<imax; ++it) { Vector<numeric_t, t> F; F.setConstant(0); Matrix<numeric_t, t, t> J; J.setConstant(0); // f - scalar // g - vector // H - matrix chemistry::ObjectiveResult obj_res = gibbs_energy_optimized(thermo_props, x); auto& f = obj_res.f; auto& g = obj_res.g; auto& H = obj_res.H; // Assemble the negative of the residual vector -F // [g(x) - tr(A)*y - z] // F = [ A*x - b ] // [ X*Z*e - mu ] // 2*num_species subs, num_species*(num_components-1 adds and num_components muls) F.head(n) = g - A.transpose()*y - z; F.segment(n, m) = A*x - b; F.tail(n) = (x.array() * z.array()).matrix() - mu*Vector<numeric_t, common::num_species>::Ones(); // Calculate the current total error numeric_t error = F.template lpNorm<Infinity>(); // Check if the calculation has converged if (error < tol) break; // Assemble the Jacoabian matrix J // [H -tr(A) -I] // J = [A 0 0] // [Z 0 X] J.setConstant(0); J.block(0, 0, n, n) = H; J.block(0, n, n, p-n) = -A.transpose(); J.block(0, t-n, n, n).diagonal().setConstant(-1); J.block(n, 0, p-n, n) = A; J.block(t-n, 0, n, n).diagonal() = z; J.block(t-n, t-n, n, n).diagonal() = x; // solve lse Vector<numeric_t, t> delta; F = -F; gauss<numeric_t, t>(&J(0, 0), &F[0], &delta[0]); Eigen::Ref<Vector<numeric_t, n>> dx = delta.head(n); Eigen::Ref<Vector<numeric_t, p-n>> dy = delta.segment(n, p-n); Eigen::Ref<Vector<numeric_t, n>> dz = delta.tail(n); // Calculate the new values for x and z for (size_t i=0; i<n; ++i) { x[i] += (x[i] + dx[i] > 0.0) ? dx[i] : (-tau * x[i]); z[i] += (z[i] + dz[i] > 0.0) ? dz[i] : (-tau * z[i]); } // Calculate the new values for y y += dy; } atomicAdd(&common_device::minimization_kernel_num_iterations, it); } } }
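Written out, the system assembled and damped inside minimize_kernel is the perturbed KKT (primal-dual interior point) step below. The notation follows the kernel: H is the Hessian of the objective, A the formula matrix, X and Z diagonal matrices built from x and z, e the all-ones vector.

\[
F(x,y,z) \;=\; \begin{bmatrix} g(x) - A^{\mathsf{T}} y - z \\ A x - b \\ XZe - \mu e \end{bmatrix},
\qquad
\begin{bmatrix} H & -A^{\mathsf{T}} & -I \\ A & 0 & 0 \\ Z & 0 & X \end{bmatrix}
\begin{bmatrix} \Delta x \\ \Delta y \\ \Delta z \end{bmatrix} \;=\; -F ,
\]
\[
x_i \leftarrow \begin{cases} x_i + \Delta x_i, & x_i + \Delta x_i > 0 \\ (1-\tau)\, x_i, & \text{otherwise,} \end{cases}
\qquad
z_i \leftarrow \begin{cases} z_i + \Delta z_i, & z_i + \Delta z_i > 0 \\ (1-\tau)\, z_i, & \text{otherwise,} \end{cases}
\qquad
y \leftarrow y + \Delta y .
\]

The damping with parameter tau keeps x and z strictly positive between iterations, which is what the conditional updates at the end of the loop implement.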
4d67c2529bb384219e00ce82ef39de568065428a.hip
// !!! This is a file automatically generated by hipify!!! /* * Module : Twine * Copyright : [2016] Trevor L. McDonell * License : BSD3 * * Maintainer : Trevor L. McDonell <[email protected]> * Stability : experimental * Portability : non-portable (GHC extensions) * * Convert between Accelerate's Struct-of-Array representation of complex * numbers and the Array-of-Struct representation necessary for CUBLAS. */ #include <hip/hip_runtime.h> #include <hip/hip_complex.h> #ifdef __cplusplus extern "C" { #endif __global__ void interleave ( cuFloatComplex * __restrict__ cplx, const float * __restrict__ real, const float * __restrict__ imag, const int size ) { const int gridSize = blockDim.x * gridDim.x; int ix; for (ix = blockDim.x * blockIdx.x + threadIdx.x; ix < size; ix += gridSize) { const float re = real[ix]; const float im = imag[ix]; cplx[ix] = make_cuFloatComplex(re, im); } } __global__ void deinterleave ( float * __restrict__ real, float * __restrict__ imag, const cuFloatComplex * __restrict__ cplx, const int size ) { const int gridSize = blockDim.x * gridDim.x; int ix; for (ix = blockDim.x * blockIdx.x + threadIdx.x; ix < size; ix += gridSize) { const cuFloatComplex c = cplx[ix]; real[ix] = cuCrealf(c); imag[ix] = cuCimagf(c); } } #ifdef __cplusplus } #endif
4d67c2529bb384219e00ce82ef39de568065428a.cu
/* * Module : Twine * Copyright : [2016] Trevor L. McDonell * License : BSD3 * * Maintainer : Trevor L. McDonell <[email protected]> * Stability : experimental * Portability : non-portable (GHC extensions) * * Convert between Accelerate's Struct-of-Array representation of complex * numbers and the Array-of-Struct representation necessary for CUBLAS. */ #include <cuda.h> #include <cuComplex.h> #ifdef __cplusplus extern "C" { #endif __global__ void interleave ( cuFloatComplex * __restrict__ cplx, const float * __restrict__ real, const float * __restrict__ imag, const int size ) { const int gridSize = blockDim.x * gridDim.x; int ix; for (ix = blockDim.x * blockIdx.x + threadIdx.x; ix < size; ix += gridSize) { const float re = real[ix]; const float im = imag[ix]; cplx[ix] = make_cuFloatComplex(re, im); } } __global__ void deinterleave ( float * __restrict__ real, float * __restrict__ imag, const cuFloatComplex * __restrict__ cplx, const int size ) { const int gridSize = blockDim.x * gridDim.x; int ix; for (ix = blockDim.x * blockIdx.x + threadIdx.x; ix < size; ix += gridSize) { const cuFloatComplex c = cplx[ix]; real[ix] = cuCrealf(c); imag[ix] = cuCimagf(c); } } #ifdef __cplusplus } #endif
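A usage sketch for the two kernels above, assuming it sits in the same translation unit as their definitions. The grid-stride loops inside the kernels make the 128x256 launch shape below an arbitrary choice for illustration, not a value taken from the original project.

#include <cstdio>
#include <cuda_runtime.h>
#include <cuComplex.h>

int main()
{
    const int n = 1 << 20;
    float *d_re, *d_im;
    cuFloatComplex *d_z;
    cudaMalloc((void **)&d_re, n * sizeof(float));
    cudaMalloc((void **)&d_im, n * sizeof(float));
    cudaMalloc((void **)&d_z,  n * sizeof(cuFloatComplex));
    cudaMemset(d_re, 0, n * sizeof(float));
    cudaMemset(d_im, 0, n * sizeof(float));

    interleave<<<128, 256>>>(d_z, d_re, d_im, n);      // SoA -> AoS for CUBLAS
    deinterleave<<<128, 256>>>(d_re, d_im, d_z, n);    // AoS -> SoA back again

    cudaDeviceSynchronize();
    printf("done: %s\n", cudaGetErrorString(cudaGetLastError()));

    cudaFree(d_re); cudaFree(d_im); cudaFree(d_z);
    return 0;
}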
4f05e08bc98de082d50881dd8b95f01d339a3593.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<cuda.h> #include<nvvm.h> #include<unistd.h> #include<malloc.h> #include<iostream> #include<iomanip> #include"PreprocessSW.h" #include"Data.h" #include"Preprocess.h" #include"Gain.h" // Lenght of each data __constant__ int gcT_size; __constant__ int gcP_size; // Threshold of the SW algorithm __constant__ int gcThre; // Data of the query __constant__ char gcP_seq[1024]; // Cost and Gain __constant__ int gcMatch; __constant__ int gcMiss; __constant__ int gcExtend; __constant__ int gcBegin; enum{ Zero, Diagonal, Vertical, Horizon, }; using namespace std; PreprocessSW::PreprocessSW(const Data& txt, const Data& ptn, const Preprocess& prepro, int threshold){ // Sieze check if(ptn.size() > 1024 || ptn.size() * txt.size() > 1024 * 1024 * 1024){ cout << "Too large size" << endl; return; } cout << "At the beginning of the SW algorithm (Preprocess)" << endl; // Set value in constant memory int tsize = txt.size(); int psize = ptn.size(); hipMemcpyToSymbol(gcT_size, &tsize, sizeof(int)); hipMemcpyToSymbol(gcP_size, &psize, sizeof(int)); hipMemcpyToSymbol(gcThre, &threshold, sizeof(int)); hipMemcpyToSymbol(gcP_seq, ptn.data(), sizeof(char) * ptn.size()); // TODO Cost and gain int gain = MATCH; hipMemcpyToSymbol(gcMatch, &gain, sizeof(int)); gain = MISS; hipMemcpyToSymbol(gcMiss, &gain, sizeof(int)); gain = EXT; hipMemcpyToSymbol(gcExtend, &gain, sizeof(int)); gain = BEG; hipMemcpyToSymbol(gcBegin, &gain, sizeof(int)); // Dynamic Programing part by call_DP call_DP(txt, ptn, prepro); std::cout << "At the End of the SW algorithm" << std::endl; } PreprocessSW::~PreprocessSW(){ } // Implementation __global__ void DP(char* dT_seq, int* dRange, char* dTrace, int* dScore){ // ThreadId = ptn point int id = threadIdx.x; // BlockId shows begin point of txt int bId = blockIdx.x; int start = dRange[bId * 2]; int end = dRange[bId * 2 + 1] + gcP_size; // The acid in this thread char p = gcP_seq[id]; // p-1 row line's value __shared__ int Hp_1[1024]; __shared__ int Ep_1[1024]; // Temporary int Hp_1_buf = 0; int Ep_1_buf = 0; // t-1 element value int Ht_1 = 0; int Ft_1 = 0; // p-1 t-1 element value int Ht_1p_1 = 0; // Initialize Hp_1[id] = 0; Ep_1[id] = 0; // Similar score int sim = 0; int point = id * gcT_size - id + start; // Culcurate elements for(int t = start - id; t < end + 1; ++t){ // Control culcurate order if(t<start){} // Get similar score else{ // Compare acids if(dT_seq[t] == p){sim = gcMatch;} else{sim = gcMiss;} } // SW algorithm // Culcurate each elements Ht_1p_1 += sim; // Diagonal Ht_1 += gcBegin; // Horizon (Start) Ft_1 += gcExtend; // Horizon (Extend) Hp_1_buf = Hp_1[id] + gcBegin; // Vertical (Start) Ep_1_buf = Ep_1[id] + gcExtend; // Vertical (Extend) // Choose the gap score if(Ht_1 > Ft_1){Ft_1 = Ht_1;} // Horizon if(Hp_1_buf > Ft_1){Ep_1_buf = Hp_1_buf;} // Vertical // Choose the max score // Ht_1 is stored the max score if(Ht_1p_1 > Ep_1_buf){ // Diagonal if(Ht_1p_1 > Ft_1){ Ht_1 = Ht_1p_1; dTrace[point] = Diagonal; } // Horizon else{ Ht_1 = Ft_1; dTrace[point] = Horizon; } } else { // Vertical if(Ep_1_buf > Ft_1){ Ht_1 = Ep_1_buf; dTrace[point] = Vertical; } // Horizon else{ Ht_1 = Ft_1; dTrace[point] = Horizon; } } // The case 0 is max if(Ht_1 <= 0){ Ht_1 = 0; // Set 0 other value Ft_1 = 0; Ep_1_buf = 0; dTrace[point] = Zero; } // Hp-1 is next Ht-1p-1 Ht_1p_1 = Hp_1[id]; __syncthreads(); // Set value need next culcurate // p+1 row line if(t >= start){ Hp_1[id + 1] = Ht_1; Ep_1[id + 1] = Ep_1_buf; // 
DEBUG, score check // dTrace[point] = (char)(Ht_1); } if(Ht_1 >= gcThre){ // printf("Score = %d:\n", Ht_1); // traceback(dTrace, dT_seq, point-1, t); if(Ht_1 >= (dScore[t] & 0x0000ffff)){ // Set score and now ptn point dScore[t] = Ht_1 + (id << 16); } } ++point; __syncthreads(); // for end } } // Provisional void PreprocessSW::call_DP(const Data& txt, const Data& ptn, const Preprocess& prepro){ // Set txt char* dT_seq; hipMalloc((void**)&dT_seq, sizeof(char)*txt.size()); hipMemcpy(dT_seq, txt.data(), sizeof(char)*txt.size(), hipMemcpyHostToDevice); // Set Traceback char* dTrace; hipMalloc((void**)&dTrace, sizeof(char)*txt.size()*ptn.size()); // Set Score and point int* dScore; hipMalloc((void**)&dScore, sizeof(int)*txt.size()); int* init0 = new int[txt.size()]; for(int i=0;i<txt.size();++i){init0[i]=0;} hipMemcpy(dScore, init0, sizeof(int)*txt.size(), hipMemcpyHostToDevice); // Get block data int blockNum = prepro.block(); int* dRange = new int[blockNum]; hipMalloc((void**)&dRange, sizeof(int)*blockNum*2); hipMemcpy(dRange, prepro.getAll(), sizeof(int)*blockNum*2, hipMemcpyHostToDevice); // Main process hipLaunchKernelGGL(( DP), dim3(blockNum),dim3(ptn.size()), 0, 0, dT_seq, dRange, dTrace, dScore); // Direction copy char* direction = new char[txt.size()*ptn.size()]; hipMemcpy(direction, dTrace, sizeof(char)*txt.size()*ptn.size(), hipMemcpyDeviceToHost); // show(direction,txt,ptn); // Score and point copy int* score = new int[txt.size()]; hipMemcpy(score, dScore, sizeof(int)*txt.size(), hipMemcpyDeviceToHost); // traceback if txt has homelogy checkScore(direction, score, txt); delete[] direction; delete[] score; delete[] init0; hipFree(dT_seq); hipFree(dTrace); hipFree(dScore); hipFree(dRange); } // score -> 0~16 : 17~31 = score : point of ptn void PreprocessSW::checkScore(const char* direction, const int* score, const Data& txt) const{ // get the max score int x = 0, y = 0, max = 0; for(int i=0; i<txt.size(); ++i){ int result = score[i] & 0x0000ffff; if(max < result){ x = i; y = (score[i] & 0xffff0000) >> 16; max = result; } } cout << "max score is " << max << endl; if(max != 0){ traceback(direction, txt, x, y); } } void PreprocessSW::traceback(const char* direction, const Data& txt, int txt_point, int ptn_point) const{ // Store the result, get enough size char *ans = new char[1024 * 2]; // Point of result array int p = 0; int point = txt_point; int trace = point + ptn_point * txt.size(); // Traceback while(trace >= 0){ switch(direction[trace]){ case Diagonal: ans[p++] = txt[point--]; trace -= txt.size() + 1; break; case Vertical: ans[p++] = '+'; trace -= txt.size(); break; case Horizon: ans[p++] = '-'; --trace; --point; break; case Zero: // End trace = -1; break; default: // Didn't use trace = -1; break; } } // This array has reverse answer for(int i=p-1;i>=0;--i){ printf("%c", ans[i]); } printf(" %d ~ %d \n", point+1, txt_point); delete[] ans; } void PreprocessSW::show(const char* score, const Data& txt, const Data& ptn) const{ cout << " "; for(int i=0; i < ptn.size(); ++i){ cout << " " << ptn[i]; } cout << endl; for(int t=0; t < txt.size(); ++t){ cout << txt[t] << " "; for(int p=0; p < ptn.size(); ++p){ cout << setw(3) << static_cast<int>(score[t + p*txt.size()]); } cout << endl; } }
4f05e08bc98de082d50881dd8b95f01d339a3593.cu
#include<cuda.h> #include<nvvm.h> #include<unistd.h> #include<malloc.h> #include<iostream> #include<iomanip> #include"PreprocessSW.h" #include"Data.h" #include"Preprocess.h" #include"Gain.h" // Lenght of each data __constant__ int gcT_size; __constant__ int gcP_size; // Threshold of the SW algorithm __constant__ int gcThre; // Data of the query __constant__ char gcP_seq[1024]; // Cost and Gain __constant__ int gcMatch; __constant__ int gcMiss; __constant__ int gcExtend; __constant__ int gcBegin; enum{ Zero, Diagonal, Vertical, Horizon, }; using namespace std; PreprocessSW::PreprocessSW(const Data& txt, const Data& ptn, const Preprocess& prepro, int threshold){ // Sieze check if(ptn.size() > 1024 || ptn.size() * txt.size() > 1024 * 1024 * 1024){ cout << "Too large size" << endl; return; } cout << "At the beginning of the SW algorithm (Preprocess)" << endl; // Set value in constant memory int tsize = txt.size(); int psize = ptn.size(); cudaMemcpyToSymbol(gcT_size, &tsize, sizeof(int)); cudaMemcpyToSymbol(gcP_size, &psize, sizeof(int)); cudaMemcpyToSymbol(gcThre, &threshold, sizeof(int)); cudaMemcpyToSymbol(gcP_seq, ptn.data(), sizeof(char) * ptn.size()); // TODO Cost and gain int gain = MATCH; cudaMemcpyToSymbol(gcMatch, &gain, sizeof(int)); gain = MISS; cudaMemcpyToSymbol(gcMiss, &gain, sizeof(int)); gain = EXT; cudaMemcpyToSymbol(gcExtend, &gain, sizeof(int)); gain = BEG; cudaMemcpyToSymbol(gcBegin, &gain, sizeof(int)); // Dynamic Programing part by call_DP call_DP(txt, ptn, prepro); std::cout << "At the End of the SW algorithm" << std::endl; } PreprocessSW::~PreprocessSW(){ } // Implementation __global__ void DP(char* dT_seq, int* dRange, char* dTrace, int* dScore){ // ThreadId = ptn point int id = threadIdx.x; // BlockId shows begin point of txt int bId = blockIdx.x; int start = dRange[bId * 2]; int end = dRange[bId * 2 + 1] + gcP_size; // The acid in this thread char p = gcP_seq[id]; // p-1 row line's value __shared__ int Hp_1[1024]; __shared__ int Ep_1[1024]; // Temporary int Hp_1_buf = 0; int Ep_1_buf = 0; // t-1 element value int Ht_1 = 0; int Ft_1 = 0; // p-1 t-1 element value int Ht_1p_1 = 0; // Initialize Hp_1[id] = 0; Ep_1[id] = 0; // Similar score int sim = 0; int point = id * gcT_size - id + start; // Culcurate elements for(int t = start - id; t < end + 1; ++t){ // Control culcurate order if(t<start){} // Get similar score else{ // Compare acids if(dT_seq[t] == p){sim = gcMatch;} else{sim = gcMiss;} } // SW algorithm // Culcurate each elements Ht_1p_1 += sim; // Diagonal Ht_1 += gcBegin; // Horizon (Start) Ft_1 += gcExtend; // Horizon (Extend) Hp_1_buf = Hp_1[id] + gcBegin; // Vertical (Start) Ep_1_buf = Ep_1[id] + gcExtend; // Vertical (Extend) // Choose the gap score if(Ht_1 > Ft_1){Ft_1 = Ht_1;} // Horizon if(Hp_1_buf > Ft_1){Ep_1_buf = Hp_1_buf;} // Vertical // Choose the max score // Ht_1 is stored the max score if(Ht_1p_1 > Ep_1_buf){ // Diagonal if(Ht_1p_1 > Ft_1){ Ht_1 = Ht_1p_1; dTrace[point] = Diagonal; } // Horizon else{ Ht_1 = Ft_1; dTrace[point] = Horizon; } } else { // Vertical if(Ep_1_buf > Ft_1){ Ht_1 = Ep_1_buf; dTrace[point] = Vertical; } // Horizon else{ Ht_1 = Ft_1; dTrace[point] = Horizon; } } // The case 0 is max if(Ht_1 <= 0){ Ht_1 = 0; // Set 0 other value Ft_1 = 0; Ep_1_buf = 0; dTrace[point] = Zero; } // Hp-1 is next Ht-1p-1 Ht_1p_1 = Hp_1[id]; __syncthreads(); // Set value need next culcurate // p+1 row line if(t >= start){ Hp_1[id + 1] = Ht_1; Ep_1[id + 1] = Ep_1_buf; // DEBUG, score check // dTrace[point] = (char)(Ht_1); } if(Ht_1 >= gcThre){ // 
printf("Score = %d:\n", Ht_1); // traceback(dTrace, dT_seq, point-1, t); if(Ht_1 >= (dScore[t] & 0x0000ffff)){ // Set score and now ptn point dScore[t] = Ht_1 + (id << 16); } } ++point; __syncthreads(); // for end } } // Provisional void PreprocessSW::call_DP(const Data& txt, const Data& ptn, const Preprocess& prepro){ // Set txt char* dT_seq; cudaMalloc((void**)&dT_seq, sizeof(char)*txt.size()); cudaMemcpy(dT_seq, txt.data(), sizeof(char)*txt.size(), cudaMemcpyHostToDevice); // Set Traceback char* dTrace; cudaMalloc((void**)&dTrace, sizeof(char)*txt.size()*ptn.size()); // Set Score and point int* dScore; cudaMalloc((void**)&dScore, sizeof(int)*txt.size()); int* init0 = new int[txt.size()]; for(int i=0;i<txt.size();++i){init0[i]=0;} cudaMemcpy(dScore, init0, sizeof(int)*txt.size(), cudaMemcpyHostToDevice); // Get block data int blockNum = prepro.block(); int* dRange = new int[blockNum]; cudaMalloc((void**)&dRange, sizeof(int)*blockNum*2); cudaMemcpy(dRange, prepro.getAll(), sizeof(int)*blockNum*2, cudaMemcpyHostToDevice); // Main process DP<<<blockNum,ptn.size()>>>(dT_seq, dRange, dTrace, dScore); // Direction copy char* direction = new char[txt.size()*ptn.size()]; cudaMemcpy(direction, dTrace, sizeof(char)*txt.size()*ptn.size(), cudaMemcpyDeviceToHost); // show(direction,txt,ptn); // Score and point copy int* score = new int[txt.size()]; cudaMemcpy(score, dScore, sizeof(int)*txt.size(), cudaMemcpyDeviceToHost); // traceback if txt has homelogy checkScore(direction, score, txt); delete[] direction; delete[] score; delete[] init0; cudaFree(dT_seq); cudaFree(dTrace); cudaFree(dScore); cudaFree(dRange); } // score -> 0~16 : 17~31 = score : point of ptn void PreprocessSW::checkScore(const char* direction, const int* score, const Data& txt) const{ // get the max score int x = 0, y = 0, max = 0; for(int i=0; i<txt.size(); ++i){ int result = score[i] & 0x0000ffff; if(max < result){ x = i; y = (score[i] & 0xffff0000) >> 16; max = result; } } cout << "max score is " << max << endl; if(max != 0){ traceback(direction, txt, x, y); } } void PreprocessSW::traceback(const char* direction, const Data& txt, int txt_point, int ptn_point) const{ // Store the result, get enough size char *ans = new char[1024 * 2]; // Point of result array int p = 0; int point = txt_point; int trace = point + ptn_point * txt.size(); // Traceback while(trace >= 0){ switch(direction[trace]){ case Diagonal: ans[p++] = txt[point--]; trace -= txt.size() + 1; break; case Vertical: ans[p++] = '+'; trace -= txt.size(); break; case Horizon: ans[p++] = '-'; --trace; --point; break; case Zero: // End trace = -1; break; default: // Didn't use trace = -1; break; } } // This array has reverse answer for(int i=p-1;i>=0;--i){ printf("%c", ans[i]); } printf(" %d ~ %d \n", point+1, txt_point); delete[] ans; } void PreprocessSW::show(const char* score, const Data& txt, const Data& ptn) const{ cout << " "; for(int i=0; i < ptn.size(); ++i){ cout << " " << ptn[i]; } cout << endl; for(int t=0; t < txt.size(); ++t){ cout << txt[t] << " "; for(int p=0; p < ptn.size(); ++p){ cout << setw(3) << static_cast<int>(score[t + p*txt.size()]); } cout << endl; } }
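For reference, the H/E/F variables in the DP kernel above correspond to the standard affine-gap (Gotoh) form of the Smith-Waterman recurrence, with gcBegin the gap-open gain, gcExtend the gap-extension gain, and s(t,p) equal to gcMatch when the residues match and gcMiss otherwise. For pattern index p and text index t, one cell is:

\[
\begin{aligned}
E_{p,t} &= \max\bigl(H_{p-1,t} + g_{\mathrm{begin}},\; E_{p-1,t} + g_{\mathrm{extend}}\bigr) \\
F_{p,t} &= \max\bigl(H_{p,t-1} + g_{\mathrm{begin}},\; F_{p,t-1} + g_{\mathrm{extend}}\bigr) \\
H_{p,t} &= \max\bigl(0,\; H_{p-1,t-1} + s(t,p),\; E_{p,t},\; F_{p,t}\bigr)
\end{aligned}
\]

A cell value of 0 maps to the Zero traceback state, and any H at or above gcThre is recorded into dScore together with the pattern position, which is what checkScore and traceback consume on the host.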
40955f3d6c593e067ea59519ceff0fe9a2c8a88f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation and/or // other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its contributors // may be used to endorse or promote products derived from this software without // specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON // ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Author's note: // This file was distributed as part of the Nature Biotechnology // supplementary software release for DeepBind. Users of DeepBind // are encouraged to instead use the latest source code and binaries // for scoring sequences at // http://tools.genes.toronto.edu/deepbind/ // #include <smat_cuda/elemwise2.cuh> #include <smat_cuda/cuda_errors.h> #include <smat/vm/instruction_db.h> #include <smat/vm/util/specialization_table.h> #include <smat/vm/util/specialization_typelists.h> SM_NAMESPACE_BEGIN DEF_AB_GENERIC(copy, b[j] = (B)a[i]) // copy function just casts element of type A to destination type B #define KERNEL_COPY_DD_PARAMS const S* src, D* dst #define KERNEL_COPY_DD_ARGS src,i,dst,i #define KERNEL_COPY_DD_PREAMBLE #define KERNEL_COPY_RD_PARAMS const S* src, D* dst, usize_t m #define KERNEL_COPY_RD_ARGS src,i%m,dst,i #define KERNEL_COPY_RD_PREAMBLE #define KERNEL_COPY_CD_PARAMS const S* src, D* dst, usize_t m #define KERNEL_COPY_CD_ARGS src,i/m,dst,i #define KERNEL_COPY_CD_PREAMBLE #define KERNEL_COPY_SD_PARAMS const S _src, D* dst #define KERNEL_COPY_SD_ARGS src,0,dst,i #define KERNEL_COPY_SD_PREAMBLE const S src[1] = { _src }; // turn scalar into an "array" for the sake of generic copy code; should be optimized away // TODO: specialize this for smaller-than-32-bit copy, // since right now it's significantly slower, and working with // bytes will be important (converting to/from). // Note that if source bytes can be bound to a texture, // they can be auto-converted to range [0,1]. 
#define DEF_KERNEL_COPY(broadcast) \ template <typename S, typename D> \ __global__ void kernel_copy_##broadcast(KERNEL_COPY_##broadcast##_PARAMS, usize_t size) \ { \ DECL_KERNEL_VARS \ KERNEL_COPY_##broadcast##_PREAMBLE \ for (usize_t i = (usize_t)bdx*bx+tx; i < size; i += bdx*gdx) \ k_copy<S,D>::apply(KERNEL_COPY_##broadcast##_ARGS); \ } /////////////////////////////////////////////////////////////////// // COPY VARIANTS: // dd = device matrix -> device matrix // rd = device rowvec -> device matrix // cd = device colvec -> device matrix // sd = host scalar -> device matrix // id = identity matrix -> device matrix /////////////////////////////////////////////////////////////////// DEF_KERNEL_COPY(DD) // no broadcasting DEF_KERNEL_COPY(RD) // broadcast row vec arg DEF_KERNEL_COPY(CD) // broadcast col vec arg DEF_KERNEL_COPY(SD) // broadcast scalar arg #define LAUNCH_KERNEL_COPY(broadcast) \ hipLaunchKernelGGL(( kernel_copy_##broadcast), dim3(cfg.gdim),dim3(cfg.bdim),cfg.smem,cfg.stream, template <typename S, typename D> struct execute_copy_typed_xd { static void executeopcode_t opcode, const argument& src, const argument& dst) { if (src.vtype != vt_darray || dst.vtype != vt_darray) SM_ERROR(format("NotImplementedError: Unsupported combination of argument value types in copy: %s:%s -> %s:%s.\n", \ vtype2str(src.vtype),dtype2str(src.dtype), \ vtype2str(dst.vtype),dtype2str(dst.dtype)).c_str()); usize_t size = (usize_t)dst.size(); if (size > 0) { launchcfg cfg = make_elemwise_launchcfg(size); // Launch either an elementwise or a row/col broadcasted version of the functor if (src.shape == dst.shape) { LAUNCH_KERNEL_COPY(DD)(src.get<const S*>(),dst.get<D*>(),size); // direct elementwise copy // TODO: make this faster for small matching types } else if (src.shape.y == 1 && src.shape.x == dst.shape.x) { LAUNCH_KERNEL_COPY(RD)(src.get<const S*>(),dst.get<D*>(),dst.shape.x,size); // broadcast row vector on left } else if (src.shape.x == 1 && src.shape.y == dst.shape.y) { LAUNCH_KERNEL_COPY(CD)(src.get<const S*>(),dst.get<D*>(),dst.shape.x,size); // broadcast col vector on left } else SM_ERROR("NotImplementedError: incompatible broadcasting dimensions at kernel launch.\n"); } } }; // Launch kernel to write scalar value to dst; we can assume the types are the same // because the context base class tries to coerce the dtypes of constant arguments. template <typename D> struct execute_copy_typed_sd { static void execute(opcode_t opcode, const argument& src, const argument& dst) { usize_t size = (usize_t)dst.size(); if (size > 0) { launchcfg cfg = make_elemwise_launchcfg(size); LAUNCH_KERNEL_COPY(SD)(src.get<D>(),dst.get<D*>(),size); } } }; template <typename D> __global__ void kernel_copy_id(D* dst, usize_t m_plus_one, usize_t size) { DECL_KERNEL_VARS for (usize_t i = (usize_t)bdx*bx+tx; i < size; i += bdx*gdx) dst[i] = (i % m_plus_one) == 0 ? 
(D)1 : (D)0; } // Launch kernel to write identity matrix to dst template <typename D> struct execute_copy_typed_id { static void execute(opcode_t opcode, const argument& src, const argument& dst) { usize_t size = (usize_t)dst.size(); if (size > 0) { launchcfg cfg = make_elemwise_launchcfg(size); hipLaunchKernelGGL(( kernel_copy_id<D>), dim3(cfg.gdim),dim3(cfg.bdim),cfg.smem,cfg.stream, dst.get<D*>(),dst.shape.x+1,size); } } }; template <typename T> void copy_ch_typed(T val, T* dst, usize_t size) { for (usize_t i = 0; i < size; ++i) dst[i] = val; } void copy_ch(const argument& src, const argument& dst) { switch (dst.dtype) {case b8: copy_ch_typed(src.get<bool>() ,dst.get<bool*>() ,dst.size()); case i8: copy_ch_typed(src.get<int8_t>() ,dst.get<int8_t*>() ,dst.size()); case u8: copy_ch_typed(src.get<uint8_t>() ,dst.get<uint8_t*>() ,dst.size()); case i16: copy_ch_typed(src.get<int16_t>() ,dst.get<int16_t*>() ,dst.size()); case u16: copy_ch_typed(src.get<uint16_t>(),dst.get<uint16_t*>(),dst.size()); case i32: copy_ch_typed(src.get<int32_t>() ,dst.get<int32_t*>() ,dst.size()); case u32: copy_ch_typed(src.get<uint32_t>(),dst.get<uint32_t*>(),dst.size()); case i64: copy_ch_typed(src.get<int64_t>() ,dst.get<int64_t*>() ,dst.size()); case u64: copy_ch_typed(src.get<uint64_t>(),dst.get<uint64_t*>(),dst.size()); case f32: copy_ch_typed(src.get<float>() ,dst.get<float*>() ,dst.size()); case f64: copy_ch_typed(src.get<double>() ,dst.get<double*>() ,dst.size()); } } void execute_copy(opcode_t opcode, const argument& src, const argument& dst) { SM_ASSERT(dst.vtype == vt_harray || dst.vtype == vt_darray); if (dst.shape.size() == 0) return; isize_t dtsize = dtype_size(src.dtype); SM_ASSERTMSG(dst.vtype == vt_darray || dst.vtype == vt_harray,"AssertionError: Output must be host address or device address."); bool all_full_stride = (dst.strides == src.strides) && (src.strides.y == src.shape.x*src.strides.x); if (src.vtype == vt_harray && dst.vtype == vt_darray) { // HOST -> DEVICE SM_ASSERT(src.dtype == dst.dtype); // dtypes must match if transferring to/from host. SM_ASSERT(src.shape == dst.shape); // sizes must match (no broadcasting when copy to device) if (all_full_stride) { ccu(MemcpyAsync ,dst.get<void*>(),src.get<const void*>(),src.size()*dtsize,hipMemcpyHostToDevice,thread_cudactx().stream()); } // try to do 1D copies since they can be overlapped if we eventually use streams else { ccu(Memcpy2DAsync,dst.get<void*>(),dst.strides.y*dtsize,src.get<const void*>(),src.strides.y*dtsize,src.shape.x*src.strides.x*dtsize,src.shape.y,hipMemcpyHostToDevice,thread_cudactx().stream()); } } else if (src.vtype == vt_darray && dst.vtype == vt_harray) { // HOST <- DEVICE SM_ASSERT(src.dtype == dst.dtype); // dtypes must match if transferring to/from host. 
SM_ASSERT(src.shape == dst.shape); // sizes must match (no broadcasting when copy to device) if (all_full_stride) { ccu(MemcpyAsync ,dst.get<void*>(),src.get<const void*>(),src.size()*dtsize,hipMemcpyDeviceToHost,thread_cudactx().stream()); } // try to do 1D copies since they can be overlapped if we eventually use streams else { ccu(Memcpy2DAsync,dst.get<void*>(),dst.strides.y*dtsize,src.get<const void*>(),src.strides.y*dtsize,src.shape.x*src.strides.x*dtsize,src.shape.y,hipMemcpyDeviceToHost,thread_cudactx().stream()); } } else if (src.vtype == vt_darray && dst.vtype == vt_darray) { // DEVICE -> DEVICE if (src.shape != dst.shape || src.dtype != dst.dtype) { SM_ASSERTMSG(src.strides.y == src.shape.x*src.strides.x,"NotImplementedError: Cannot perform broadcasting/conversion on column-sliced input array.") SM_ASSERTMSG(dst.strides.y == dst.shape.x*dst.strides.x,"NotImplementedError: Cannot perform broadcasting/conversion on column-sliced output array.") DECL_SPECIALIZATION_TABLE(T_GxG,execute_fn2,execute_copy_typed_xd); // TODO: Several of these kernels will have identical machine code, so don't generate redundant kernels; TODO: when data is of small but matching type (e.g. int8->int8), copy with larger size when possible specialization_table(src.dtype,dst.dtype)(opcode,src,dst); // copy src to dst, casting type as necessary } else { if (all_full_stride) { ccu(MemcpyAsync ,dst.get<void*>(),src.get<const void*>(),src.size()*dtsize,hipMemcpyDeviceToDevice,thread_cudactx().stream()); } // try to do 1D copies since they can be overlapped if we eventually use streams else { ccu(Memcpy2DAsync,dst.get<void*>(),dst.strides.y*dtsize,src.get<const void*>(),src.strides.y*dtsize,src.shape.x*src.strides.x*dtsize,src.shape.y,hipMemcpyDeviceToDevice,thread_cudactx().stream()); } } } else { SM_ASSERTMSG(dst.strides.x == 1, "NotImplementedError: Column slicing not yet supported for this operation."); // TODO SM_ASSERTMSG(dst.strides.y == dst.strides.x*dst.shape.x || dst.shape.y == 1,"NotImplementedError: Column slicing not yet supported for this operation."); // TODO if (src.vtype == vt_carray && dst.vtype == vt_darray) { DECL_SPECIALIZATION_TABLE(T_G,execute_fn2,execute_copy_typed_sd) specialization_table(dst.dtype)(opcode,src,dst); // broadcast scalar, for any matching type } else if (src.vtype == vt_carray && dst.vtype == vt_harray) { copy_ch(src,dst); // broadcast scalar to host array } else if (src.vtype == vt_iarray && dst.vtype == vt_darray) { DECL_SPECIALIZATION_TABLE(T_G,execute_fn2,execute_copy_typed_id); specialization_table(dst.dtype)(opcode,src,dst); // broadcast scalar, for any matching type } else { SM_ERROR(format("Unsupported combination of argument value types in exec_copy: %s:%s -> %s:%s.\n",vtype2str(src.vtype),dtype2str(src.dtype),vtype2str(dst.vtype),dtype2str(dst.dtype)).c_str()); } } } SM_NAMESPACE_END
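Illustrative aside, not part of the original .hip file above: relative to the .cu original that follows, the hipified text has two spots that will not compile as written — the member-function header reads `static void executeopcode_t opcode, ...` (the `(` after `execute` is missing), and `LAUNCH_KERNEL_COPY` now ends with a trailing comma inside an open `hipLaunchKernelGGL(`, so a call such as `LAUNCH_KERNEL_COPY(DD)(...)` expands with unbalanced parentheses. A compile-only sketch (hypothetical names, not the smat API) of one way to keep a single launch macro balanced under both nvcc and hipcc:

#include <cstddef>
#if defined(__HIPCC__)
  #include <hip/hip_runtime.h>
  typedef hipStream_t gpuStream_t;
  // HIP path: hipLaunchKernelGGL takes the kernel, the launch geometry, then the kernel arguments.
  #define LAUNCH_ELEMWISE(kernel, cfg, ...) \
      hipLaunchKernelGGL(kernel, (cfg).gdim, (cfg).bdim, (cfg).smem, (cfg).stream, __VA_ARGS__)
#else
  #include <cuda_runtime.h>
  typedef cudaStream_t gpuStream_t;
  // CUDA path: plain triple-chevron launch with the same argument list.
  #define LAUNCH_ELEMWISE(kernel, cfg, ...) \
      kernel<<<(cfg).gdim, (cfg).bdim, (cfg).smem, (cfg).stream>>>(__VA_ARGS__)
#endif

struct launchcfg_sketch { dim3 gdim; dim3 bdim; size_t smem; gpuStream_t stream; };

template <typename D>
__global__ void fill_value(D* dst, D value, unsigned n) {
    unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) dst[i] = value;
}

// The call site compiles unchanged under either toolchain, e.g.:
//   launchcfg_sketch cfg{ dim3(4), dim3(256), 0, 0 };
//   LAUNCH_ELEMWISE(fill_value<float>, cfg, d_ptr, 1.0f, 1024u);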
40955f3d6c593e067ea59519ceff0fe9a2c8a88f.cu
// Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation and/or // other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its contributors // may be used to endorse or promote products derived from this software without // specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON // ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Author's note: // This file was distributed as part of the Nature Biotechnology // supplementary software release for DeepBind. Users of DeepBind // are encouraged to instead use the latest source code and binaries // for scoring sequences at // http://tools.genes.toronto.edu/deepbind/ // #include <smat_cuda/elemwise2.cuh> #include <smat_cuda/cuda_errors.h> #include <smat/vm/instruction_db.h> #include <smat/vm/util/specialization_table.h> #include <smat/vm/util/specialization_typelists.h> SM_NAMESPACE_BEGIN DEF_AB_GENERIC(copy, b[j] = (B)a[i]) // copy function just casts element of type A to destination type B #define KERNEL_COPY_DD_PARAMS const S* src, D* dst #define KERNEL_COPY_DD_ARGS src,i,dst,i #define KERNEL_COPY_DD_PREAMBLE #define KERNEL_COPY_RD_PARAMS const S* src, D* dst, usize_t m #define KERNEL_COPY_RD_ARGS src,i%m,dst,i #define KERNEL_COPY_RD_PREAMBLE #define KERNEL_COPY_CD_PARAMS const S* src, D* dst, usize_t m #define KERNEL_COPY_CD_ARGS src,i/m,dst,i #define KERNEL_COPY_CD_PREAMBLE #define KERNEL_COPY_SD_PARAMS const S _src, D* dst #define KERNEL_COPY_SD_ARGS src,0,dst,i #define KERNEL_COPY_SD_PREAMBLE const S src[1] = { _src }; // turn scalar into an "array" for the sake of generic copy code; should be optimized away // TODO: specialize this for smaller-than-32-bit copy, // since right now it's significantly slower, and working with // bytes will be important (converting to/from). // Note that if source bytes can be bound to a texture, // they can be auto-converted to range [0,1]. 
#define DEF_KERNEL_COPY(broadcast) \ template <typename S, typename D> \ __global__ void kernel_copy_##broadcast(KERNEL_COPY_##broadcast##_PARAMS, usize_t size) \ { \ DECL_KERNEL_VARS \ KERNEL_COPY_##broadcast##_PREAMBLE \ for (usize_t i = (usize_t)bdx*bx+tx; i < size; i += bdx*gdx) \ k_copy<S,D>::apply(KERNEL_COPY_##broadcast##_ARGS); \ } /////////////////////////////////////////////////////////////////// // COPY VARIANTS: // dd = device matrix -> device matrix // rd = device rowvec -> device matrix // cd = device colvec -> device matrix // sd = host scalar -> device matrix // id = identity matrix -> device matrix /////////////////////////////////////////////////////////////////// DEF_KERNEL_COPY(DD) // no broadcasting DEF_KERNEL_COPY(RD) // broadcast row vec arg DEF_KERNEL_COPY(CD) // broadcast col vec arg DEF_KERNEL_COPY(SD) // broadcast scalar arg #define LAUNCH_KERNEL_COPY(broadcast) \ kernel_copy_##broadcast<<<cfg.gdim,cfg.bdim,cfg.smem,cfg.stream>>> template <typename S, typename D> struct execute_copy_typed_xd { static void execute(opcode_t opcode, const argument& src, const argument& dst) { if (src.vtype != vt_darray || dst.vtype != vt_darray) SM_ERROR(format("NotImplementedError: Unsupported combination of argument value types in copy: %s:%s -> %s:%s.\n", \ vtype2str(src.vtype),dtype2str(src.dtype), \ vtype2str(dst.vtype),dtype2str(dst.dtype)).c_str()); usize_t size = (usize_t)dst.size(); if (size > 0) { launchcfg cfg = make_elemwise_launchcfg(size); // Launch either an elementwise or a row/col broadcasted version of the functor if (src.shape == dst.shape) { LAUNCH_KERNEL_COPY(DD)(src.get<const S*>(),dst.get<D*>(),size); // direct elementwise copy // TODO: make this faster for small matching types } else if (src.shape.y == 1 && src.shape.x == dst.shape.x) { LAUNCH_KERNEL_COPY(RD)(src.get<const S*>(),dst.get<D*>(),dst.shape.x,size); // broadcast row vector on left } else if (src.shape.x == 1 && src.shape.y == dst.shape.y) { LAUNCH_KERNEL_COPY(CD)(src.get<const S*>(),dst.get<D*>(),dst.shape.x,size); // broadcast col vector on left } else SM_ERROR("NotImplementedError: incompatible broadcasting dimensions at kernel launch.\n"); } } }; // Launch kernel to write scalar value to dst; we can assume the types are the same // because the context base class tries to coerce the dtypes of constant arguments. template <typename D> struct execute_copy_typed_sd { static void execute(opcode_t opcode, const argument& src, const argument& dst) { usize_t size = (usize_t)dst.size(); if (size > 0) { launchcfg cfg = make_elemwise_launchcfg(size); LAUNCH_KERNEL_COPY(SD)(src.get<D>(),dst.get<D*>(),size); } } }; template <typename D> __global__ void kernel_copy_id(D* dst, usize_t m_plus_one, usize_t size) { DECL_KERNEL_VARS for (usize_t i = (usize_t)bdx*bx+tx; i < size; i += bdx*gdx) dst[i] = (i % m_plus_one) == 0 ? 
(D)1 : (D)0; } // Launch kernel to write identity matrix to dst template <typename D> struct execute_copy_typed_id { static void execute(opcode_t opcode, const argument& src, const argument& dst) { usize_t size = (usize_t)dst.size(); if (size > 0) { launchcfg cfg = make_elemwise_launchcfg(size); kernel_copy_id<D><<<cfg.gdim,cfg.bdim,cfg.smem,cfg.stream>>>(dst.get<D*>(),dst.shape.x+1,size); } } }; template <typename T> void copy_ch_typed(T val, T* dst, usize_t size) { for (usize_t i = 0; i < size; ++i) dst[i] = val; } void copy_ch(const argument& src, const argument& dst) { switch (dst.dtype) {case b8: copy_ch_typed(src.get<bool>() ,dst.get<bool*>() ,dst.size()); case i8: copy_ch_typed(src.get<int8_t>() ,dst.get<int8_t*>() ,dst.size()); case u8: copy_ch_typed(src.get<uint8_t>() ,dst.get<uint8_t*>() ,dst.size()); case i16: copy_ch_typed(src.get<int16_t>() ,dst.get<int16_t*>() ,dst.size()); case u16: copy_ch_typed(src.get<uint16_t>(),dst.get<uint16_t*>(),dst.size()); case i32: copy_ch_typed(src.get<int32_t>() ,dst.get<int32_t*>() ,dst.size()); case u32: copy_ch_typed(src.get<uint32_t>(),dst.get<uint32_t*>(),dst.size()); case i64: copy_ch_typed(src.get<int64_t>() ,dst.get<int64_t*>() ,dst.size()); case u64: copy_ch_typed(src.get<uint64_t>(),dst.get<uint64_t*>(),dst.size()); case f32: copy_ch_typed(src.get<float>() ,dst.get<float*>() ,dst.size()); case f64: copy_ch_typed(src.get<double>() ,dst.get<double*>() ,dst.size()); } } void execute_copy(opcode_t opcode, const argument& src, const argument& dst) { SM_ASSERT(dst.vtype == vt_harray || dst.vtype == vt_darray); if (dst.shape.size() == 0) return; isize_t dtsize = dtype_size(src.dtype); SM_ASSERTMSG(dst.vtype == vt_darray || dst.vtype == vt_harray,"AssertionError: Output must be host address or device address."); bool all_full_stride = (dst.strides == src.strides) && (src.strides.y == src.shape.x*src.strides.x); if (src.vtype == vt_harray && dst.vtype == vt_darray) { // HOST -> DEVICE SM_ASSERT(src.dtype == dst.dtype); // dtypes must match if transferring to/from host. SM_ASSERT(src.shape == dst.shape); // sizes must match (no broadcasting when copy to device) if (all_full_stride) { ccu(MemcpyAsync ,dst.get<void*>(),src.get<const void*>(),src.size()*dtsize,cudaMemcpyHostToDevice,thread_cudactx().stream()); } // try to do 1D copies since they can be overlapped if we eventually use streams else { ccu(Memcpy2DAsync,dst.get<void*>(),dst.strides.y*dtsize,src.get<const void*>(),src.strides.y*dtsize,src.shape.x*src.strides.x*dtsize,src.shape.y,cudaMemcpyHostToDevice,thread_cudactx().stream()); } } else if (src.vtype == vt_darray && dst.vtype == vt_harray) { // HOST <- DEVICE SM_ASSERT(src.dtype == dst.dtype); // dtypes must match if transferring to/from host. 
SM_ASSERT(src.shape == dst.shape); // sizes must match (no broadcasting when copy to device) if (all_full_stride) { ccu(MemcpyAsync ,dst.get<void*>(),src.get<const void*>(),src.size()*dtsize,cudaMemcpyDeviceToHost,thread_cudactx().stream()); } // try to do 1D copies since they can be overlapped if we eventually use streams else { ccu(Memcpy2DAsync,dst.get<void*>(),dst.strides.y*dtsize,src.get<const void*>(),src.strides.y*dtsize,src.shape.x*src.strides.x*dtsize,src.shape.y,cudaMemcpyDeviceToHost,thread_cudactx().stream()); } } else if (src.vtype == vt_darray && dst.vtype == vt_darray) { // DEVICE -> DEVICE if (src.shape != dst.shape || src.dtype != dst.dtype) { SM_ASSERTMSG(src.strides.y == src.shape.x*src.strides.x,"NotImplementedError: Cannot perform broadcasting/conversion on column-sliced input array.") SM_ASSERTMSG(dst.strides.y == dst.shape.x*dst.strides.x,"NotImplementedError: Cannot perform broadcasting/conversion on column-sliced output array.") DECL_SPECIALIZATION_TABLE(T_GxG,execute_fn2,execute_copy_typed_xd); // TODO: Several of these kernels will have identical machine code, so don't generate redundant kernels; TODO: when data is of small but matching type (e.g. int8->int8), copy with larger size when possible specialization_table(src.dtype,dst.dtype)(opcode,src,dst); // copy src to dst, casting type as necessary } else { if (all_full_stride) { ccu(MemcpyAsync ,dst.get<void*>(),src.get<const void*>(),src.size()*dtsize,cudaMemcpyDeviceToDevice,thread_cudactx().stream()); } // try to do 1D copies since they can be overlapped if we eventually use streams else { ccu(Memcpy2DAsync,dst.get<void*>(),dst.strides.y*dtsize,src.get<const void*>(),src.strides.y*dtsize,src.shape.x*src.strides.x*dtsize,src.shape.y,cudaMemcpyDeviceToDevice,thread_cudactx().stream()); } } } else { SM_ASSERTMSG(dst.strides.x == 1, "NotImplementedError: Column slicing not yet supported for this operation."); // TODO SM_ASSERTMSG(dst.strides.y == dst.strides.x*dst.shape.x || dst.shape.y == 1,"NotImplementedError: Column slicing not yet supported for this operation."); // TODO if (src.vtype == vt_carray && dst.vtype == vt_darray) { DECL_SPECIALIZATION_TABLE(T_G,execute_fn2,execute_copy_typed_sd) specialization_table(dst.dtype)(opcode,src,dst); // broadcast scalar, for any matching type } else if (src.vtype == vt_carray && dst.vtype == vt_harray) { copy_ch(src,dst); // broadcast scalar to host array } else if (src.vtype == vt_iarray && dst.vtype == vt_darray) { DECL_SPECIALIZATION_TABLE(T_G,execute_fn2,execute_copy_typed_id); specialization_table(dst.dtype)(opcode,src,dst); // broadcast scalar, for any matching type } else { SM_ERROR(format("Unsupported combination of argument value types in exec_copy: %s:%s -> %s:%s.\n",vtype2str(src.vtype),dtype2str(src.dtype),vtype2str(dst.vtype),dtype2str(dst.dtype)).c_str()); } } } SM_NAMESPACE_END
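Illustrative aside, not part of the original .cu file above: the strided branches of execute_copy() map smat's per-axis strides onto cudaMemcpy2DAsync as pitch (row stride in bytes), width (contiguous bytes per row) and height (row count). A small standalone illustration of that pitch arithmetic, using the synchronous cudaMemcpy2D and made-up sizes:

#include <cuda_runtime.h>
#include <cstdio>

// Copy a 3x4 sub-block out of an 8-column row-major device matrix:
//   dpitch/spitch = row stride in bytes, width = contiguous bytes per row, height = rows.
int main() {
    const int full_cols = 8, sub_rows = 3, sub_cols = 4;
    float h_src[sub_rows * full_cols];
    for (int i = 0; i < sub_rows * full_cols; ++i) h_src[i] = (float)i;

    float* d_full = nullptr;
    cudaMalloc(&d_full, sizeof(h_src));
    cudaMemcpy(d_full, h_src, sizeof(h_src), cudaMemcpyHostToDevice);

    float h_sub[sub_rows * sub_cols];
    cudaMemcpy2D(h_sub, sub_cols * sizeof(float),      // dst pitch: packed rows
                 d_full, full_cols * sizeof(float),    // src pitch: full row stride
                 sub_cols * sizeof(float), sub_rows,   // width in bytes, height in rows
                 cudaMemcpyDeviceToHost);

    for (int r = 0; r < sub_rows; ++r) {
        for (int c = 0; c < sub_cols; ++c) printf("%5.1f ", h_sub[r * sub_cols + c]);
        printf("\n");
    }
    cudaFree(d_full);
    return 0;
}

Expected output is the first four columns of each source row (0-3, 8-11, 16-19), which is exactly the column-sliced view the stride checks in execute_copy() guard against handling elementwise.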
e54bf9885f6201f4c08deb6fa24851cb402859d7.hip
// !!! This is a file automatically generated by hipify!!! /*!/------------------------------------------------------------------------------ * cuSparseMultiply.cu * * ac-SpGEMM * * Authors: Daniel Mlakar, Markus Steinberger, Martin Winter *------------------------------------------------------------------------------ */ #include "cusparse/include/cuSparseMultiply.h" #include <hip/hip_runtime.h> namespace cuSPARSE { template<> hipsparseStatus_t CUSPARSEAPI CuSparseTest<double>::cusparseMultiply(hipsparseHandle_t handle, hipsparseOperation_t transA, hipsparseOperation_t transB, int m, int n, int k, const hipsparseMatDescr_t descrA, int nnzA, const double *csrSortedValA, const int *csrSortedRowPtrA, const int *csrSortedColIndA, const hipsparseMatDescr_t descrB, int nnzB, const double *csrSortedValB, const int *csrSortedRowPtrB, const int *csrSortedColIndB, const hipsparseMatDescr_t descrC, double *csrSortedValC, const int *csrSortedRowPtrC, int *csrSortedColIndC){ return hipsparseDcsrgemm(handle, transA, transB, m, n, k, descrA, nnzA, csrSortedValA, csrSortedRowPtrA, csrSortedColIndA, descrB, nnzB, csrSortedValB, csrSortedRowPtrB, csrSortedColIndB, descrC, csrSortedValC, csrSortedRowPtrC, csrSortedColIndC); } template<> hipsparseStatus_t CUSPARSEAPI CuSparseTest<float>::cusparseMultiply(hipsparseHandle_t handle, hipsparseOperation_t transA, hipsparseOperation_t transB, int m, int n, int k, const hipsparseMatDescr_t descrA, int nnzA, const float *csrSortedValA, const int *csrSortedRowPtrA, const int *csrSortedColIndA, const hipsparseMatDescr_t descrB, int nnzB, const float *csrSortedValB, const int *csrSortedRowPtrB, const int *csrSortedColIndB, const hipsparseMatDescr_t descrC, float *csrSortedValC, const int *csrSortedRowPtrC, int *csrSortedColIndC){ return hipsparseScsrgemm(handle, transA, transB, m, n, k, descrA, nnzA, csrSortedValA, csrSortedRowPtrA, csrSortedColIndA, descrB, nnzB, csrSortedValB, csrSortedRowPtrB, csrSortedColIndB, descrC, csrSortedValC, csrSortedRowPtrC, csrSortedColIndC); } template<> hipsparseStatus_t CUSPARSEAPI CuSparseTest<float>::cusparseTranspose(hipsparseHandle_t handle, int m, int n, int nnz, const float *csrSortedVal, const int *csrSortedRowPtr, const int *csrSortedColInd, float *cscSortedVal, int *cscSortedRowInd, int *cscSortedColPtr, hipsparseAction_t copyValues, hipsparseIndexBase_t idxBase) { return hipsparseScsr2csc(handle, m, n, nnz, csrSortedVal, csrSortedRowPtr, csrSortedColInd, cscSortedVal, cscSortedRowInd, cscSortedColPtr, copyValues, idxBase); } template<> hipsparseStatus_t CUSPARSEAPI CuSparseTest<double>::cusparseTranspose(hipsparseHandle_t handle, int m, int n, int nnz, const double *csrSortedVal, const int *csrSortedRowPtr, const int *csrSortedColInd, double *cscSortedVal, int *cscSortedRowInd, int *cscSortedColPtr, hipsparseAction_t copyValues, hipsparseIndexBase_t idxBase) { return hipsparseDcsr2csc(handle, m, n, nnz, csrSortedVal, csrSortedRowPtr, csrSortedColInd, cscSortedVal, cscSortedRowInd, cscSortedColPtr, copyValues, idxBase); } template <typename DataType> float CuSparseTest<DataType>::Multiply(const dCSR<DataType>& A, const dCSR<DataType>& B, dCSR<DataType>& matOut, uint32_t& cusparse_nnz) { int nnzC; int *nnzTotalDevHostPtr = &nnzC; float duration; int m, n, k; m = A.rows; n = B.cols; k = A.cols; matOut.reset(); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // ############################ hipEventRecord(start); // ############################ // Allocate memory for row indices 
hipMalloc(&(matOut.row_offsets), sizeof(uint32_t) * (A.rows + 1)); // Precompute number of nnz in C checkCuSparseError(hipsparseXcsrgemmNnz( handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE, m, n, k, descr, A.nnz, reinterpret_cast<const int*>(A.row_offsets), reinterpret_cast<const int*>(A.col_ids), descrB, B.nnz, reinterpret_cast<const int*>(B.row_offsets), reinterpret_cast<const int*>(B.col_ids), descrC, reinterpret_cast<int*>(matOut.row_offsets), nnzTotalDevHostPtr), "cuSparse: Precompute failed" ); cusparse_nnz = nnzC; // Allocate rest of memory hipMalloc(&(matOut.col_ids), sizeof(uint32_t) * nnzC); hipMalloc(&(matOut.data), sizeof(DataType) * nnzC); // Compute SpGEMM checkCuSparseError(cusparseMultiply( handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE, m, n, k, descr, A.nnz, reinterpret_cast<const DataType*>(A.data), reinterpret_cast<const int*>(A.row_offsets), reinterpret_cast<const int*>(A.col_ids), descrB, B.nnz, reinterpret_cast<const DataType*>(B.data), reinterpret_cast<const int*>(B.row_offsets), reinterpret_cast<const int*>(B.col_ids), descrC, reinterpret_cast<DataType*>(matOut.data), reinterpret_cast<int*>(matOut.row_offsets), reinterpret_cast<int*>(matOut.col_ids)), "cuSparse: SpGEMM failed"); matOut.nnz = nnzC; matOut.rows = m; matOut.cols = n; // ############################ hipEventRecord(stop); hipEventSynchronize(stop); // ############################ hipEventElapsedTime(&duration, start, stop); return duration; } template float CuSparseTest<float>::Multiply(const dCSR<float>& A, const dCSR<float>& B, dCSR<float>& matOut, uint32_t& cusparse_nnz); template float CuSparseTest<double>::Multiply(const dCSR<double>& A, const dCSR<double>& B, dCSR<double>& matOut, uint32_t& cusparse_nnz); template <typename DataType> void CuSparseTest<DataType>::Transpose(const dCSR<DataType>& A, dCSR<DataType>& AT) { AT.alloc(A.cols, A.rows, A.nnz); checkCuSparseError(cusparseTranspose(handle, A.rows, A.cols, A.nnz, reinterpret_cast<const DataType*>(A.data), reinterpret_cast<const int*>(A.row_offsets), reinterpret_cast<const int*>(A.col_ids), reinterpret_cast<DataType*>(AT.data), reinterpret_cast<int*>(AT.col_ids), reinterpret_cast<int*>(AT.row_offsets), HIPSPARSE_ACTION_NUMERIC, HIPSPARSE_INDEX_BASE_ZERO), "transpose failed"); } template void CuSparseTest<float>::Transpose(const dCSR<float>& A, dCSR<float>& AT); template void CuSparseTest<double>::Transpose(const dCSR<double>& A, dCSR<double>& AT); }
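Illustrative aside, not part of the original .hip file above: CuSparseTest<DataType>::Multiply() brackets the two cuSPARSE calls with hipEventRecord / hipEventSynchronize / hipEventElapsedTime but never destroys the events or checks the runtime calls. A self-contained CUDA sketch of the same event-timing pattern around a stand-in kernel (the kernel and sizes are made up, not the SpGEMM call):

#include <cuda_runtime.h>
#include <cstdio>

__global__ void busy_kernel(float* x, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] = x[i] * 2.0f + 1.0f;
}

int main() {
    const int n = 1 << 20;
    float* d = nullptr;
    cudaMalloc(&d, n * sizeof(float));
    cudaMemset(d, 0, n * sizeof(float));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);                          // enqueue start marker on the default stream
    busy_kernel<<<(n + 255) / 256, 256>>>(d, n);     // timed work
    cudaEventRecord(stop);                           // enqueue stop marker
    cudaEventSynchronize(stop);                      // wait until the stop event has completed

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);          // elapsed time in milliseconds
    printf("kernel took %.3f ms\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d);
    return 0;
}

cudaEventElapsedTime only gives a valid result after the stop event has completed, hence the cudaEventSynchronize before reading it; the explicit cudaEventDestroy calls avoid the small leak the Multiply() routine above incurs on every invocation.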
e54bf9885f6201f4c08deb6fa24851cb402859d7.cu
/*!/------------------------------------------------------------------------------ * cuSparseMultiply.cu * * ac-SpGEMM * * Authors: Daniel Mlakar, Markus Steinberger, Martin Winter *------------------------------------------------------------------------------ */ #include "cusparse/include/cuSparseMultiply.h" #include <cuda_runtime.h> namespace cuSPARSE { template<> cusparseStatus_t CUSPARSEAPI CuSparseTest<double>::cusparseMultiply(cusparseHandle_t handle, cusparseOperation_t transA, cusparseOperation_t transB, int m, int n, int k, const cusparseMatDescr_t descrA, int nnzA, const double *csrSortedValA, const int *csrSortedRowPtrA, const int *csrSortedColIndA, const cusparseMatDescr_t descrB, int nnzB, const double *csrSortedValB, const int *csrSortedRowPtrB, const int *csrSortedColIndB, const cusparseMatDescr_t descrC, double *csrSortedValC, const int *csrSortedRowPtrC, int *csrSortedColIndC){ return cusparseDcsrgemm(handle, transA, transB, m, n, k, descrA, nnzA, csrSortedValA, csrSortedRowPtrA, csrSortedColIndA, descrB, nnzB, csrSortedValB, csrSortedRowPtrB, csrSortedColIndB, descrC, csrSortedValC, csrSortedRowPtrC, csrSortedColIndC); } template<> cusparseStatus_t CUSPARSEAPI CuSparseTest<float>::cusparseMultiply(cusparseHandle_t handle, cusparseOperation_t transA, cusparseOperation_t transB, int m, int n, int k, const cusparseMatDescr_t descrA, int nnzA, const float *csrSortedValA, const int *csrSortedRowPtrA, const int *csrSortedColIndA, const cusparseMatDescr_t descrB, int nnzB, const float *csrSortedValB, const int *csrSortedRowPtrB, const int *csrSortedColIndB, const cusparseMatDescr_t descrC, float *csrSortedValC, const int *csrSortedRowPtrC, int *csrSortedColIndC){ return cusparseScsrgemm(handle, transA, transB, m, n, k, descrA, nnzA, csrSortedValA, csrSortedRowPtrA, csrSortedColIndA, descrB, nnzB, csrSortedValB, csrSortedRowPtrB, csrSortedColIndB, descrC, csrSortedValC, csrSortedRowPtrC, csrSortedColIndC); } template<> cusparseStatus_t CUSPARSEAPI CuSparseTest<float>::cusparseTranspose(cusparseHandle_t handle, int m, int n, int nnz, const float *csrSortedVal, const int *csrSortedRowPtr, const int *csrSortedColInd, float *cscSortedVal, int *cscSortedRowInd, int *cscSortedColPtr, cusparseAction_t copyValues, cusparseIndexBase_t idxBase) { return cusparseScsr2csc(handle, m, n, nnz, csrSortedVal, csrSortedRowPtr, csrSortedColInd, cscSortedVal, cscSortedRowInd, cscSortedColPtr, copyValues, idxBase); } template<> cusparseStatus_t CUSPARSEAPI CuSparseTest<double>::cusparseTranspose(cusparseHandle_t handle, int m, int n, int nnz, const double *csrSortedVal, const int *csrSortedRowPtr, const int *csrSortedColInd, double *cscSortedVal, int *cscSortedRowInd, int *cscSortedColPtr, cusparseAction_t copyValues, cusparseIndexBase_t idxBase) { return cusparseDcsr2csc(handle, m, n, nnz, csrSortedVal, csrSortedRowPtr, csrSortedColInd, cscSortedVal, cscSortedRowInd, cscSortedColPtr, copyValues, idxBase); } template <typename DataType> float CuSparseTest<DataType>::Multiply(const dCSR<DataType>& A, const dCSR<DataType>& B, dCSR<DataType>& matOut, uint32_t& cusparse_nnz) { int nnzC; int *nnzTotalDevHostPtr = &nnzC; float duration; int m, n, k; m = A.rows; n = B.cols; k = A.cols; matOut.reset(); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // ############################ cudaEventRecord(start); // ############################ // Allocate memory for row indices cudaMalloc(&(matOut.row_offsets), sizeof(uint32_t) * (A.rows + 1)); // Precompute number of nnz in C 
checkCuSparseError(cusparseXcsrgemmNnz( handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE, m, n, k, descr, A.nnz, reinterpret_cast<const int*>(A.row_offsets), reinterpret_cast<const int*>(A.col_ids), descrB, B.nnz, reinterpret_cast<const int*>(B.row_offsets), reinterpret_cast<const int*>(B.col_ids), descrC, reinterpret_cast<int*>(matOut.row_offsets), nnzTotalDevHostPtr), "cuSparse: Precompute failed" ); cusparse_nnz = nnzC; // Allocate rest of memory cudaMalloc(&(matOut.col_ids), sizeof(uint32_t) * nnzC); cudaMalloc(&(matOut.data), sizeof(DataType) * nnzC); // Compute SpGEMM checkCuSparseError(cusparseMultiply( handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE, m, n, k, descr, A.nnz, reinterpret_cast<const DataType*>(A.data), reinterpret_cast<const int*>(A.row_offsets), reinterpret_cast<const int*>(A.col_ids), descrB, B.nnz, reinterpret_cast<const DataType*>(B.data), reinterpret_cast<const int*>(B.row_offsets), reinterpret_cast<const int*>(B.col_ids), descrC, reinterpret_cast<DataType*>(matOut.data), reinterpret_cast<int*>(matOut.row_offsets), reinterpret_cast<int*>(matOut.col_ids)), "cuSparse: SpGEMM failed"); matOut.nnz = nnzC; matOut.rows = m; matOut.cols = n; // ############################ cudaEventRecord(stop); cudaEventSynchronize(stop); // ############################ cudaEventElapsedTime(&duration, start, stop); return duration; } template float CuSparseTest<float>::Multiply(const dCSR<float>& A, const dCSR<float>& B, dCSR<float>& matOut, uint32_t& cusparse_nnz); template float CuSparseTest<double>::Multiply(const dCSR<double>& A, const dCSR<double>& B, dCSR<double>& matOut, uint32_t& cusparse_nnz); template <typename DataType> void CuSparseTest<DataType>::Transpose(const dCSR<DataType>& A, dCSR<DataType>& AT) { AT.alloc(A.cols, A.rows, A.nnz); checkCuSparseError(cusparseTranspose(handle, A.rows, A.cols, A.nnz, reinterpret_cast<const DataType*>(A.data), reinterpret_cast<const int*>(A.row_offsets), reinterpret_cast<const int*>(A.col_ids), reinterpret_cast<DataType*>(AT.data), reinterpret_cast<int*>(AT.col_ids), reinterpret_cast<int*>(AT.row_offsets), CUSPARSE_ACTION_NUMERIC, CUSPARSE_INDEX_BASE_ZERO), "transpose failed"); } template void CuSparseTest<float>::Transpose(const dCSR<float>& A, dCSR<float>& AT); template void CuSparseTest<double>::Transpose(const dCSR<double>& A, dCSR<double>& AT); }
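Illustrative aside, not part of the original .cu file above: both cuSparseMultiply variants follow cuSPARSE's legacy two-phase csrgemm protocol — cusparseXcsrgemmNnz fills the (rows + 1)-entry row_offsets array and reports nnzC, and only then are col_ids and data allocated and filled by cusparse{S,D}csrgemm. (These csrgemm/csr2csc entry points belong to the legacy API that was removed around CUDA 11; newer toolkits expose cusparseSpGEMM and cusparseCsr2cscEx2 instead.) A host-only sketch of the size bookkeeping on a tiny dense matrix; the dense-to-CSR loops stand in for the library calls and are not cuSPARSE code:

#include <cstdio>
#include <vector>

int main() {
    const int rows = 3, cols = 4;
    const float dense[rows][cols] = { {1, 0, 0, 2},
                                      {0, 0, 3, 0},
                                      {4, 5, 0, 0} };
    // Phase 1: count nonzeros per row, then prefix-sum into row_offsets (rows + 1 entries).
    std::vector<int> row_offsets(rows + 1, 0);
    for (int r = 0; r < rows; ++r)
        for (int c = 0; c < cols; ++c)
            if (dense[r][c] != 0.0f) ++row_offsets[r + 1];
    for (int r = 0; r < rows; ++r) row_offsets[r + 1] += row_offsets[r];
    const int nnz = row_offsets[rows];               // analogue of nnzC from cusparseXcsrgemmNnz

    // Phase 2: allocate exactly nnz column indices / values and fill them row by row.
    std::vector<int> col_ids(nnz);
    std::vector<float> data(nnz);
    std::vector<int> cursor(row_offsets.begin(), row_offsets.end() - 1);
    for (int r = 0; r < rows; ++r)
        for (int c = 0; c < cols; ++c)
            if (dense[r][c] != 0.0f) { col_ids[cursor[r]] = c; data[cursor[r]] = dense[r][c]; ++cursor[r]; }

    printf("nnz = %d, row_offsets = [%d %d %d %d]\n", nnz,
           row_offsets[0], row_offsets[1], row_offsets[2], row_offsets[3]);
    return 0;
}

The row_offsets buffer always needs rows + 1 entries regardless of nnz, which is why Multiply() can allocate it with sizeof(uint32_t) * (A.rows + 1) before the nonzero count is known.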
9194085517a62f148f006b81ca8f00220cfa15c7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef _TIMER_ #include "hip/hip_runtime_api.h" #endif #include "stdio.h" #define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) ) #define max(a,b) FORMA_MAX(a,b) #define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) ) #define min(a,b) FORMA_MIN(a,b) #define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 ) #define GAPX (22) #define GAPY (22) #define EXTENT (5) #ifndef FORMA_MAX_BLOCKDIM_0 #define FORMA_MAX_BLOCKDIM_0 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_1 #define FORMA_MAX_BLOCKDIM_1 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_2 #define FORMA_MAX_BLOCKDIM_2 1024 #endif template<typename T> __global__ void __kernel_init__(T* input, T value) { int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x); input[loc] = value; } template<typename T> void initialize_array(T* d_input, int size, T value) { dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0)); dim3 init_block(FORMA_MAX_BLOCKDIM_0); hipLaunchKernelGGL(( __kernel_init__), dim3(init_grid),dim3(init_block), 0, 0, d_input,value); } void Check_CUDA_Error(const char* message); /*Texture references */ /*Shared Memory Variable */ extern __shared__ char __FORMA_SHARED_MEM__[]; /* Device code Begin */ __global__ void __kernel___forma_kernel__0__(float * input, int N, int M, float * __copy_arr_0__, float * __copy_arr_1__, float * __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __var_1__){ int __FORMA_SHARED_MEM_OFFSET__ = 0; float* __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0))); float* __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0))); float * __tilevar_2__ = __tilevar_0__; float * __tilevar_3__ = __tilevar_1__; float * __tilevar_4__ = __tilevar_0__; float * __tilevar_5__ = __tilevar_1__; int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X) + GAPX); int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y) + GAPY); int __iter_2__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ; if( __iter_2__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(N-1)) ){ int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ; if( __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1)) ){ __tilevar_2__[__iter_3__+(0-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(0-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__)]; } } __syncthreads(); int __iter_4__ = FORMA_MAX((__iter_1__+1),1) + (int)(threadIdx.y) ; if( __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){ int __iter_5__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ; if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){ float __temp_2__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(-1)+(0-(__iter_1__+0)))]); float __temp_5__ = (__tilevar_2__[__iter_5__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))]); float __temp_6__ = (5 * __temp_2__ + 12 * __temp_5__); float __temp_9__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))]); float __temp_10__ = (__temp_6__ + 15 * __temp_9__); float __temp_13__ = (__tilevar_2__[__iter_5__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))]); float __temp_14__ = (__temp_10__ + 12 * __temp_13__); float __temp_17__ = 
(__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(0-(__iter_1__+0)))]); float __temp_18__ = (__temp_14__ + 5 * __temp_17__); float __temp_19__ = (__temp_18__ / 118); __tilevar_3__[__iter_5__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+1)))] = __temp_19__; } } int __iter_6__ = FORMA_MAX((__iter_1__+1),1) + (int)(threadIdx.y) ; if( __iter_6__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){ int __iter_7__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ; if( __iter_7__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){ if (__iter_6__ < (FORMA_MAX((__iter_1__+1),1)+2) || __iter_6__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2))-2) || __iter_7__ < (FORMA_MAX((__iter_0__+1),1)+2) || __iter_7__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2))-2)) { __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)] = __tilevar_3__[__iter_7__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(0-(__iter_1__+1)))]; } } } __syncthreads(); int __iter_10__ = FORMA_MAX((__iter_1__+2),1) + (int)(threadIdx.y) ; if( __iter_10__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){ int __iter_11__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ; if( __iter_11__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){ float __temp_32__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(-1)+(0-(__iter_1__+1)))]); float __temp_35__ = (__tilevar_3__[__iter_11__+(-1)+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(0-(__iter_1__+1)))]); float __temp_36__ = (5 * __temp_32__ + 12 * __temp_35__); float __temp_39__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(0-(__iter_1__+1)))]); float __temp_40__ = (__temp_36__ + 15 * __temp_39__); float __temp_43__ = (__tilevar_3__[__iter_11__+(1)+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(0-(__iter_1__+1)))]); float __temp_44__ = (__temp_40__ + 12 * __temp_43__); float __temp_47__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(0-(__iter_1__+1)))]); float __temp_48__ = (__temp_44__ + 5 * __temp_47__); float __temp_49__ = (__temp_48__ / 118); __tilevar_4__[__iter_11__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(0-(__iter_1__+2)))] = __temp_49__; } } int __iter_12__ = FORMA_MAX((__iter_1__+2),1) + (int)(threadIdx.y) ; if( __iter_12__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){ int __iter_13__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ; if( __iter_13__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){ if (__iter_12__ < (FORMA_MAX((__iter_1__+2),1)+2) || __iter_12__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2))-2) || __iter_13__ < (FORMA_MAX((__iter_0__+2),1)+2) || __iter_13__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2))-2)) { __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)] = __tilevar_4__[__iter_13__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(0-(__iter_1__+2)))]; } } } __syncthreads(); int __iter_16__ = FORMA_MAX((__iter_1__+3),1) + (int)(threadIdx.y) ; if( __iter_16__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){ int __iter_17__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ; if( __iter_17__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){ float __temp_60__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(-1)+(0-(__iter_1__+2)))]); float __temp_61__ = (__tilevar_4__[__iter_17__+(-1)+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+2)))]); float 
__temp_62__ = (5 * __temp_60__ + 12 * __temp_61__); float __temp_63__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+2)))]); float __temp_64__ = (__temp_62__ + 15 * __temp_63__); float __temp_65__ = (__tilevar_4__[__iter_17__+(1)+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+2)))]); float __temp_66__ = (__temp_64__ + 12 * __temp_65__); float __temp_67__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(0-(__iter_1__+2)))]); float __temp_68__ = (__temp_66__ + 5 * __temp_67__); float __temp_69__ = (__temp_68__ / 118); __tilevar_5__[__iter_17__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+3)))] = __temp_69__; } } int __iter_18__ = FORMA_MAX((__iter_1__+3),1) + (int)(threadIdx.y) ; if( __iter_18__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){ int __iter_19__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ; if( __iter_19__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){ if (__iter_18__ < (FORMA_MAX((__iter_1__+3),1)+2) || __iter_18__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2))-2) || __iter_19__ < (FORMA_MAX((__iter_0__+3),1)+2) || __iter_19__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2))-2)) { __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)] = __tilevar_5__[__iter_19__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(0-(__iter_1__+3)))]; } } } __syncthreads(); int __iter_22__ = FORMA_MAX((__iter_1__+4),1) + (int)(threadIdx.y) ; if( __iter_22__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(N-2)) ){ int __iter_23__ = FORMA_MAX((__iter_0__+4),1) + (int)(threadIdx.x) ; if( __iter_23__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2)) ){ float __temp_80__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(-1)+(0-(__iter_1__+3)))]); float __temp_81__ = (__tilevar_5__[__iter_23__+(-1)+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(0-(__iter_1__+3)))]); float __temp_82__ = (5 * __temp_80__ + 12 * __temp_81__); float __temp_83__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(0-(__iter_1__+3)))]); float __temp_84__ = (__temp_82__ + 15 * __temp_83__); float __temp_85__ = (__tilevar_5__[__iter_23__+(1)+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(0-(__iter_1__+3)))]); float __temp_86__ = (__temp_84__ + 12 * __temp_85__); float __temp_87__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(0-(__iter_1__+3)))]); float __temp_88__ = (__temp_86__ + 5 * __temp_87__); float __temp_89__ = (__temp_88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__)] = __temp_89__; } } } int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){ int FORMA_BLOCKDIM_Y = (int)(blockDim.y); int FORMA_BLOCKDIM_X = (int)(blockDim.x); int SMemSize = 0; SMemSize += sizeof(float)*(2*(FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0)); return SMemSize; } __global__ void __kernel___forma_kernel__1__(float * input, int N, int M, float * __copy_arr_0__, float * __copy_arr_1__, float * __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __var_1__){ int __FORMA_SHARED_MEM_OFFSET__ = 0; float* __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0))); float* __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += 
sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0))); float * __tilevar_2__ = __tilevar_0__; float * __tilevar_3__ = __tilevar_1__; float * __tilevar_4__ = __tilevar_0__; float * __tilevar_5__ = __tilevar_1__; int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X) + GAPX) + (int)(FORMA_BLOCKDIM_X); int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y) + GAPY); int __iter_2__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ; if( __iter_2__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(N-1)) ){ int __iter_3__ = FORMA_MAX(__iter_0__-2,0) + (int)(threadIdx.x) ; if( __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-1)) ){ __tilevar_2__[__iter_3__+(EXTENT-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(0-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__)]; } } __syncthreads(); int __iter_4__ = FORMA_MAX((__iter_1__+1),1) + (int)(threadIdx.y) ; if( __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){ int __iter_5__ = FORMA_MAX((__iter_0__-1),1) + (int)(threadIdx.x) ; if( __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2)) ){ float __temp_2__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(-1)+(0-(__iter_1__+0)))]); float __temp_5__ = (__tilevar_2__[__iter_5__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))]); float __temp_6__ = (5 * __temp_2__ + 12 * __temp_5__); float __temp_9__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))]); float __temp_10__ = (__temp_6__ + 15 * __temp_9__); float __temp_13__ = (__tilevar_2__[__iter_5__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))]); float __temp_14__ = (__temp_10__ + 12 * __temp_13__); float __temp_17__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(0-(__iter_1__+0)))]); float __temp_18__ = (__temp_14__ + 5 * __temp_17__); float __temp_19__ = (__temp_18__ / 118); __tilevar_3__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))] = __temp_19__; } } int __iter_6__ = FORMA_MAX((__iter_1__+1),1) + (int)(threadIdx.y) ; if( __iter_6__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){ int __iter_7__ = FORMA_MAX((__iter_0__-1),1) + (int)(threadIdx.x) ; if( __iter_7__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2)) ){ if (__iter_6__ < (FORMA_MAX((__iter_1__+1),1)+2) || __iter_6__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2))-2)) { __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)] = __tilevar_3__[__iter_7__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(0-(__iter_1__+0)))]; } } } __iter_6__ = FORMA_MAX((__iter_1__+1),1) + (int)(threadIdx.y) ; if( __iter_6__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){ int __iter_7__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ; if( __iter_7__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){ if (__iter_7__ < FORMA_MAX((__iter_0__-1),1) || __iter_7__ > FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2))) { __tilevar_3__[__iter_7__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(0-(__iter_1__+0)))] = __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)]; } } } __syncthreads(); int __iter_10__ = FORMA_MAX((__iter_1__+2),1) + (int)(threadIdx.y) ; if( __iter_10__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){ int __iter_11__ = FORMA_MAX((__iter_0__-2),1) + (int)(threadIdx.x) ; if( __iter_11__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2)) ){ float __temp_32__ = 
(__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*( __iter_10__+(-1)+(0-(__iter_1__+0)))]); float __temp_35__ = (__tilevar_3__[__iter_11__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(0-(__iter_1__+0)))]); float __temp_36__ = (5 * __temp_32__ + 12 * __temp_35__); float __temp_39__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(0-(__iter_1__+0)))]); float __temp_40__ = (__temp_36__ + 15 * __temp_39__); float __temp_43__ = (__tilevar_3__[__iter_11__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(0-(__iter_1__+0)))]); float __temp_44__ = (__temp_40__ + 12 * __temp_43__); float __temp_47__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(0-(__iter_1__+0)))]); float __temp_48__ = (__temp_44__ + 5 * __temp_47__); float __temp_49__ = (__temp_48__ / 118); __tilevar_4__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(0-(__iter_1__+0)))] = __temp_49__; } } int __iter_12__ = FORMA_MAX((__iter_1__+2),1) + (int)(threadIdx.y) ; if( __iter_12__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){ int __iter_13__ = FORMA_MAX((__iter_0__-2),1) + (int)(threadIdx.x) ; if( __iter_13__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2)) ){ if (__iter_12__ < (FORMA_MAX((__iter_1__+2),1)+2) || __iter_12__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2))-2)) { __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)] = __tilevar_4__[__iter_13__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(0-(__iter_1__+0)))]; } } } __iter_12__ = FORMA_MAX((__iter_1__+2),1) + (int)(threadIdx.y) ; if( __iter_12__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){ int __iter_13__ = FORMA_MAX((__iter_0__-4),1) + (int)(threadIdx.x) ; if( __iter_13__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) ){ if (__iter_13__ < FORMA_MAX((__iter_0__-2),1) || __iter_13__ > FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2)) ) { __tilevar_4__[__iter_13__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(0-(__iter_1__+0)))] = __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)]; } } } __syncthreads(); int __iter_16__ = FORMA_MAX((__iter_1__+3),1) + (int)(threadIdx.y) ; if( __iter_16__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){ int __iter_17__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ; if( __iter_17__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){ float __temp_60__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(-1)+(0-(__iter_1__+0)))]); float __temp_61__ = (__tilevar_4__[__iter_17__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+0)))]); float __temp_62__ = (5 * __temp_60__ + 12 * __temp_61__); float __temp_63__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+0)))]); float __temp_64__ = (__temp_62__ + 15 * __temp_63__); float __temp_65__ = (__tilevar_4__[__iter_17__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+0)))]); float __temp_66__ = (__temp_64__ + 12 * __temp_65__); float __temp_67__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(0-(__iter_1__+0)))]); float __temp_68__ = (__temp_66__ + 5 * __temp_67__); float __temp_69__ = (__temp_68__ / 118); __tilevar_5__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+0)))] = __temp_69__; } } int __iter_18__ = FORMA_MAX((__iter_1__+3),1) + (int)(threadIdx.y) ; if( __iter_18__ <= 
FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){ int __iter_19__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ; if( __iter_19__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){ if (__iter_18__ < (FORMA_MAX((__iter_1__+3),1)+2) || __iter_18__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2))-2)) { __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)] = __tilevar_5__[__iter_19__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(0-(__iter_1__+0)))]; } } } __iter_18__ = FORMA_MAX((__iter_1__+3),1) + (int)(threadIdx.y) ; if( __iter_18__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){ int __iter_19__ = FORMA_MAX((__iter_0__-5),1) + (int)(threadIdx.x) ; if( __iter_19__ <= FORMA_MIN(((__iter_0__+GAPX+5)-1),(M-2)) ){ if (__iter_19__ < FORMA_MAX((__iter_0__-3),1) || __iter_19__ > FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2))) { __tilevar_5__[__iter_19__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(0-(__iter_1__+0)))] = __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)]; } } } __syncthreads(); int __iter_22__ = FORMA_MAX((__iter_1__+4),1) + (int)(threadIdx.y) ; if( __iter_22__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(N-2)) ){ int __iter_23__ = FORMA_MAX((__iter_0__-4),1) + (int)(threadIdx.x) ; if( __iter_23__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) ){ float __temp_80__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(-1)+(0-(__iter_1__+0)))]); float __temp_81__ = (__tilevar_5__[__iter_23__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(0-(__iter_1__+0)))]); float __temp_82__ = (5 * __temp_80__ + 12 * __temp_81__); float __temp_83__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(0-(__iter_1__+0)))]); float __temp_84__ = (__temp_82__ + 15 * __temp_83__); float __temp_85__ = (__tilevar_5__[__iter_23__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(0-(__iter_1__+0)))]); float __temp_86__ = (__temp_84__ + 12 * __temp_85__); float __temp_87__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(0-(__iter_1__+0)))]); float __temp_88__ = (__temp_86__ + 5 * __temp_87__); float __temp_89__ = (__temp_88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__)] = __temp_89__; } } } __global__ void __kernel___forma_kernel__2__(float * input, int N, int M, float * __copy_arr_0__, float * __copy_arr_1__, float * __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __var_1__){ int __FORMA_SHARED_MEM_OFFSET__ = 0; float* __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0))); float* __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0))); float * __tilevar_2__ = __tilevar_0__; float * __tilevar_3__ = __tilevar_1__; float * __tilevar_4__ = __tilevar_0__; float * __tilevar_5__ = __tilevar_1__; int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X) + GAPX); int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y) + GAPY) + (int)(FORMA_BLOCKDIM_Y); int __iter_2__ = FORMA_MAX(__iter_1__-2,0) + (int)(threadIdx.y) ; if( __iter_2__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-1)) ){ int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ; if( __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1)) ){ 
__tilevar_2__[__iter_3__+(0-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(EXTENT-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__)]; } } __syncthreads(); int __iter_4__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.y) ; if( __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(N-2)) ){ int __iter_5__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ; if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){ float __temp_2__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_5__ = (__tilevar_2__[__iter_5__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))]); float __temp_6__ = (5 * __temp_2__ + 12 * __temp_5__); float __temp_9__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))]); float __temp_10__ = (__temp_6__ + 15 * __temp_9__); float __temp_13__ = (__tilevar_2__[ __iter_5__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))]); float __temp_14__ = (__temp_10__ + 12 * __temp_13__); float __temp_17__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_18__ = (__temp_14__ + 5 * __temp_17__); float __temp_19__ = (__temp_18__ / 118); __tilevar_3__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))] = __temp_19__; } } int __iter_6__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.y) ; if( __iter_6__ <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(N-2)) ){ int __iter_7__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ; if( __iter_7__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){ if (__iter_7__ < (FORMA_MAX((__iter_0__+1),1)+2) || __iter_7__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2))-2)) { __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)] = __tilevar_3__[__iter_7__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(EXTENT-(__iter_1__+0)))]; } } } __iter_6__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ; if( __iter_6__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2)) ){ int __iter_7__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ; if( __iter_7__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){ if (__iter_6__ < FORMA_MAX((__iter_1__-1),1) || __iter_6__ > FORMA_MIN(((__iter_1__+GAPY+1)-1),(N-2))) { __tilevar_3__[__iter_7__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(EXTENT-(__iter_1__+0)))] = __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)]; } } } __syncthreads(); int __iter_10__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.y) ; if( __iter_10__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-2)) ){ int __iter_11__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ; if( __iter_11__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){ float __temp_32__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_35__ = (__tilevar_3__[__iter_11__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))]); float __temp_36__ = (5 * __temp_32__ + 12 * __temp_35__); float __temp_39__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))]); float __temp_40__ = (__temp_36__ + 15 * __temp_39__); float __temp_43__ = (__tilevar_3__[__iter_11__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))]); float __temp_44__ = (__temp_40__ + 12 * __temp_43__); float __temp_47__ = 
(__tilevar_3__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_48__ = (__temp_44__ + 5 * __temp_47__); float __temp_49__ = (__temp_48__ / 118); __tilevar_4__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))] = __temp_49__; } } int __iter_12__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.y) ; if( __iter_12__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-2)) ){ int __iter_13__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ; if( __iter_13__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){ if (__iter_13__ < (FORMA_MAX((__iter_0__+2),1)+2) || __iter_13__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2))-2)) { __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)] = __tilevar_4__[__iter_13__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(EXTENT-(__iter_1__+0)))]; } } } __iter_12__ = FORMA_MAX((__iter_1__-4),1) + (int)(threadIdx.y) ; if( __iter_12__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(N-2)) ){ int __iter_13__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ; if( __iter_13__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){ if (__iter_12__ < FORMA_MAX((__iter_1__-2),1) || __iter_12__ > FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-2))) { __tilevar_4__[__iter_13__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(EXTENT-(__iter_1__+0)))] = __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)]; } } } __syncthreads(); int __iter_16__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ; if( __iter_16__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2)) ){ int __iter_17__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ; if( __iter_17__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){ float __temp_60__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_61__ = (__tilevar_4__[__iter_17__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]); float __temp_62__ = (5 * __temp_60__ + 12 * __temp_61__); float __temp_63__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]); float __temp_64__ = (__temp_62__ + 15 * __temp_63__); float __temp_65__ = (__tilevar_4__[__iter_17__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]); float __temp_66__ = (__temp_64__ + 12 * __temp_65__); float __temp_67__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_68__ = (__temp_66__ + 5 * __temp_67__); float __temp_69__ = (__temp_68__ / 118); __tilevar_5__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))] = __temp_69__; } } int __iter_18__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ; if( __iter_18__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2)) ){ int __iter_19__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ; if( __iter_19__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){ if (__iter_19__ < (FORMA_MAX((__iter_0__+3),1)+2) || __iter_19__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2))-2)) { __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)] = __tilevar_5__[__iter_19__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(EXTENT-(__iter_1__+0)))]; } } } __iter_18__ = FORMA_MAX((__iter_1__-5),1) + (int)(threadIdx.y) ; if( __iter_18__ <= FORMA_MIN(((__iter_1__+GAPY+5)-1),(N-2)) ){ int __iter_19__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ; if( __iter_19__ <= 
FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){ if (__iter_18__ < FORMA_MAX((__iter_1__-3),1) || __iter_18__ > FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2))) { __tilevar_5__[__iter_19__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(EXTENT-(__iter_1__+0)))] = __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)]; } } } __syncthreads(); int __iter_22__ = FORMA_MAX((__iter_1__-4),1) + (int)(threadIdx.y) ; if( __iter_22__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(N-2)) ){ int __iter_23__ = FORMA_MAX((__iter_0__+4),1) + (int)(threadIdx.x) ; if( __iter_23__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2)) ){ float __temp_80__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_81__ = (__tilevar_5__[__iter_23__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]); float __temp_82__ = (5 * __temp_80__ + 12 * __temp_81__); float __temp_83__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]); float __temp_84__ = (__temp_82__ + 15 * __temp_83__); float __temp_85__ = (__tilevar_5__[__iter_23__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]); float __temp_86__ = (__temp_84__ + 12 * __temp_85__); float __temp_87__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_88__ = (__temp_86__ + 5 * __temp_87__); float __temp_89__ = (__temp_88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__)] = __temp_89__; } } } __global__ void __kernel___forma_kernel__3__(float * input, int N, int M, float * __copy_arr_0__, float * __copy_arr_1__, float * __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __var_1__){ int __FORMA_SHARED_MEM_OFFSET__ = 0; float* __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0))); float* __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0))); float * __tilevar_2__ = __tilevar_0__; float * __tilevar_3__ = __tilevar_1__; float * __tilevar_4__ = __tilevar_0__; float * __tilevar_5__ = __tilevar_1__; int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X) + GAPX) + (int)(FORMA_BLOCKDIM_X); int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y) + GAPY) + (int)(FORMA_BLOCKDIM_Y); int __iter_2__ = FORMA_MAX(__iter_1__-2,0) + (int)(threadIdx.y) ; if( __iter_2__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-1)) ){ int __iter_3__ = FORMA_MAX(__iter_0__-2,0) + (int)(threadIdx.x) ; if( __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-1)) ){ __tilevar_2__[__iter_3__+(EXTENT-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(EXTENT-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__)]; } } __syncthreads(); int __iter_4__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.y) ; if( __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(N-2)) ){ int __iter_5__ = FORMA_MAX((__iter_0__-1),1) + (int)(threadIdx.x) ; if( __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2)) ){ float __temp_2__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_5__ = (__tilevar_2__[__iter_5__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))]); float __temp_6__ = (5 * __temp_2__ + 12 * __temp_5__); float 
__temp_9__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))]); float __temp_10__ = (__temp_6__ + 15 * __temp_9__); float __temp_13__ = (__tilevar_2__[__iter_5__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))]); float __temp_14__ = (__temp_10__ + 12 * __temp_13__); float __temp_17__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*( __iter_4__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_18__ = (__temp_14__ + 5 * __temp_17__); float __temp_19__ = (__temp_18__ / 118); __tilevar_3__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))] = __temp_19__; } } int __iter_6__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ; if( __iter_6__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2)) ){ int __iter_7__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ; if( __iter_7__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){ if (__iter_6__ < FORMA_MAX((__iter_1__-1),1) || __iter_6__ > FORMA_MIN(((__iter_1__+GAPY+1)-1),(N-2)) || __iter_7__ < FORMA_MAX((__iter_0__-1),1) || __iter_7__ > FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2))) { __tilevar_3__[__iter_7__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(EXTENT-(__iter_1__+0)))] = __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)]; } } } __syncthreads(); int __iter_10__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.y) ; if( __iter_10__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-2)) ){ int __iter_11__ = FORMA_MAX((__iter_0__-2),1) + (int)(threadIdx.x) ; if( __iter_11__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2)) ){ float __temp_32__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_35__ = (__tilevar_3__[__iter_11__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))]); float __temp_36__ = (5 * __temp_32__ + 12 * __temp_35__); float __temp_39__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))]); float __temp_40__ = (__temp_36__ + 15 * __temp_39__); float __temp_43__ = (__tilevar_3__[__iter_11__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))]); float __temp_44__ = (__temp_40__ + 12 * __temp_43__); float __temp_47__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_48__ = (__temp_44__ + 5 * __temp_47__); float __temp_49__ = (__temp_48__ / 118); __tilevar_4__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))] = __temp_49__; } } int __iter_12__ = FORMA_MAX((__iter_1__-4),1) + (int)(threadIdx.y) ; if( __iter_12__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(N-2)) ){ int __iter_13__ = FORMA_MAX((__iter_0__-4),1) + (int)(threadIdx.x) ; if( __iter_13__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) ){ if (__iter_12__ < FORMA_MAX((__iter_1__-2),1) || __iter_12__ > FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-2)) || __iter_13__ < FORMA_MAX((__iter_0__-2),1) || __iter_13__ > FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2))) { __tilevar_4__[__iter_13__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(EXTENT-(__iter_1__+0)))] = __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)]; } } } __syncthreads(); int __iter_16__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ; if( __iter_16__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2)) ){ int __iter_17__ = FORMA_MAX((__iter_0__-3),1) + 
(int)(threadIdx.x) ; if( __iter_17__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){ float __temp_60__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_61__ = (__tilevar_4__[__iter_17__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]); float __temp_62__ = (5 * __temp_60__ + 12 * __temp_61__); float __temp_63__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]); float __temp_64__ = (__temp_62__ + 15 * __temp_63__); float __temp_65__ = (__tilevar_4__[__iter_17__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]); float __temp_66__ = (__temp_64__ + 12 * __temp_65__); float __temp_67__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_68__ = (__temp_66__ + 5 * __temp_67__); float __temp_69__ = (__temp_68__ / 118); __tilevar_5__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))] = __temp_69__; } } int __iter_18__ = FORMA_MAX((__iter_1__-5),1) + (int)(threadIdx.y) ; if( __iter_18__ <= FORMA_MIN(((__iter_1__+GAPY+5)-1),(N-2)) ){ int __iter_19__ = FORMA_MAX((__iter_0__-5),1) + (int)(threadIdx.x) ; if( __iter_19__ <= FORMA_MIN(((__iter_0__+GAPX+5)-1),(M-2)) ){ if (__iter_18__ < FORMA_MAX((__iter_1__-3),1) || __iter_18__ > FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2)) || __iter_19__ < FORMA_MAX((__iter_0__-3),1) || __iter_19__ > FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2))) { __tilevar_5__[__iter_19__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(EXTENT-(__iter_1__+0)))] = __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)]; } } } __syncthreads(); int __iter_22__ = FORMA_MAX((__iter_1__-4),1) + (int)(threadIdx.y) ; if( __iter_22__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(N-2)) ){ int __iter_23__ = FORMA_MAX((__iter_0__-4),1) + (int)(threadIdx.x) ; if( __iter_23__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) ){ float __temp_80__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_81__ = (__tilevar_5__[__iter_23__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]); float __temp_82__ = (5 * __temp_80__ + 12 * __temp_81__); float __temp_83__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]); float __temp_84__ = (__temp_82__ + 15 * __temp_83__); float __temp_85__ = (__tilevar_5__[__iter_23__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]); float __temp_86__ = (__temp_84__ + 12 * __temp_85__); float __temp_87__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_88__ = (__temp_86__ + 5 * __temp_87__); float __temp_89__ = (__temp_88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__)] = __temp_89__; } } } /*Device code End */ /* Host Code Begin */ extern "C" void jacobi(float * h_input, int N, int M, float * __var_0__){ /* Host allocation Begin */ float * input; hipMalloc(&input,sizeof(float)*((N-0)*(M-0))); Check_CUDA_Error("Allocation Error!! 
: input\n"); hipPointerAttribute_t ptrAttrib_h_input; hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice; if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess) if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice) memcpy_kind_h_input = hipMemcpyDeviceToDevice; hipGetLastError(); if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){ hipMemcpy(input,h_input,sizeof(float)*((N-0)*(M-0)), memcpy_kind_h_input); } float * __var_1__; hipMalloc(&__var_1__,sizeof(float)*((N-0)*(M-0))); Check_CUDA_Error("Allocation Error!! : __var_1__\n"); float * __copy_arr_0__; hipMalloc(&__copy_arr_0__,sizeof(float)*((N-0)*(M-0))); Check_CUDA_Error("Allocation Error!! : __copy_arr_0__\n"); float * __copy_arr_1__; hipMalloc(&__copy_arr_1__,sizeof(float)*((N-0)*(M-0))); Check_CUDA_Error("Allocation Error!! : __copy_arr_1__\n"); float * __copy_arr_2__; hipMalloc(&__copy_arr_2__,sizeof(float)*((N-0)*(M-0))); Check_CUDA_Error("Allocation Error!! : __copy_arr_2__\n"); /*Host Allocation End */ /* Kernel Launch Begin */ int __FORMA_MAX_SHARED_MEM__; hipDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,hipDeviceAttributeMaxSharedMemoryPerBlock,0); #ifdef _TIMER_ hipEvent_t _forma_timer_start_,_forma_timer_stop_; hipEventCreate(&_forma_timer_start_); hipEventCreate(&_forma_timer_stop_); hipEventRecord(_forma_timer_start_,0); #endif int __size_0___kernel___forma_kernel__0__ = ((M-1) - 0 ) + 1; int __size_1___kernel___forma_kernel__0__ = ((N-1) - 0 ) + 1; int __max_occupancy_blocksize___kernel___forma_kernel__0__; int _max_occupancy_gridsize___kernel___forma_kernel__0__; hipOccupancyMaxPotentialBlockSize(&_max_occupancy_gridsize___kernel___forma_kernel__0__,&__max_occupancy_blocksize___kernel___forma_kernel__0__,(const void*)__kernel___forma_kernel__0__,0,0); int __max_occupancy_blocksize___kernel___forma_kernel__0___0 = pow((double)__max_occupancy_blocksize___kernel___forma_kernel__0__, (double)(1.0/(double)2)); __max_occupancy_blocksize___kernel___forma_kernel__0___0 = FORMA_MAX(__max_occupancy_blocksize___kernel___forma_kernel__0___0/32, 1)*32; int __block_0___kernel___forma_kernel__0__ = FORMA_MAX(FORMA_MIN(FORMA_MIN(__max_occupancy_blocksize___kernel___forma_kernel__0___0,FORMA_MAX((__size_0___kernel___forma_kernel__0__)/32,1)*32),FORMA_MAX_BLOCKDIM_0),9); __max_occupancy_blocksize___kernel___forma_kernel__0__ /= __block_0___kernel___forma_kernel__0__; int __max_occupancy_blocksize___kernel___forma_kernel__0___1 = __max_occupancy_blocksize___kernel___forma_kernel__0__; int __block_1___kernel___forma_kernel__0__ = FORMA_MAX(FORMA_MIN(FORMA_MIN(__max_occupancy_blocksize___kernel___forma_kernel__0___1,__size_1___kernel___forma_kernel__0__),FORMA_MAX_BLOCKDIM_1),9); __max_occupancy_blocksize___kernel___forma_kernel__0__ /= __block_1___kernel___forma_kernel__0__; dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__); int __SMemSize___kernel___forma_kernel__0__ = 0; __SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__); while( __SMemSize___kernel___forma_kernel__0__ > __FORMA_MAX_SHARED_MEM__){ if( __blockConfig___kernel___forma_kernel__0__.y/2 > 9) __blockConfig___kernel___forma_kernel__0__.y /= 2; __SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__); if( __SMemSize___kernel___forma_kernel__0__ <= __FORMA_MAX_SHARED_MEM__) break; if( 
__blockConfig___kernel___forma_kernel__0__.x/2 > FORMA_MIN(32,9)) __blockConfig___kernel___forma_kernel__0__.x /= 2; __SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__); } int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x+GAPX); int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.y+GAPY); dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__); hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n"); dim3 __blockConfig___kernel___forma_kernel__1__(__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y); hipLaunchKernelGGL(( __kernel___forma_kernel__1__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__1__), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__1__\n"); dim3 __blockConfig___kernel___forma_kernel__2__(__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y); hipLaunchKernelGGL(( __kernel___forma_kernel__2__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__2__), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__2__\n"); dim3 __blockConfig___kernel___forma_kernel__3__(__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y); hipLaunchKernelGGL(( __kernel___forma_kernel__3__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__3__), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__); Check_CUDA_Error("Kernel Launch Error!! 
: __kernel___forma_kernel__3__\n");
hipPointerAttribute_t ptrAttrib___var_0__;
hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost;
if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess)
  if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice)
    memcpy_kind___var_0__ = hipMemcpyDeviceToDevice;
hipGetLastError();
hipMemcpy(__var_0__,__var_1__, sizeof(float)*((N-0)*(M-0)), memcpy_kind___var_0__);
#ifdef _TIMER_
hipEventRecord(_forma_timer_stop_,0);
hipEventSynchronize(_forma_timer_stop_);
float elapsedTime;
hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
hipEventDestroy(_forma_timer_start_);
hipEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
hipFree(input);
hipFree(__var_1__);
hipFree(__copy_arr_0__);
hipFree(__copy_arr_1__);
hipFree(__copy_arr_2__);
}
/*Host Free End*/
9194085517a62f148f006b81ca8f00220cfa15c7.cu
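/* CUDA source of the Forma-generated weighted 5-point stencil ("jacobi").
   Four kernel variants tile the grid with gaps of GAPX/GAPY between blocks:
   kernel 0 computes the interior tiles, while kernels 1-3 cover the vertical,
   horizontal, and corner gap strips, exchanging halo values through the
   __copy_arr_* staging buffers. */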
#include "cuda.h" #ifdef _TIMER_ #include "cuda_profiler_api.h" #endif #include "stdio.h" #define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) ) #define max(a,b) FORMA_MAX(a,b) #define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) ) #define min(a,b) FORMA_MIN(a,b) #define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 ) #define GAPX (22) #define GAPY (22) #define EXTENT (5) #ifndef FORMA_MAX_BLOCKDIM_0 #define FORMA_MAX_BLOCKDIM_0 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_1 #define FORMA_MAX_BLOCKDIM_1 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_2 #define FORMA_MAX_BLOCKDIM_2 1024 #endif template<typename T> __global__ void __kernel_init__(T* input, T value) { int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x); input[loc] = value; } template<typename T> void initialize_array(T* d_input, int size, T value) { dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0)); dim3 init_block(FORMA_MAX_BLOCKDIM_0); __kernel_init__<<<init_grid,init_block>>>(d_input,value); } void Check_CUDA_Error(const char* message); /*Texture references */ /*Shared Memory Variable */ extern __shared__ char __FORMA_SHARED_MEM__[]; /* Device code Begin */ __global__ void __kernel___forma_kernel__0__(float * input, int N, int M, float * __copy_arr_0__, float * __copy_arr_1__, float * __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __var_1__){ int __FORMA_SHARED_MEM_OFFSET__ = 0; float* __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0))); float* __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0))); float * __tilevar_2__ = __tilevar_0__; float * __tilevar_3__ = __tilevar_1__; float * __tilevar_4__ = __tilevar_0__; float * __tilevar_5__ = __tilevar_1__; int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X) + GAPX); int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y) + GAPY); int __iter_2__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ; if( __iter_2__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(N-1)) ){ int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ; if( __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1)) ){ __tilevar_2__[__iter_3__+(0-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(0-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__)]; } } __syncthreads(); int __iter_4__ = FORMA_MAX((__iter_1__+1),1) + (int)(threadIdx.y) ; if( __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){ int __iter_5__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ; if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){ float __temp_2__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(-1)+(0-(__iter_1__+0)))]); float __temp_5__ = (__tilevar_2__[__iter_5__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))]); float __temp_6__ = (5 * __temp_2__ + 12 * __temp_5__); float __temp_9__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))]); float __temp_10__ = (__temp_6__ + 15 * __temp_9__); float __temp_13__ = (__tilevar_2__[__iter_5__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))]); float __temp_14__ = (__temp_10__ + 12 * __temp_13__); float __temp_17__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(0-(__iter_1__+0)))]); float __temp_18__ = 
(__temp_14__ + 5 * __temp_17__); float __temp_19__ = (__temp_18__ / 118); __tilevar_3__[__iter_5__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+1)))] = __temp_19__; } } int __iter_6__ = FORMA_MAX((__iter_1__+1),1) + (int)(threadIdx.y) ; if( __iter_6__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){ int __iter_7__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ; if( __iter_7__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){ if (__iter_6__ < (FORMA_MAX((__iter_1__+1),1)+2) || __iter_6__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2))-2) || __iter_7__ < (FORMA_MAX((__iter_0__+1),1)+2) || __iter_7__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2))-2)) { __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)] = __tilevar_3__[__iter_7__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(0-(__iter_1__+1)))]; } } } __syncthreads(); int __iter_10__ = FORMA_MAX((__iter_1__+2),1) + (int)(threadIdx.y) ; if( __iter_10__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){ int __iter_11__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ; if( __iter_11__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){ float __temp_32__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(-1)+(0-(__iter_1__+1)))]); float __temp_35__ = (__tilevar_3__[__iter_11__+(-1)+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(0-(__iter_1__+1)))]); float __temp_36__ = (5 * __temp_32__ + 12 * __temp_35__); float __temp_39__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(0-(__iter_1__+1)))]); float __temp_40__ = (__temp_36__ + 15 * __temp_39__); float __temp_43__ = (__tilevar_3__[__iter_11__+(1)+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(0-(__iter_1__+1)))]); float __temp_44__ = (__temp_40__ + 12 * __temp_43__); float __temp_47__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(0-(__iter_1__+1)))]); float __temp_48__ = (__temp_44__ + 5 * __temp_47__); float __temp_49__ = (__temp_48__ / 118); __tilevar_4__[__iter_11__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(0-(__iter_1__+2)))] = __temp_49__; } } int __iter_12__ = FORMA_MAX((__iter_1__+2),1) + (int)(threadIdx.y) ; if( __iter_12__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){ int __iter_13__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ; if( __iter_13__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){ if (__iter_12__ < (FORMA_MAX((__iter_1__+2),1)+2) || __iter_12__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2))-2) || __iter_13__ < (FORMA_MAX((__iter_0__+2),1)+2) || __iter_13__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2))-2)) { __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)] = __tilevar_4__[__iter_13__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(0-(__iter_1__+2)))]; } } } __syncthreads(); int __iter_16__ = FORMA_MAX((__iter_1__+3),1) + (int)(threadIdx.y) ; if( __iter_16__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){ int __iter_17__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ; if( __iter_17__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){ float __temp_60__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(-1)+(0-(__iter_1__+2)))]); float __temp_61__ = (__tilevar_4__[__iter_17__+(-1)+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+2)))]); float __temp_62__ = (5 * __temp_60__ + 12 * __temp_61__); float __temp_63__ = 
(__tilevar_4__[__iter_17__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+2)))]); float __temp_64__ = (__temp_62__ + 15 * __temp_63__); float __temp_65__ = (__tilevar_4__[__iter_17__+(1)+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+2)))]); float __temp_66__ = (__temp_64__ + 12 * __temp_65__); float __temp_67__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(0-(__iter_1__+2)))]); float __temp_68__ = (__temp_66__ + 5 * __temp_67__); float __temp_69__ = (__temp_68__ / 118); __tilevar_5__[__iter_17__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+3)))] = __temp_69__; } } int __iter_18__ = FORMA_MAX((__iter_1__+3),1) + (int)(threadIdx.y) ; if( __iter_18__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){ int __iter_19__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ; if( __iter_19__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){ if (__iter_18__ < (FORMA_MAX((__iter_1__+3),1)+2) || __iter_18__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2))-2) || __iter_19__ < (FORMA_MAX((__iter_0__+3),1)+2) || __iter_19__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2))-2)) { __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)] = __tilevar_5__[__iter_19__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(0-(__iter_1__+3)))]; } } } __syncthreads(); int __iter_22__ = FORMA_MAX((__iter_1__+4),1) + (int)(threadIdx.y) ; if( __iter_22__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(N-2)) ){ int __iter_23__ = FORMA_MAX((__iter_0__+4),1) + (int)(threadIdx.x) ; if( __iter_23__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2)) ){ float __temp_80__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(-1)+(0-(__iter_1__+3)))]); float __temp_81__ = (__tilevar_5__[__iter_23__+(-1)+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(0-(__iter_1__+3)))]); float __temp_82__ = (5 * __temp_80__ + 12 * __temp_81__); float __temp_83__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(0-(__iter_1__+3)))]); float __temp_84__ = (__temp_82__ + 15 * __temp_83__); float __temp_85__ = (__tilevar_5__[__iter_23__+(1)+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(0-(__iter_1__+3)))]); float __temp_86__ = (__temp_84__ + 12 * __temp_85__); float __temp_87__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(0-(__iter_1__+3)))]); float __temp_88__ = (__temp_86__ + 5 * __temp_87__); float __temp_89__ = (__temp_88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__)] = __temp_89__; } } } int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){ int FORMA_BLOCKDIM_Y = (int)(blockDim.y); int FORMA_BLOCKDIM_X = (int)(blockDim.x); int SMemSize = 0; SMemSize += sizeof(float)*(2*(FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0)); return SMemSize; } __global__ void __kernel___forma_kernel__1__(float * input, int N, int M, float * __copy_arr_0__, float * __copy_arr_1__, float * __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __var_1__){ int __FORMA_SHARED_MEM_OFFSET__ = 0; float* __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0))); float* __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0))); float * __tilevar_2__ = __tilevar_0__; float * __tilevar_3__ = 
__tilevar_1__; float * __tilevar_4__ = __tilevar_0__; float * __tilevar_5__ = __tilevar_1__; int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X) + GAPX) + (int)(FORMA_BLOCKDIM_X); int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y) + GAPY); int __iter_2__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ; if( __iter_2__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(N-1)) ){ int __iter_3__ = FORMA_MAX(__iter_0__-2,0) + (int)(threadIdx.x) ; if( __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-1)) ){ __tilevar_2__[__iter_3__+(EXTENT-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(0-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__)]; } } __syncthreads(); int __iter_4__ = FORMA_MAX((__iter_1__+1),1) + (int)(threadIdx.y) ; if( __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){ int __iter_5__ = FORMA_MAX((__iter_0__-1),1) + (int)(threadIdx.x) ; if( __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2)) ){ float __temp_2__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(-1)+(0-(__iter_1__+0)))]); float __temp_5__ = (__tilevar_2__[__iter_5__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))]); float __temp_6__ = (5 * __temp_2__ + 12 * __temp_5__); float __temp_9__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))]); float __temp_10__ = (__temp_6__ + 15 * __temp_9__); float __temp_13__ = (__tilevar_2__[__iter_5__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))]); float __temp_14__ = (__temp_10__ + 12 * __temp_13__); float __temp_17__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(0-(__iter_1__+0)))]); float __temp_18__ = (__temp_14__ + 5 * __temp_17__); float __temp_19__ = (__temp_18__ / 118); __tilevar_3__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))] = __temp_19__; } } int __iter_6__ = FORMA_MAX((__iter_1__+1),1) + (int)(threadIdx.y) ; if( __iter_6__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){ int __iter_7__ = FORMA_MAX((__iter_0__-1),1) + (int)(threadIdx.x) ; if( __iter_7__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2)) ){ if (__iter_6__ < (FORMA_MAX((__iter_1__+1),1)+2) || __iter_6__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2))-2)) { __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)] = __tilevar_3__[__iter_7__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(0-(__iter_1__+0)))]; } } } __iter_6__ = FORMA_MAX((__iter_1__+1),1) + (int)(threadIdx.y) ; if( __iter_6__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){ int __iter_7__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ; if( __iter_7__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){ if (__iter_7__ < FORMA_MAX((__iter_0__-1),1) || __iter_7__ > FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2))) { __tilevar_3__[__iter_7__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(0-(__iter_1__+0)))] = __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)]; } } } __syncthreads(); int __iter_10__ = FORMA_MAX((__iter_1__+2),1) + (int)(threadIdx.y) ; if( __iter_10__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){ int __iter_11__ = FORMA_MAX((__iter_0__-2),1) + (int)(threadIdx.x) ; if( __iter_11__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2)) ){ float __temp_32__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*( __iter_10__+(-1)+(0-(__iter_1__+0)))]); float __temp_35__ = 
(__tilevar_3__[__iter_11__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(0-(__iter_1__+0)))]); float __temp_36__ = (5 * __temp_32__ + 12 * __temp_35__); float __temp_39__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(0-(__iter_1__+0)))]); float __temp_40__ = (__temp_36__ + 15 * __temp_39__); float __temp_43__ = (__tilevar_3__[__iter_11__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(0-(__iter_1__+0)))]); float __temp_44__ = (__temp_40__ + 12 * __temp_43__); float __temp_47__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(0-(__iter_1__+0)))]); float __temp_48__ = (__temp_44__ + 5 * __temp_47__); float __temp_49__ = (__temp_48__ / 118); __tilevar_4__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(0-(__iter_1__+0)))] = __temp_49__; } } int __iter_12__ = FORMA_MAX((__iter_1__+2),1) + (int)(threadIdx.y) ; if( __iter_12__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){ int __iter_13__ = FORMA_MAX((__iter_0__-2),1) + (int)(threadIdx.x) ; if( __iter_13__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2)) ){ if (__iter_12__ < (FORMA_MAX((__iter_1__+2),1)+2) || __iter_12__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2))-2)) { __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)] = __tilevar_4__[__iter_13__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(0-(__iter_1__+0)))]; } } } __iter_12__ = FORMA_MAX((__iter_1__+2),1) + (int)(threadIdx.y) ; if( __iter_12__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){ int __iter_13__ = FORMA_MAX((__iter_0__-4),1) + (int)(threadIdx.x) ; if( __iter_13__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) ){ if (__iter_13__ < FORMA_MAX((__iter_0__-2),1) || __iter_13__ > FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2)) ) { __tilevar_4__[__iter_13__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(0-(__iter_1__+0)))] = __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)]; } } } __syncthreads(); int __iter_16__ = FORMA_MAX((__iter_1__+3),1) + (int)(threadIdx.y) ; if( __iter_16__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){ int __iter_17__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ; if( __iter_17__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){ float __temp_60__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(-1)+(0-(__iter_1__+0)))]); float __temp_61__ = (__tilevar_4__[__iter_17__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+0)))]); float __temp_62__ = (5 * __temp_60__ + 12 * __temp_61__); float __temp_63__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+0)))]); float __temp_64__ = (__temp_62__ + 15 * __temp_63__); float __temp_65__ = (__tilevar_4__[__iter_17__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+0)))]); float __temp_66__ = (__temp_64__ + 12 * __temp_65__); float __temp_67__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(0-(__iter_1__+0)))]); float __temp_68__ = (__temp_66__ + 5 * __temp_67__); float __temp_69__ = (__temp_68__ / 118); __tilevar_5__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+0)))] = __temp_69__; } } int __iter_18__ = FORMA_MAX((__iter_1__+3),1) + (int)(threadIdx.y) ; if( __iter_18__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){ int __iter_19__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ; if( __iter_19__ <= 
FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){ if (__iter_18__ < (FORMA_MAX((__iter_1__+3),1)+2) || __iter_18__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2))-2)) { __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)] = __tilevar_5__[__iter_19__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(0-(__iter_1__+0)))]; } } } __iter_18__ = FORMA_MAX((__iter_1__+3),1) + (int)(threadIdx.y) ; if( __iter_18__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){ int __iter_19__ = FORMA_MAX((__iter_0__-5),1) + (int)(threadIdx.x) ; if( __iter_19__ <= FORMA_MIN(((__iter_0__+GAPX+5)-1),(M-2)) ){ if (__iter_19__ < FORMA_MAX((__iter_0__-3),1) || __iter_19__ > FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2))) { __tilevar_5__[__iter_19__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(0-(__iter_1__+0)))] = __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)]; } } } __syncthreads(); int __iter_22__ = FORMA_MAX((__iter_1__+4),1) + (int)(threadIdx.y) ; if( __iter_22__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(N-2)) ){ int __iter_23__ = FORMA_MAX((__iter_0__-4),1) + (int)(threadIdx.x) ; if( __iter_23__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) ){ float __temp_80__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(-1)+(0-(__iter_1__+0)))]); float __temp_81__ = (__tilevar_5__[__iter_23__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(0-(__iter_1__+0)))]); float __temp_82__ = (5 * __temp_80__ + 12 * __temp_81__); float __temp_83__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(0-(__iter_1__+0)))]); float __temp_84__ = (__temp_82__ + 15 * __temp_83__); float __temp_85__ = (__tilevar_5__[__iter_23__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(0-(__iter_1__+0)))]); float __temp_86__ = (__temp_84__ + 12 * __temp_85__); float __temp_87__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(0-(__iter_1__+0)))]); float __temp_88__ = (__temp_86__ + 5 * __temp_87__); float __temp_89__ = (__temp_88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__)] = __temp_89__; } } } __global__ void __kernel___forma_kernel__2__(float * input, int N, int M, float * __copy_arr_0__, float * __copy_arr_1__, float * __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __var_1__){ int __FORMA_SHARED_MEM_OFFSET__ = 0; float* __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0))); float* __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0))); float * __tilevar_2__ = __tilevar_0__; float * __tilevar_3__ = __tilevar_1__; float * __tilevar_4__ = __tilevar_0__; float * __tilevar_5__ = __tilevar_1__; int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X) + GAPX); int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y) + GAPY) + (int)(FORMA_BLOCKDIM_Y); int __iter_2__ = FORMA_MAX(__iter_1__-2,0) + (int)(threadIdx.y) ; if( __iter_2__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-1)) ){ int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ; if( __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1)) ){ __tilevar_2__[__iter_3__+(0-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(EXTENT-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__)]; } } __syncthreads(); int __iter_4__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.y) ; if( 
__iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(N-2)) ){ int __iter_5__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ; if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){ float __temp_2__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_5__ = (__tilevar_2__[__iter_5__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))]); float __temp_6__ = (5 * __temp_2__ + 12 * __temp_5__); float __temp_9__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))]); float __temp_10__ = (__temp_6__ + 15 * __temp_9__); float __temp_13__ = (__tilevar_2__[ __iter_5__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))]); float __temp_14__ = (__temp_10__ + 12 * __temp_13__); float __temp_17__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_18__ = (__temp_14__ + 5 * __temp_17__); float __temp_19__ = (__temp_18__ / 118); __tilevar_3__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))] = __temp_19__; } } int __iter_6__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.y) ; if( __iter_6__ <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(N-2)) ){ int __iter_7__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ; if( __iter_7__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){ if (__iter_7__ < (FORMA_MAX((__iter_0__+1),1)+2) || __iter_7__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2))-2)) { __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)] = __tilevar_3__[__iter_7__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(EXTENT-(__iter_1__+0)))]; } } } __iter_6__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ; if( __iter_6__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2)) ){ int __iter_7__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ; if( __iter_7__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){ if (__iter_6__ < FORMA_MAX((__iter_1__-1),1) || __iter_6__ > FORMA_MIN(((__iter_1__+GAPY+1)-1),(N-2))) { __tilevar_3__[__iter_7__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(EXTENT-(__iter_1__+0)))] = __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)]; } } } __syncthreads(); int __iter_10__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.y) ; if( __iter_10__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-2)) ){ int __iter_11__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ; if( __iter_11__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){ float __temp_32__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_35__ = (__tilevar_3__[__iter_11__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))]); float __temp_36__ = (5 * __temp_32__ + 12 * __temp_35__); float __temp_39__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))]); float __temp_40__ = (__temp_36__ + 15 * __temp_39__); float __temp_43__ = (__tilevar_3__[__iter_11__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))]); float __temp_44__ = (__temp_40__ + 12 * __temp_43__); float __temp_47__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_48__ = (__temp_44__ + 5 * __temp_47__); float __temp_49__ = (__temp_48__ / 118); 
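/* result of the second stencil pass; __tilevar_4__ aliases __tilevar_0__, so the two shared buffers are reused ping-pong style across passes */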
__tilevar_4__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))] = __temp_49__; } } int __iter_12__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.y) ; if( __iter_12__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-2)) ){ int __iter_13__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ; if( __iter_13__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){ if (__iter_13__ < (FORMA_MAX((__iter_0__+2),1)+2) || __iter_13__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2))-2)) { __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)] = __tilevar_4__[__iter_13__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(EXTENT-(__iter_1__+0)))]; } } } __iter_12__ = FORMA_MAX((__iter_1__-4),1) + (int)(threadIdx.y) ; if( __iter_12__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(N-2)) ){ int __iter_13__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ; if( __iter_13__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){ if (__iter_12__ < FORMA_MAX((__iter_1__-2),1) || __iter_12__ > FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-2))) { __tilevar_4__[__iter_13__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(EXTENT-(__iter_1__+0)))] = __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)]; } } } __syncthreads(); int __iter_16__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ; if( __iter_16__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2)) ){ int __iter_17__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ; if( __iter_17__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){ float __temp_60__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_61__ = (__tilevar_4__[__iter_17__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]); float __temp_62__ = (5 * __temp_60__ + 12 * __temp_61__); float __temp_63__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]); float __temp_64__ = (__temp_62__ + 15 * __temp_63__); float __temp_65__ = (__tilevar_4__[__iter_17__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]); float __temp_66__ = (__temp_64__ + 12 * __temp_65__); float __temp_67__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_68__ = (__temp_66__ + 5 * __temp_67__); float __temp_69__ = (__temp_68__ / 118); __tilevar_5__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))] = __temp_69__; } } int __iter_18__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ; if( __iter_18__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2)) ){ int __iter_19__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ; if( __iter_19__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){ if (__iter_19__ < (FORMA_MAX((__iter_0__+3),1)+2) || __iter_19__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2))-2)) { __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)] = __tilevar_5__[__iter_19__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(EXTENT-(__iter_1__+0)))]; } } } __iter_18__ = FORMA_MAX((__iter_1__-5),1) + (int)(threadIdx.y) ; if( __iter_18__ <= FORMA_MIN(((__iter_1__+GAPY+5)-1),(N-2)) ){ int __iter_19__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ; if( __iter_19__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){ if (__iter_18__ < FORMA_MAX((__iter_1__-3),1) || __iter_18__ > FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2))) { 
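/* rows outside the locally computed range are refilled from the __copy_arr_2__ staging buffer written by the other kernel variants */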
__tilevar_5__[__iter_19__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(EXTENT-(__iter_1__+0)))] = __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)]; } } } __syncthreads(); int __iter_22__ = FORMA_MAX((__iter_1__-4),1) + (int)(threadIdx.y) ; if( __iter_22__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(N-2)) ){ int __iter_23__ = FORMA_MAX((__iter_0__+4),1) + (int)(threadIdx.x) ; if( __iter_23__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2)) ){ float __temp_80__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_81__ = (__tilevar_5__[__iter_23__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]); float __temp_82__ = (5 * __temp_80__ + 12 * __temp_81__); float __temp_83__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]); float __temp_84__ = (__temp_82__ + 15 * __temp_83__); float __temp_85__ = (__tilevar_5__[__iter_23__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]); float __temp_86__ = (__temp_84__ + 12 * __temp_85__); float __temp_87__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_88__ = (__temp_86__ + 5 * __temp_87__); float __temp_89__ = (__temp_88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__)] = __temp_89__; } } } __global__ void __kernel___forma_kernel__3__(float * input, int N, int M, float * __copy_arr_0__, float * __copy_arr_1__, float * __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __var_1__){ int __FORMA_SHARED_MEM_OFFSET__ = 0; float* __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0))); float* __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0))); float * __tilevar_2__ = __tilevar_0__; float * __tilevar_3__ = __tilevar_1__; float * __tilevar_4__ = __tilevar_0__; float * __tilevar_5__ = __tilevar_1__; int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X) + GAPX) + (int)(FORMA_BLOCKDIM_X); int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y) + GAPY) + (int)(FORMA_BLOCKDIM_Y); int __iter_2__ = FORMA_MAX(__iter_1__-2,0) + (int)(threadIdx.y) ; if( __iter_2__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-1)) ){ int __iter_3__ = FORMA_MAX(__iter_0__-2,0) + (int)(threadIdx.x) ; if( __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-1)) ){ __tilevar_2__[__iter_3__+(EXTENT-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(EXTENT-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__)]; } } __syncthreads(); int __iter_4__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.y) ; if( __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(N-2)) ){ int __iter_5__ = FORMA_MAX((__iter_0__-1),1) + (int)(threadIdx.x) ; if( __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2)) ){ float __temp_2__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_5__ = (__tilevar_2__[__iter_5__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))]); float __temp_6__ = (5 * __temp_2__ + 12 * __temp_5__); float __temp_9__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))]); float __temp_10__ = (__temp_6__ + 15 * 
__temp_9__); float __temp_13__ = (__tilevar_2__[__iter_5__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))]); float __temp_14__ = (__temp_10__ + 12 * __temp_13__); float __temp_17__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*( __iter_4__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_18__ = (__temp_14__ + 5 * __temp_17__); float __temp_19__ = (__temp_18__ / 118); __tilevar_3__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))] = __temp_19__; } } int __iter_6__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ; if( __iter_6__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2)) ){ int __iter_7__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ; if( __iter_7__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){ if (__iter_6__ < FORMA_MAX((__iter_1__-1),1) || __iter_6__ > FORMA_MIN(((__iter_1__+GAPY+1)-1),(N-2)) || __iter_7__ < FORMA_MAX((__iter_0__-1),1) || __iter_7__ > FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2))) { __tilevar_3__[__iter_7__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(EXTENT-(__iter_1__+0)))] = __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)]; } } } __syncthreads(); int __iter_10__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.y) ; if( __iter_10__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-2)) ){ int __iter_11__ = FORMA_MAX((__iter_0__-2),1) + (int)(threadIdx.x) ; if( __iter_11__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2)) ){ float __temp_32__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_35__ = (__tilevar_3__[__iter_11__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))]); float __temp_36__ = (5 * __temp_32__ + 12 * __temp_35__); float __temp_39__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))]); float __temp_40__ = (__temp_36__ + 15 * __temp_39__); float __temp_43__ = (__tilevar_3__[__iter_11__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))]); float __temp_44__ = (__temp_40__ + 12 * __temp_43__); float __temp_47__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_48__ = (__temp_44__ + 5 * __temp_47__); float __temp_49__ = (__temp_48__ / 118); __tilevar_4__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))] = __temp_49__; } } int __iter_12__ = FORMA_MAX((__iter_1__-4),1) + (int)(threadIdx.y) ; if( __iter_12__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(N-2)) ){ int __iter_13__ = FORMA_MAX((__iter_0__-4),1) + (int)(threadIdx.x) ; if( __iter_13__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) ){ if (__iter_12__ < FORMA_MAX((__iter_1__-2),1) || __iter_12__ > FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-2)) || __iter_13__ < FORMA_MAX((__iter_0__-2),1) || __iter_13__ > FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2))) { __tilevar_4__[__iter_13__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(EXTENT-(__iter_1__+0)))] = __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)]; } } } __syncthreads(); int __iter_16__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ; if( __iter_16__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2)) ){ int __iter_17__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ; if( __iter_17__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){ float __temp_60__ = 
(__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_61__ = (__tilevar_4__[__iter_17__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]); float __temp_62__ = (5 * __temp_60__ + 12 * __temp_61__); float __temp_63__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]); float __temp_64__ = (__temp_62__ + 15 * __temp_63__); float __temp_65__ = (__tilevar_4__[__iter_17__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]); float __temp_66__ = (__temp_64__ + 12 * __temp_65__); float __temp_67__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_68__ = (__temp_66__ + 5 * __temp_67__); float __temp_69__ = (__temp_68__ / 118); __tilevar_5__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))] = __temp_69__; } } int __iter_18__ = FORMA_MAX((__iter_1__-5),1) + (int)(threadIdx.y) ; if( __iter_18__ <= FORMA_MIN(((__iter_1__+GAPY+5)-1),(N-2)) ){ int __iter_19__ = FORMA_MAX((__iter_0__-5),1) + (int)(threadIdx.x) ; if( __iter_19__ <= FORMA_MIN(((__iter_0__+GAPX+5)-1),(M-2)) ){ if (__iter_18__ < FORMA_MAX((__iter_1__-3),1) || __iter_18__ > FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2)) || __iter_19__ < FORMA_MAX((__iter_0__-3),1) || __iter_19__ > FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2))) { __tilevar_5__[__iter_19__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(EXTENT-(__iter_1__+0)))] = __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)]; } } } __syncthreads(); int __iter_22__ = FORMA_MAX((__iter_1__-4),1) + (int)(threadIdx.y) ; if( __iter_22__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(N-2)) ){ int __iter_23__ = FORMA_MAX((__iter_0__-4),1) + (int)(threadIdx.x) ; if( __iter_23__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) ){ float __temp_80__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_81__ = (__tilevar_5__[__iter_23__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]); float __temp_82__ = (5 * __temp_80__ + 12 * __temp_81__); float __temp_83__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]); float __temp_84__ = (__temp_82__ + 15 * __temp_83__); float __temp_85__ = (__tilevar_5__[__iter_23__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]); float __temp_86__ = (__temp_84__ + 12 * __temp_85__); float __temp_87__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_88__ = (__temp_86__ + 5 * __temp_87__); float __temp_89__ = (__temp_88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__)] = __temp_89__; } } } /*Device code End */ /* Host Code Begin */ extern "C" void jacobi(float * h_input, int N, int M, float * __var_0__){ /* Host allocation Begin */ float * input; cudaMalloc(&input,sizeof(float)*((N-0)*(M-0))); Check_CUDA_Error("Allocation Error!! 
: input\n"); cudaPointerAttributes ptrAttrib_h_input; cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice; if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess) if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice) memcpy_kind_h_input = cudaMemcpyDeviceToDevice; cudaGetLastError(); if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){ cudaMemcpy(input,h_input,sizeof(float)*((N-0)*(M-0)), memcpy_kind_h_input); } float * __var_1__; cudaMalloc(&__var_1__,sizeof(float)*((N-0)*(M-0))); Check_CUDA_Error("Allocation Error!! : __var_1__\n"); float * __copy_arr_0__; cudaMalloc(&__copy_arr_0__,sizeof(float)*((N-0)*(M-0))); Check_CUDA_Error("Allocation Error!! : __copy_arr_0__\n"); float * __copy_arr_1__; cudaMalloc(&__copy_arr_1__,sizeof(float)*((N-0)*(M-0))); Check_CUDA_Error("Allocation Error!! : __copy_arr_1__\n"); float * __copy_arr_2__; cudaMalloc(&__copy_arr_2__,sizeof(float)*((N-0)*(M-0))); Check_CUDA_Error("Allocation Error!! : __copy_arr_2__\n"); /*Host Allocation End */ /* Kernel Launch Begin */ int __FORMA_MAX_SHARED_MEM__; cudaDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,cudaDevAttrMaxSharedMemoryPerBlock,0); #ifdef _TIMER_ cudaEvent_t _forma_timer_start_,_forma_timer_stop_; cudaEventCreate(&_forma_timer_start_); cudaEventCreate(&_forma_timer_stop_); cudaEventRecord(_forma_timer_start_,0); #endif int __size_0___kernel___forma_kernel__0__ = ((M-1) - 0 ) + 1; int __size_1___kernel___forma_kernel__0__ = ((N-1) - 0 ) + 1; int __max_occupancy_blocksize___kernel___forma_kernel__0__; int _max_occupancy_gridsize___kernel___forma_kernel__0__; cudaOccupancyMaxPotentialBlockSize(&_max_occupancy_gridsize___kernel___forma_kernel__0__,&__max_occupancy_blocksize___kernel___forma_kernel__0__,(const void*)__kernel___forma_kernel__0__,0,0); int __max_occupancy_blocksize___kernel___forma_kernel__0___0 = pow((double)__max_occupancy_blocksize___kernel___forma_kernel__0__, (double)(1.0/(double)2)); __max_occupancy_blocksize___kernel___forma_kernel__0___0 = FORMA_MAX(__max_occupancy_blocksize___kernel___forma_kernel__0___0/32, 1)*32; int __block_0___kernel___forma_kernel__0__ = FORMA_MAX(FORMA_MIN(FORMA_MIN(__max_occupancy_blocksize___kernel___forma_kernel__0___0,FORMA_MAX((__size_0___kernel___forma_kernel__0__)/32,1)*32),FORMA_MAX_BLOCKDIM_0),9); __max_occupancy_blocksize___kernel___forma_kernel__0__ /= __block_0___kernel___forma_kernel__0__; int __max_occupancy_blocksize___kernel___forma_kernel__0___1 = __max_occupancy_blocksize___kernel___forma_kernel__0__; int __block_1___kernel___forma_kernel__0__ = FORMA_MAX(FORMA_MIN(FORMA_MIN(__max_occupancy_blocksize___kernel___forma_kernel__0___1,__size_1___kernel___forma_kernel__0__),FORMA_MAX_BLOCKDIM_1),9); __max_occupancy_blocksize___kernel___forma_kernel__0__ /= __block_1___kernel___forma_kernel__0__; dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__); int __SMemSize___kernel___forma_kernel__0__ = 0; __SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__); while( __SMemSize___kernel___forma_kernel__0__ > __FORMA_MAX_SHARED_MEM__){ if( __blockConfig___kernel___forma_kernel__0__.y/2 > 9) __blockConfig___kernel___forma_kernel__0__.y /= 2; __SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__); if( __SMemSize___kernel___forma_kernel__0__ <= __FORMA_MAX_SHARED_MEM__) break; if( 
__blockConfig___kernel___forma_kernel__0__.x/2 > FORMA_MIN(32,9)) __blockConfig___kernel___forma_kernel__0__.x /= 2; __SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__); } int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x+GAPX); int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.y+GAPY); dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__); __kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n"); dim3 __blockConfig___kernel___forma_kernel__1__(__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y); __kernel___forma_kernel__1__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__1__, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__1__\n"); dim3 __blockConfig___kernel___forma_kernel__2__(__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y); __kernel___forma_kernel__2__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__2__, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__2__\n"); dim3 __blockConfig___kernel___forma_kernel__3__(__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y); __kernel___forma_kernel__3__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__3__, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__); Check_CUDA_Error("Kernel Launch Error!! 
: __kernel___forma_kernel__3__\n"); cudaPointerAttributes ptrAttrib___var_0__; cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost; if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess) if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice) memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice; cudaGetLastError(); cudaMemcpy(__var_0__,__var_1__, sizeof(float)*((N-0)*(M-0)), memcpy_kind___var_0__); #ifdef _TIMER_ cudaEventRecord(_forma_timer_stop_,0); cudaEventSynchronize(_forma_timer_stop_); float elapsedTime; cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_); printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime); cudaEventDestroy(_forma_timer_start_); cudaEventDestroy(_forma_timer_stop_); #endif /*Kernel Launch End */ /* Host Free Begin */ cudaFree(input); cudaFree(__var_1__); cudaFree(__copy_arr_0__); cudaFree(__copy_arr_1__); cudaFree(__copy_arr_2__); } /*Host Free End*/
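For reference, the sketch below shows one way the extern "C" jacobi(...) wrapper above could be driven from plain host code. It is a minimal illustration only: the forward declaration mirrors the definition above, while the grid extents and fill value are arbitrary assumptions, not part of the generated source.

// Hypothetical driver for the generated jacobi(...) wrapper; compile and link
// against the file above. With host pointers, the wrapper allocates the device
// buffers, copies the input, launches the four pipelined kernels and copies the
// result back into the output buffer.
#include <cstdio>
#include <cstdlib>

extern "C" void jacobi(float *h_input, int N, int M, float *__var_0__);

int main()
{
    const int N = 1024, M = 1024;                     // grid extents (arbitrary)
    float *in  = (float*)malloc(sizeof(float) * N * M);
    float *out = (float*)malloc(sizeof(float) * N * M);
    for (int i = 0; i < N * M; ++i) in[i] = 1.0f;     // arbitrary initial field
    jacobi(in, N, M, out);                            // run the generated stencil pipeline
    printf("out[M+1] = %f\n", out[M + 1]);            // first interior point (row-major)
    free(in);
    free(out);
    return 0;
}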
079c5b18383ee063b0b6fea02c08718644f6f264.hip
// !!! This is a file automatically generated by hipify!!! /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/functional.hpp" #include "opencv2/core/cuda/transform.hpp" #include "opencv2/core/cuda/saturate_cast.hpp" #include "opencv2/core/cuda/simd_functions.hpp" #include "arithm_func_traits.hpp" using namespace cv::cuda; using namespace cv::cuda::device; namespace arithm { template <typename T, typename S> struct AbsDiffScalar : unary_function<T, T> { S val; __host__ explicit AbsDiffScalar(S val_) : val(val_) {} __device__ __forceinline__ T operator ()(T a) const { abs_func<S> f; return saturate_cast<T>(f(a - val)); } }; } namespace cv { namespace cuda { namespace device { template <typename T, typename S> struct TransformFunctorTraits< arithm::AbsDiffScalar<T, S> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)> { }; }}} namespace arithm { template <typename T, typename S> void absDiffScalar(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream) { AbsDiffScalar<T, S> op(static_cast<S>(val)); device::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, op, WithOutMask(), stream); } template void absDiffScalar<uchar, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream); template void absDiffScalar<schar, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream); template void absDiffScalar<ushort, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream); template void absDiffScalar<short, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream); template void absDiffScalar<int, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream); template void absDiffScalar<float, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream); template void absDiffScalar<double, double>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream); } #endif // CUDA_DISABLER
079c5b18383ee063b0b6fea02c08718644f6f264.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/functional.hpp" #include "opencv2/core/cuda/transform.hpp" #include "opencv2/core/cuda/saturate_cast.hpp" #include "opencv2/core/cuda/simd_functions.hpp" #include "arithm_func_traits.hpp" using namespace cv::cuda; using namespace cv::cuda::device; namespace arithm { template <typename T, typename S> struct AbsDiffScalar : unary_function<T, T> { S val; __host__ explicit AbsDiffScalar(S val_) : val(val_) {} __device__ __forceinline__ T operator ()(T a) const { abs_func<S> f; return saturate_cast<T>(f(a - val)); } }; } namespace cv { namespace cuda { namespace device { template <typename T, typename S> struct TransformFunctorTraits< arithm::AbsDiffScalar<T, S> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)> { }; }}} namespace arithm { template <typename T, typename S> void absDiffScalar(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream) { AbsDiffScalar<T, S> op(static_cast<S>(val)); device::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, op, WithOutMask(), stream); } template void absDiffScalar<uchar, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream); template void absDiffScalar<schar, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream); template void absDiffScalar<ushort, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream); template void absDiffScalar<short, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream); template void absDiffScalar<int, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream); template void absDiffScalar<float, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream); template void absDiffScalar<double, double>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream); } #endif // CUDA_DISABLER
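Since the functor above only ever computes saturate_cast<T>(|a - val|) per element, the standalone sketch below illustrates that semantics directly for uchar data, outside the cv::cuda::device::transform dispatch used by the real code; the kernel name, launch shape and rounding detail are illustrative assumptions, not OpenCV API.

// Illustration of AbsDiffScalar's per-element behaviour for uchar; the real
// implementation routes this through cv::cuda::device::transform instead.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void abs_diff_scalar_u8(const unsigned char *src, unsigned char *dst,
                                   int n, float val)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    float d = fabsf(static_cast<float>(src[i]) - val);  // abs_func<float>(a - val)
    d = fminf(fmaxf(d, 0.0f), 255.0f);                   // clamp as saturate_cast<uchar> would
    dst[i] = static_cast<unsigned char>(d + 0.5f);       // round to nearest (approximation)
}

int main()
{
    const int n = 8;
    unsigned char h_src[n] = {0, 10, 50, 90, 100, 150, 200, 255};
    unsigned char h_dst[n];
    unsigned char *d_src, *d_dst;
    cudaMalloc(&d_src, n);
    cudaMalloc(&d_dst, n);
    cudaMemcpy(d_src, h_src, n, cudaMemcpyHostToDevice);
    abs_diff_scalar_u8<<<1, n>>>(d_src, d_dst, n, 100.0f);  // dst = |src - 100|
    cudaMemcpy(h_dst, d_dst, n, cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; ++i) printf("%d ", h_dst[i]);
    printf("\n");
    cudaFree(d_src);
    cudaFree(d_dst);
    return 0;
}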
d7469950e1ebed38b628cff28712bded06718bdc.hip
// !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdint>
#include <cstdlib>
#include "SyncedMemory.h"
#include "lab2.h"

using namespace std;

#define CHECK {\
    auto e = hipDeviceSynchronize();\
    if (e != hipSuccess) {\
        printf("At " __FILE__ ":%d, %s\n", __LINE__, hipGetErrorString(e));\
        abort();\
    }\
}

int main(int argc, char **argv)
{
    Lab2VideoGenerator g;
    Lab2VideoInfo i;
    g.get_info(i);
    if (i.w == 0 || i.h == 0 || i.n_frame == 0 || i.fps_n == 0 || i.fps_d == 0) {
        puts("Cannot be zero");
        abort();
    } else if (i.w % 2 != 0 || i.h % 2 != 0) {
        puts("Only even frame size is supported");
        abort();
    }
    unsigned FRAME_SIZE = i.w*i.h*3/2;
    MemoryBuffer<uint8_t> frameb(FRAME_SIZE);
    auto frames = frameb.CreateSync(FRAME_SIZE);
    FILE *fp = fopen("result.y4m", "wb");
    fprintf(fp, "YUV4MPEG2 W%d H%d F%d:%d Ip A1:1 C420\n", i.w, i.h, i.fps_n, i.fps_d);
    for (unsigned j = 0; j < i.n_frame; ++j) {
        fputs("FRAME\n", fp);
        g.Generate(frames.get_gpu_wo());
        fwrite(frames.get_cpu_ro(), sizeof(uint8_t), FRAME_SIZE, fp);
    }
    fclose(fp);
    //system("pause");
    return 0;
}
d7469950e1ebed38b628cff28712bded06718bdc.cu
#include <cstdio>
#include <cstdint>
#include <cstdlib>
#include "SyncedMemory.h"
#include "lab2.h"

using namespace std;

#define CHECK {\
    auto e = cudaDeviceSynchronize();\
    if (e != cudaSuccess) {\
        printf("At " __FILE__ ":%d, %s\n", __LINE__, cudaGetErrorString(e));\
        abort();\
    }\
}

int main(int argc, char **argv)
{
    Lab2VideoGenerator g;
    Lab2VideoInfo i;
    g.get_info(i);
    if (i.w == 0 || i.h == 0 || i.n_frame == 0 || i.fps_n == 0 || i.fps_d == 0) {
        puts("Cannot be zero");
        abort();
    } else if (i.w % 2 != 0 || i.h % 2 != 0) {
        puts("Only even frame size is supported");
        abort();
    }
    unsigned FRAME_SIZE = i.w*i.h*3/2;
    MemoryBuffer<uint8_t> frameb(FRAME_SIZE);
    auto frames = frameb.CreateSync(FRAME_SIZE);
    FILE *fp = fopen("result.y4m", "wb");
    fprintf(fp, "YUV4MPEG2 W%d H%d F%d:%d Ip A1:1 C420\n", i.w, i.h, i.fps_n, i.fps_d);
    for (unsigned j = 0; j < i.n_frame; ++j) {
        fputs("FRAME\n", fp);
        g.Generate(frames.get_gpu_wo());
        fwrite(frames.get_cpu_ro(), sizeof(uint8_t), FRAME_SIZE, fp);
    }
    fclose(fp);
    //system("pause");
    return 0;
}
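One small observation on the file above: the CHECK macro is defined but never invoked in main(). A plausible usage (assumed, not shown in the original) is to drop it after each Generate() call inside the frame loop so asynchronous launch failures are reported with a file/line location:

// Hypothetical variant of the frame loop above, with CHECK added after the
// generator call; everything else is unchanged from the original main().
for (unsigned j = 0; j < i.n_frame; ++j) {
    fputs("FRAME\n", fp);
    g.Generate(frames.get_gpu_wo());
    CHECK;  // cudaDeviceSynchronize + abort with "At file:line, <error>" on failure
    fwrite(frames.get_cpu_ro(), sizeof(uint8_t), FRAME_SIZE, fp);
}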
c7ba98f33fa2e98d10b773243771fc8b7bb2cea5.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <raft/linalg/transpose.h> #include <test_utils.h> #include <cuml/datasets/make_blobs.hpp> #include <cuml/ensemble/randomforest.hpp> #include <raft/cuda_utils.cuh> namespace ML { using namespace MLCommon; struct RfInputs { int n_rows; int n_cols; int n_trees; float max_features; float max_samples; int max_depth; int max_leaves; bool bootstrap; bool bootstrap_features; int n_bins; int split_algo; int min_samples_leaf; int min_samples_split; float min_impurity_decrease; int n_streams; CRITERION split_criterion; float min_expected_acc; }; template <typename T> class RFBatchedClsTest : public ::testing::TestWithParam<RfInputs> { protected: void basicTest() { params = ::testing::TestWithParam<RfInputs>::GetParam(); DecisionTree::DecisionTreeParams tree_params; set_tree_params(tree_params, params.max_depth, params.max_leaves, params.max_features, params.n_bins, params.split_algo, params.min_samples_leaf, params.min_samples_split, params.min_impurity_decrease, params.bootstrap_features, params.split_criterion, false, true); RF_params rf_params; set_all_rf_params(rf_params, params.n_trees, params.bootstrap, params.max_samples, 0, params.n_streams, tree_params); CUDA_CHECK(hipStreamCreate(&stream)); handle.reset(new raft::handle_t(rf_params.n_streams)); handle->set_stream(stream); auto allocator = handle->get_device_allocator(); int data_len = params.n_rows * params.n_cols; data = (T*)allocator->allocate(data_len * sizeof(T), stream); labels = (int*)allocator->allocate(params.n_rows * sizeof(int), stream); predicted_labels = (int*)allocator->allocate(params.n_rows * sizeof(int), stream); Datasets::make_blobs(*handle, data, labels, params.n_rows, params.n_cols, 5, false, nullptr, nullptr, T(0.1), false, T(-0.5), T(0.5), 3536699ULL); labels_h.resize(params.n_rows); raft::update_host(labels_h.data(), labels, params.n_rows, stream); preprocess_labels(params.n_rows, labels_h, labels_map); raft::update_device(labels, labels_h.data(), params.n_rows, stream); // Training part forest = new typename ML::RandomForestMetaData<T, int>; null_trees_ptr(forest); fit(*handle, forest, data, params.n_rows, params.n_cols, labels, labels_map.size(), rf_params); // predict function expects row major lay out of data, so we need to // transpose the data first T* data_row_major; data_row_major = (T*)allocator->allocate(data_len * sizeof(T), stream); hipblasHandle_t cublas_h = handle->get_cublas_handle(); raft::linalg::transpose(*handle, data, data_row_major, params.n_rows, params.n_cols, stream); predict(*handle, forest, data_row_major, params.n_rows, params.n_cols, predicted_labels); raft::update_host(labels_h.data(), predicted_labels, params.n_rows, stream); RF_metrics tmp = score(*handle, forest, labels, params.n_rows, predicted_labels); CUDA_CHECK(hipStreamSynchronize(stream)); CUDA_CHECK(hipStreamDestroy(stream)); 
accuracy = tmp.accuracy; allocator->deallocate(data_row_major, data_len * sizeof(T), stream); } void SetUp() override { basicTest(); } void TearDown() override { auto allocator = handle->get_device_allocator(); accuracy = -1.0f; postprocess_labels(params.n_rows, labels_h, labels_map); labels_h.clear(); labels_map.clear(); allocator->deallocate(labels, params.n_rows * sizeof(int), stream); allocator->deallocate(predicted_labels, params.n_rows * sizeof(int), stream); allocator->deallocate(data, params.n_rows * params.n_cols * sizeof(T), stream); delete forest; handle.reset(); } protected: std::shared_ptr<raft::handle_t> handle; hipStream_t stream; RfInputs params; T* data; int* labels; std::vector<int> labels_h; std::map<int, int> labels_map; //unique map of labels to int vals starting from 0 RandomForestMetaData<T, int>* forest; float accuracy = -1.0f; // overriden in each test SetUp and TearDown int* predicted_labels; }; //------------------------------------------------------------------------------------------------------------------------------------- const std::vector<RfInputs> inputsf2_clf = { // Simple non-crash tests with small datasets {100, 59, 1, 1.0f, 0.4f, 16, -1, true, false, 10, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::GINI, 0.0f}, {101, 59, 2, 1.0f, 0.4f, 10, -1, true, false, 13, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::GINI, 0.0f}, {100, 1, 2, 1.0f, 0.4f, 10, -1, true, false, 15, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::GINI, 0.0f}, // Simple accuracy tests {20000, 10, 25, 1.0f, 0.4f, 16, -1, true, false, 10, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::GINI}, {20000, 10, 5, 1.0f, 0.4f, 14, -1, true, false, 10, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::ENTROPY}}; typedef RFBatchedClsTest<float> RFBatchedClsTestF; TEST_P(RFBatchedClsTestF, Fit) { ASSERT_TRUE(accuracy >= params.min_expected_acc); } INSTANTIATE_TEST_CASE_P(RFBatchedClsTests, RFBatchedClsTestF, ::testing::ValuesIn(inputsf2_clf)); typedef RFBatchedClsTest<double> RFBatchedClsTestD; TEST_P(RFBatchedClsTestD, Fit) { ASSERT_TRUE(accuracy >= params.min_expected_acc); } INSTANTIATE_TEST_CASE_P(RFBatchedClsTests, RFBatchedClsTestD, ::testing::ValuesIn(inputsf2_clf)); } // end namespace ML
c7ba98f33fa2e98d10b773243771fc8b7bb2cea5.cu
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <raft/linalg/transpose.h> #include <test_utils.h> #include <cuml/datasets/make_blobs.hpp> #include <cuml/ensemble/randomforest.hpp> #include <raft/cuda_utils.cuh> namespace ML { using namespace MLCommon; struct RfInputs { int n_rows; int n_cols; int n_trees; float max_features; float max_samples; int max_depth; int max_leaves; bool bootstrap; bool bootstrap_features; int n_bins; int split_algo; int min_samples_leaf; int min_samples_split; float min_impurity_decrease; int n_streams; CRITERION split_criterion; float min_expected_acc; }; template <typename T> class RFBatchedClsTest : public ::testing::TestWithParam<RfInputs> { protected: void basicTest() { params = ::testing::TestWithParam<RfInputs>::GetParam(); DecisionTree::DecisionTreeParams tree_params; set_tree_params(tree_params, params.max_depth, params.max_leaves, params.max_features, params.n_bins, params.split_algo, params.min_samples_leaf, params.min_samples_split, params.min_impurity_decrease, params.bootstrap_features, params.split_criterion, false, true); RF_params rf_params; set_all_rf_params(rf_params, params.n_trees, params.bootstrap, params.max_samples, 0, params.n_streams, tree_params); CUDA_CHECK(cudaStreamCreate(&stream)); handle.reset(new raft::handle_t(rf_params.n_streams)); handle->set_stream(stream); auto allocator = handle->get_device_allocator(); int data_len = params.n_rows * params.n_cols; data = (T*)allocator->allocate(data_len * sizeof(T), stream); labels = (int*)allocator->allocate(params.n_rows * sizeof(int), stream); predicted_labels = (int*)allocator->allocate(params.n_rows * sizeof(int), stream); Datasets::make_blobs(*handle, data, labels, params.n_rows, params.n_cols, 5, false, nullptr, nullptr, T(0.1), false, T(-0.5), T(0.5), 3536699ULL); labels_h.resize(params.n_rows); raft::update_host(labels_h.data(), labels, params.n_rows, stream); preprocess_labels(params.n_rows, labels_h, labels_map); raft::update_device(labels, labels_h.data(), params.n_rows, stream); // Training part forest = new typename ML::RandomForestMetaData<T, int>; null_trees_ptr(forest); fit(*handle, forest, data, params.n_rows, params.n_cols, labels, labels_map.size(), rf_params); // predict function expects row major lay out of data, so we need to // transpose the data first T* data_row_major; data_row_major = (T*)allocator->allocate(data_len * sizeof(T), stream); cublasHandle_t cublas_h = handle->get_cublas_handle(); raft::linalg::transpose(*handle, data, data_row_major, params.n_rows, params.n_cols, stream); predict(*handle, forest, data_row_major, params.n_rows, params.n_cols, predicted_labels); raft::update_host(labels_h.data(), predicted_labels, params.n_rows, stream); RF_metrics tmp = score(*handle, forest, labels, params.n_rows, predicted_labels); CUDA_CHECK(cudaStreamSynchronize(stream)); CUDA_CHECK(cudaStreamDestroy(stream)); accuracy = tmp.accuracy; 
allocator->deallocate(data_row_major, data_len * sizeof(T), stream); } void SetUp() override { basicTest(); } void TearDown() override { auto allocator = handle->get_device_allocator(); accuracy = -1.0f; postprocess_labels(params.n_rows, labels_h, labels_map); labels_h.clear(); labels_map.clear(); allocator->deallocate(labels, params.n_rows * sizeof(int), stream); allocator->deallocate(predicted_labels, params.n_rows * sizeof(int), stream); allocator->deallocate(data, params.n_rows * params.n_cols * sizeof(T), stream); delete forest; handle.reset(); } protected: std::shared_ptr<raft::handle_t> handle; cudaStream_t stream; RfInputs params; T* data; int* labels; std::vector<int> labels_h; std::map<int, int> labels_map; //unique map of labels to int vals starting from 0 RandomForestMetaData<T, int>* forest; float accuracy = -1.0f; // overriden in each test SetUp and TearDown int* predicted_labels; }; //------------------------------------------------------------------------------------------------------------------------------------- const std::vector<RfInputs> inputsf2_clf = { // Simple non-crash tests with small datasets {100, 59, 1, 1.0f, 0.4f, 16, -1, true, false, 10, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::GINI, 0.0f}, {101, 59, 2, 1.0f, 0.4f, 10, -1, true, false, 13, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::GINI, 0.0f}, {100, 1, 2, 1.0f, 0.4f, 10, -1, true, false, 15, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::GINI, 0.0f}, // Simple accuracy tests {20000, 10, 25, 1.0f, 0.4f, 16, -1, true, false, 10, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::GINI}, {20000, 10, 5, 1.0f, 0.4f, 14, -1, true, false, 10, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::ENTROPY}}; typedef RFBatchedClsTest<float> RFBatchedClsTestF; TEST_P(RFBatchedClsTestF, Fit) { ASSERT_TRUE(accuracy >= params.min_expected_acc); } INSTANTIATE_TEST_CASE_P(RFBatchedClsTests, RFBatchedClsTestF, ::testing::ValuesIn(inputsf2_clf)); typedef RFBatchedClsTest<double> RFBatchedClsTestD; TEST_P(RFBatchedClsTestD, Fit) { ASSERT_TRUE(accuracy >= params.min_expected_acc); } INSTANTIATE_TEST_CASE_P(RFBatchedClsTests, RFBatchedClsTestD, ::testing::ValuesIn(inputsf2_clf)); } // end namespace ML
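Worth noting about the parameter table above: the two "Simple accuracy tests" entries list only 16 initializers for the 17-field RfInputs struct, so aggregate initialization leaves min_expected_acc at 0.0f and the ASSERT_TRUE(accuracy >= params.min_expected_acc) check is effectively vacuous for them. If a real floor were intended, the entry would need an explicit final value; the 0.9f below is an assumed illustration, not a value from the original tests.

// Hypothetical variant of one accuracy-test entry with an explicit threshold.
{20000, 10, 25, 1.0f, 0.4f, 16, -1, true, false, 10,
 SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::GINI, 0.9f},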
0a68bd483beab107a06d328e5084de5061246e42.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <global_thread_handle.h> __global__ void profileLevelUp_kernel() {} __global__ void profileLevelDown_kernel() {} __global__ void profileLevelZero_kernel() {} __global__ void profilePhaseSetup_kernel() {} __global__ void profilePhaseSolve_kernel() {} __global__ void profilePhaseNone_kernel() {} __global__ void profileSubphaseMatrixColoring_kernel() {} __global__ void profileSubphaseSmootherSetup_kernel() {} __global__ void profileSubphaseFindAggregates_kernel() {} __global__ void profileSubphaseComputeRestriction_kernel() {} __global__ void profileSubphaseComputeCoarseA_kernel() {} __global__ void profileSubphaseNone_kernel() {} __global__ void profileSubphaseTruncateP_kernel() {} void profileLevelUp() { hipLaunchKernelGGL(( profileLevelUp_kernel) , dim3(1), dim3(1), 0, thrust::global_thread_handle::get_stream(), ); } void profileLevelDown() { hipLaunchKernelGGL(( profileLevelDown_kernel) , dim3(1), dim3(1), 0, thrust::global_thread_handle::get_stream(), ); } void profileLevelZero() { hipLaunchKernelGGL(( profileLevelZero_kernel) , dim3(1), dim3(1), 0, thrust::global_thread_handle::get_stream(), ); } void profilePhaseSetup() { hipLaunchKernelGGL(( profilePhaseSetup_kernel) , dim3(1), dim3(1), 0, thrust::global_thread_handle::get_stream(), ); } void profilePhaseSolve() { hipLaunchKernelGGL(( profilePhaseSolve_kernel) , dim3(1), dim3(1), 0, thrust::global_thread_handle::get_stream(), ); } void profilePhaseNone() { hipLaunchKernelGGL(( profilePhaseNone_kernel) , dim3(1), dim3(1), 0, thrust::global_thread_handle::get_stream(), ); } void profileSubphaseMatrixColoring() { hipLaunchKernelGGL(( profileSubphaseMatrixColoring_kernel) , dim3(1), dim3(1), 0, thrust::global_thread_handle::get_stream(), ); } void profileSubphaseSmootherSetup() { hipLaunchKernelGGL(( profileSubphaseSmootherSetup_kernel) , dim3(1), dim3(1), 0, thrust::global_thread_handle::get_stream(), ); } void profileSubphaseFindAggregates() { 
hipLaunchKernelGGL(( profileSubphaseFindAggregates_kernel) , dim3(1), dim3(1), 0, thrust::global_thread_handle::get_stream(), ); } void profileSubphaseComputeRestriction() { hipLaunchKernelGGL(( profileSubphaseComputeRestriction_kernel) , dim3(1), dim3(1), 0, thrust::global_thread_handle::get_stream(), ); } void profileSubphaseComputeCoarseA() { hipLaunchKernelGGL(( profileSubphaseComputeCoarseA_kernel) , dim3(1), dim3(1), 0, thrust::global_thread_handle::get_stream(), ); } void profileSubphaseNone() { hipLaunchKernelGGL(( profileSubphaseNone_kernel) , dim3(1), dim3(1), 0, thrust::global_thread_handle::get_stream(), ); } void profileSubphaseTruncateP() { hipLaunchKernelGGL(( profileSubphaseTruncateP_kernel) , dim3(1), dim3(1), 0, thrust::global_thread_handle::get_stream(), ); }
0a68bd483beab107a06d328e5084de5061246e42.cu
/* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <global_thread_handle.h> __global__ void profileLevelUp_kernel() {} __global__ void profileLevelDown_kernel() {} __global__ void profileLevelZero_kernel() {} __global__ void profilePhaseSetup_kernel() {} __global__ void profilePhaseSolve_kernel() {} __global__ void profilePhaseNone_kernel() {} __global__ void profileSubphaseMatrixColoring_kernel() {} __global__ void profileSubphaseSmootherSetup_kernel() {} __global__ void profileSubphaseFindAggregates_kernel() {} __global__ void profileSubphaseComputeRestriction_kernel() {} __global__ void profileSubphaseComputeCoarseA_kernel() {} __global__ void profileSubphaseNone_kernel() {} __global__ void profileSubphaseTruncateP_kernel() {} void profileLevelUp() { profileLevelUp_kernel <<< 1, 1, 0, thrust::global_thread_handle::get_stream()>>>(); } void profileLevelDown() { profileLevelDown_kernel <<< 1, 1, 0, thrust::global_thread_handle::get_stream()>>>(); } void profileLevelZero() { profileLevelZero_kernel <<< 1, 1, 0, thrust::global_thread_handle::get_stream()>>>(); } void profilePhaseSetup() { profilePhaseSetup_kernel <<< 1, 1, 0, thrust::global_thread_handle::get_stream()>>>(); } void profilePhaseSolve() { profilePhaseSolve_kernel <<< 1, 1, 0, thrust::global_thread_handle::get_stream()>>>(); } void profilePhaseNone() { profilePhaseNone_kernel <<< 1, 1, 0, thrust::global_thread_handle::get_stream()>>>(); } void profileSubphaseMatrixColoring() { profileSubphaseMatrixColoring_kernel <<< 1, 1, 0, thrust::global_thread_handle::get_stream()>>>(); } void profileSubphaseSmootherSetup() { profileSubphaseSmootherSetup_kernel <<< 1, 1, 0, thrust::global_thread_handle::get_stream()>>>(); } void profileSubphaseFindAggregates() { profileSubphaseFindAggregates_kernel <<< 1, 1, 0, thrust::global_thread_handle::get_stream()>>>(); } void profileSubphaseComputeRestriction() { profileSubphaseComputeRestriction_kernel <<< 1, 1, 0, thrust::global_thread_handle::get_stream()>>>(); } void profileSubphaseComputeCoarseA() { profileSubphaseComputeCoarseA_kernel <<< 1, 
1, 0, thrust::global_thread_handle::get_stream()>>>(); } void profileSubphaseNone() { profileSubphaseNone_kernel <<< 1, 1, 0, thrust::global_thread_handle::get_stream()>>>(); } void profileSubphaseTruncateP() { profileSubphaseTruncateP_kernel <<< 1, 1, 0, thrust::global_thread_handle::get_stream()>>>(); }
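The kernels in the file above are intentionally empty: each exists only so that a recognizably named launch appears on the CUDA stream, letting a timeline profiler (e.g. nvprof or Nsight) visually delimit solver phases. A plausible host-side pattern (assumed; the library's actual call sites are not shown here) brackets a phase with the matching marker calls:

// Hypothetical bracketing of a setup phase with the profiling markers defined
// above; the declarations repeat what the file above already provides, and the
// actual work between the markers is elided.
void profilePhaseSetup();
void profilePhaseNone();
void profileSubphaseMatrixColoring();
void profileSubphaseNone();

void run_setup_with_markers()
{
    profilePhaseSetup();              // empty kernel marks the start of "setup"
    profileSubphaseMatrixColoring();  // empty kernel marks the coloring subphase
    // ... matrix coloring work would run here ...
    profileSubphaseNone();            // marker: subphase over
    profilePhaseNone();               // marker: phase over
}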
63282317eada3600e846074a02a5df9440a4de5b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Fermat * * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <cmlt.h> #include <renderer.h> #include <optix_prime/optix_primepp.h> #include <mesh/MeshStorage.h> #include <cugar/basic/timer.h> #include <cugar/basic/cuda/timer.h> #include <cugar/basic/primitives.h> #include <cugar/sampling/random.h> #include <cugar/sampling/distributions.h> #include <bsdf.h> #include <edf.h> #include <bpt_context.h> #include <bpt_control.h> #include <bpt_samplers.h> #include <random_sequence.h> #include <path_inversion.h> #include <ray_queues.h> #include <vector> #define SHIFT_RES 256u #define DEBUG_PIXEL (714 + 66*1600) namespace { ///@addtogroup Fermat ///@{ ///@addtogroup CMLTModule ///@{ FERMAT_HOST_DEVICE FERMAT_FORCEINLINE float bounded_access(const float* vec, const uint32 i) { return (i == uint32(-1)) ? 
0.0f : vec[i]; } FERMAT_HOST_DEVICE uint32 chain_coordinate_index(const uint32 n_chains, const uint32 idx, const uint32 dim) { return dim*n_chains + idx; } struct StridedRandoms { FERMAT_HOST_DEVICE StridedRandoms(const float* _u, const uint32 _stride) : u(_u), stride(_stride) {} FERMAT_HOST_DEVICE float next() { const float r = *u; u += stride; return r; } const float* u; uint32 stride; }; ///@} CMLT ///@} Fermat } // anonymous namespace ///@addtogroup Fermat ///@{ ///@addtogroup CMLTModule ///@{ /// The CUDA context class for the CMLT renderer /// struct CMLTContext : BPTContextBase { CMLTContext( CMLT& _cmlt, const RendererView& _renderer) : BPTContextBase( _renderer, _cmlt.m_light_vertices.view(), _cmlt.m_queues.view(_cmlt.m_n_init_paths, _cmlt.m_n_init_light_paths)), options (_cmlt.m_options), sequence (_cmlt.m_sequence.view()), connections_value (_cmlt.m_connections_value.ptr()), connections_index (_cmlt.m_connections_index.ptr()), connections_counter (_cmlt.m_connections_counter.ptr()), st_norms (_cmlt.m_st_norms.ptr()), st_norms_cdf (_cmlt.m_st_norms_cdf.ptr()), seeds (_cmlt.m_seeds.ptr()), st (reinterpret_cast<char4*>(_cmlt.m_seeds.ptr())), // NOTE: aliased to seeds! mut_u (_cmlt.m_mut_u.ptr()), light_u (_cmlt.m_light_u.ptr()), eye_u (_cmlt.m_eye_u.ptr()), vertices_l (_cmlt.m_vertices.ptr()), vertices_e (vertices_l + _cmlt.m_options.n_chains * (_cmlt.m_options.max_path_length + 1)), mut_vertices_l (vertices_e + _cmlt.m_options.n_chains * (_cmlt.m_options.max_path_length + 1)), mut_vertices_e (mut_vertices_l + _cmlt.m_options.n_chains * (_cmlt.m_options.max_path_length + 1)), path_value (_cmlt.m_path_value.ptr()), path_pdf (_cmlt.m_path_pdf.ptr()), new_path_value (_cmlt.m_path_value.ptr() + _cmlt.m_options.n_chains), new_path_st (reinterpret_cast<char2*>(_cmlt.m_path_pdf.ptr() + _cmlt.m_options.n_chains)), rejections (_cmlt.m_rejections.ptr()), n_chains (_cmlt.m_options.n_chains), mutation_type (PerturbedPrimaryCoords::CauchyPerturbation), enable_accumulation (true) {} CMLTOptions options; TiledSequenceView sequence; float4* connections_value; uint4* connections_index; uint32* connections_counter; float* st_norms; float* st_norms_cdf; uint32* seeds; char4* st; float* mut_u; float* light_u; float* eye_u; VertexGeometryId* vertices_l; VertexGeometryId* vertices_e; VertexGeometryId* mut_vertices_l; VertexGeometryId* mut_vertices_e; float4* new_path_value; char2* new_path_st; float4* path_value; float* path_pdf; uint32* rejections; uint32 n_chains; float pdf_norm; PerturbedPrimaryCoords::Type mutation_type; bool enable_accumulation; FERMAT_HOST_DEVICE FERMAT_FORCEINLINE float u_ar(const uint32 chain_id) const { return mut_u ? mut_u[chain_coordinate_index(n_chains, chain_id, (options.max_path_length + 1) * 3 * 2)] : 0.5f; } FERMAT_HOST_DEVICE FERMAT_FORCEINLINE float& u_L(const uint32 chain_id, const uint32 dim) { return light_u[chain_coordinate_index(n_chains, chain_id, dim)]; } FERMAT_HOST_DEVICE FERMAT_FORCEINLINE float& u_E(const uint32 chain_id, const uint32 dim) { return eye_u[chain_coordinate_index(n_chains, chain_id, dim)]; } FERMAT_HOST_DEVICE FERMAT_FORCEINLINE PerturbedPrimaryCoords light_primary_coords() const { return PerturbedPrimaryCoords( n_chains, light_u, 0u, mut_u, 0u, options.light_perturbations ? 
mutation_type : PerturbedPrimaryCoords::Null, options.perturbation_radius); } FERMAT_HOST_DEVICE FERMAT_FORCEINLINE PerturbedPrimaryCoords eye_primary_coords() const { return PerturbedPrimaryCoords( n_chains, eye_u, 0u, mut_u, (options.max_path_length + 1), options.eye_perturbations ? mutation_type : PerturbedPrimaryCoords::Null, options.perturbation_radius); } }; ///@} CMLTModule ///@} Fermat namespace { // anonymous namespace ///@addtogroup Fermat ///@{ ///@addtogroup CMLTModule ///@{ struct CMLTPresamplingBPTConfig : BPTConfigBase { FERMAT_HOST_DEVICE FERMAT_FORCEINLINE CMLTPresamplingBPTConfig(CMLTContext& _context) : BPTConfigBase( _context.options, VertexSampling::kAll, _context.options.single_connection ? VertexOrdering::kRandomOrdering : VertexOrdering::kPathOrdering, VertexSampling::kAll, _context.options.rr) {} }; struct CMLTChainBPTConfig : BPTConfigBase { FERMAT_HOST_DEVICE FERMAT_FORCEINLINE CMLTChainBPTConfig(CMLTContext& _context) : BPTConfigBase( _context.options, VertexSampling::kEnd, VertexOrdering::kPathOrdering, VertexSampling::kEnd, _context.options.rr), st(_context.st) {} FERMAT_HOST_DEVICE FERMAT_FORCEINLINE bool terminate_light_subpath(const uint32 path_id, const uint32 s) const { return s == st[path_id].z; } //bool terminate_light_subpath(const uint32 path_id, const uint32 s) const { return (st[path_id].z == 0) || s >= BPTConfigBase::max_path_length + 1; } FERMAT_HOST_DEVICE FERMAT_FORCEINLINE bool terminate_eye_subpath(const uint32 path_id, const uint32 t) const { return t == st[path_id].w; } //bool terminate_eye_subpath(const uint32 path_id, const uint32 t) const { return t + st[path_id].z >= BPTConfigBase::max_path_length + 1; } FERMAT_HOST_DEVICE FERMAT_FORCEINLINE bool store_light_vertex(const uint32 path_id, const uint32 s, const bool absorbed) const { return (s == st[path_id].z); } /*bool store_light_vertex(const uint32 path_id, const uint32 s, const bool absorbed) const { if (absorbed) { // store the new path length st[path_id].z = s; } return absorbed; }*/ FERMAT_HOST_DEVICE FERMAT_FORCEINLINE bool perform_connection(const uint32 path_id, const uint32 t, const bool absorbed) const { return t == st[path_id].w && st[path_id].z > 0; } //bool perform_connection(const uint32 path_id, const uint32 t, const bool absorbed) const { return absorbed == true && st[path_id].z > 0; } FERMAT_HOST_DEVICE FERMAT_FORCEINLINE bool accumulate_emissive(const uint32 path_id, const uint32 t, const bool absorbed) const { return t == st[path_id].w && st[path_id].z == 0; } //bool accumulate_emissive(const uint32 path_id, const uint32 t, const bool absorbed) const { return absorbed == true && st[path_id].z == 0; } // store the compact vertex information FERMAT_HOST_DEVICE FERMAT_FORCEINLINE void visit_light_vertex( const uint32 path_id, const uint32 depth, const VertexGeometryId v, CMLTContext& context, RendererView& renderer) const { context.mut_vertices_l[path_id + depth * context.n_chains] = v; } // store the compact vertex information FERMAT_HOST_DEVICE FERMAT_FORCEINLINE void visit_eye_vertex( const uint32 path_id, const uint32 depth, const VertexGeometryId v_id, const EyeVertex& v, CMLTContext& context, RendererView& renderer) const { context.mut_vertices_e[path_id + depth * context.n_chains] = v_id; } /*const*/ char4* st; }; FERMAT_DEVICE FERMAT_FORCEINLINE void reject_accumulate(const uint32 chain_id, CMLTContext& context, RendererView& renderer) { // TODO: keep track of the old sink! 
const float old_pdf = context.path_pdf[chain_id]; // perform the MH acceptance-rejection test const cugar::Vector2f old_uv( context.u_E(chain_id, 3 + 0), context.u_E(chain_id, 3 + 1)); const uint32 old_pixel_x = cugar::quantize(old_uv.x, renderer.res_x); const uint32 old_pixel_y = cugar::quantize(old_uv.y, renderer.res_y); const uint32 old_pixel = old_pixel_x + old_pixel_y*renderer.res_x; if (context.enable_accumulation) { if (old_pdf > 0) { const float4 old_value = context.path_value[chain_id]; cugar::atomic_add(&renderer.fb(FBufferDesc::COMPOSITED_C, old_pixel).x, context.pdf_norm * (old_value.x / old_pdf) / float(renderer.instance + 1)); cugar::atomic_add(&renderer.fb(FBufferDesc::COMPOSITED_C, old_pixel).y, context.pdf_norm * (old_value.y / old_pdf) / float(renderer.instance + 1)); cugar::atomic_add(&renderer.fb(FBufferDesc::COMPOSITED_C, old_pixel).z, context.pdf_norm * (old_value.z / old_pdf) / float(renderer.instance + 1)); } } // increase the rejections counter context.rejections[chain_id]++; //if ((context.rejections[chain_id] % 20) == 0) //if (context.rejections[chain_id] > 20 && context.rejections[chain_id] > context.rejections[context.n_chains]) // printf("chain[%u,%u:%u] stuck for %u iterations (val: %f)\n", context.st[chain_id].z, context.st[chain_id].w, chain_id, context.rejections[chain_id], old_pdf); atomicMax( &context.rejections[context.n_chains], context.rejections[chain_id] ); } FERMAT_DEVICE FERMAT_FORCEINLINE void accept_reject_accumulate(const uint32 chain_id, const uint32 s, const uint32 t, const cugar::Vector4f w, CMLTContext& context, RendererView& renderer) { // perform the MH acceptance-rejection test const float new_pdf = cugar::max_comp(w.xyz()); const float old_pdf = context.path_pdf[chain_id]; if (new_pdf == 0.0f) { reject_accumulate(chain_id, context, renderer); return; } PerturbedPrimaryCoords eye_primary_coords = context.eye_primary_coords(); const cugar::Vector2f old_uv( eye_primary_coords.u(chain_id, 3 + 0), eye_primary_coords.u(chain_id, 3 + 1)); const cugar::Vector2f new_uv( eye_primary_coords.perturbed_u(chain_id, 3 + 0), eye_primary_coords.perturbed_u(chain_id, 3 + 1)); const uint32 old_pixel_x = cugar::quantize(old_uv.x, renderer.res_x); const uint32 old_pixel_y = cugar::quantize(old_uv.y, renderer.res_y); const uint32 old_pixel = old_pixel_x + old_pixel_y*renderer.res_x; const uint32 new_pixel_x = cugar::quantize(new_uv.x, renderer.res_x); const uint32 new_pixel_y = cugar::quantize(new_uv.y, renderer.res_y); const uint32 new_pixel = new_pixel_x + new_pixel_y*renderer.res_x; const float ar = old_pdf ? 
fminf(1.0f, new_pdf / old_pdf) : 1.0f; if (context.enable_accumulation) { if (old_pdf > 0) { const float4 old_value = context.path_value[chain_id]; cugar::atomic_add(&renderer.fb(FBufferDesc::COMPOSITED_C, old_pixel).x, context.pdf_norm * (1.0f - ar) * (old_value.x / old_pdf) / float(renderer.instance + 1)); cugar::atomic_add(&renderer.fb(FBufferDesc::COMPOSITED_C, old_pixel).y, context.pdf_norm * (1.0f - ar) * (old_value.y / old_pdf) / float(renderer.instance + 1)); cugar::atomic_add(&renderer.fb(FBufferDesc::COMPOSITED_C, old_pixel).z, context.pdf_norm * (1.0f - ar) * (old_value.z / old_pdf) / float(renderer.instance + 1)); } if (new_pdf > 0) { const float4 new_value = w; cugar::atomic_add(&renderer.fb(FBufferDesc::COMPOSITED_C, new_pixel).x, context.pdf_norm * ar * (new_value.x / new_pdf) / float(renderer.instance + 1)); cugar::atomic_add(&renderer.fb(FBufferDesc::COMPOSITED_C, new_pixel).y, context.pdf_norm * ar * (new_value.y / new_pdf) / float(renderer.instance + 1)); cugar::atomic_add(&renderer.fb(FBufferDesc::COMPOSITED_C, new_pixel).z, context.pdf_norm * ar * (new_value.z / new_pdf) / float(renderer.instance + 1)); } } // fetch the old st const uint32 s_old = context.st[chain_id].x; const uint32 t_old = context.st[chain_id].y; const float st_change_ratio = 1.0f; // assume it's symmetric const float st_norms_ratio = (context.st_norms[s + t * (context.options.max_path_length + 2)] / context.st_norms[s_old + t_old * (context.options.max_path_length + 2)]) * st_change_ratio; if (context.u_ar(chain_id) * old_pdf < new_pdf * st_norms_ratio) { context.path_value[chain_id] = w; context.path_pdf[chain_id] = new_pdf; // write out the successful st context.st[chain_id] = make_char4(s, t, s, t); PerturbedPrimaryCoords light_primary_coords = context.light_primary_coords(); // copy the successful mutation coordinates for (uint32 i = 0; i < (context.options.max_path_length + 1) * 3; ++i) light_primary_coords.u(chain_id, i) = light_primary_coords.perturbed_u(chain_id, i); // copy the successful mutation coordinates for (uint32 i = 0; i < (context.options.max_path_length + 1) * 3; ++i) eye_primary_coords.u(chain_id, i) = eye_primary_coords.perturbed_u(chain_id, i); // copy the compact vertex information for (uint32 i = 0; i < s; ++i) context.vertices_l[chain_id + i * context.n_chains] = context.mut_vertices_l[chain_id + i * context.n_chains]; for (uint32 i = 0; i < t; ++i) context.vertices_e[chain_id + i * context.n_chains] = context.mut_vertices_e[chain_id + i * context.n_chains]; // reset the rejections counter context.rejections[chain_id] = 0; } else { // increase the rejections counter context.rejections[chain_id]++; //if ((context.rejections[chain_id] % 20) == 0) //if (context.rejections[chain_id] > 20 && context.rejections[chain_id] > context.rejections[context.n_chains]) // printf("chain[%u,%u:%u] stuck for %u iterations (val: %f, %f)\n", s, t, chain_id, context.rejections[chain_id], old_pdf, new_pdf); atomicMax( &context.rejections[context.n_chains], context.rejections[chain_id] ); } } /// /// The \ref SampleSinkAnchor "Sample Sink" used by the BPT presampling/seeding pass /// struct ConnectionsSink : SampleSinkBase { FERMAT_HOST_DEVICE ConnectionsSink() {} FERMAT_HOST_DEVICE void sink( const uint32 channel, const cugar::Vector4f value, const uint32 light_path_id, const uint32 eye_path_id, const uint32 s, const uint32 t, CMLTContext& context, RendererView& renderer) { if (cugar::max_comp(value.xyz()) > 0.0f) { const uint32 slot = cugar::atomic_add(context.connections_counter, 1); 
context.connections_value[slot] = value; context.connections_index[slot] = make_uint4(light_path_id, eye_path_id, s, t); cugar::atomic_add(context.st_norms + s + t * (context.options.max_path_length + 2), cugar::max_comp(value.xyz())); } } }; /// /// The \ref SampleSinkAnchor "Sample Sink" used the MLT pass /// struct AccumulateRejectSink : SampleSinkBase { FERMAT_HOST_DEVICE AccumulateRejectSink() {} FERMAT_HOST_DEVICE void sink( const uint32 channel, const cugar::Vector4f value, const uint32 light_path_id, const uint32 eye_path_id, const uint32 s, const uint32 t, CMLTContext& context, RendererView& renderer) { context.new_path_value[ eye_path_id ] = value; context.new_path_st[ eye_path_id ] = make_char2(s,t); } }; //------------------------------------------------------------------------------ __global__ void accept_reject_mlt_kernel(CMLTContext context, RendererView renderer) { const uint32 chain_id = threadIdx.x + blockIdx.x * blockDim.x; if (chain_id < context.n_chains) accept_reject_accumulate(chain_id, context.new_path_st[chain_id].x, context.new_path_st[chain_id].y, context.new_path_value[chain_id], context, renderer); } void accept_reject_mlt(CMLTContext context, RendererView renderer) { const uint32 blockSize(128); const dim3 gridSize(cugar::divide_ri(context.n_chains, blockSize)); accept_reject_mlt_kernel << < gridSize, blockSize >> > (context, renderer); } //------------------------------------------------------------------------------ /// implement plain MMLT swaps /// __global__ void mmlt_swap_kernel(CMLTContext context, RendererView renderer) { const uint32 chain_id = threadIdx.x + blockIdx.x * blockDim.x; if (chain_id < context.n_chains) { const uint32 s = context.st[chain_id].x; const uint32 t = context.st[chain_id].y; // setup the random stream for this chain StridedRandoms random(context.mut_u + chain_id, context.n_chains); // generate a candidate (s_new,t_new) pair with s_new + t_new = s + t FERMAT_ASSERT(s + t > 1); #if 0 // select a technique at random const uint32 s_new = cugar::quantize(random.next(), s + t - 1); const float st_change_ratio = 1.0f; #elif 1 // perform a random walk on s const uint32 s_new = random.next() > 0.5f ? (t > 2 ? s + 1 : 0) : (s > 0 ? 
s - 1 : s + t - 2); const float st_change_ratio = 1.0f; #else const float one = cugar::binary_cast<float>(FERMAT_ALMOST_ONE_AS_INT); // propose a technique with the same path length k = s + t - 1 according to their CDF const uint32 k = s + t - 1; const float* st_norms_cdf = context.st_norms_cdf + k * (context.options.max_path_length + 2); const uint32 s_new = cugar::upper_bound_index( fminf( random.next(), one ), st_norms_cdf, k + 2); const float st_change_ratio = (bounded_access(st_norms_cdf, s) - bounded_access(st_norms_cdf, s - 1)) / (bounded_access(st_norms_cdf, s_new) - bounded_access(st_norms_cdf, s_new - 1)); #endif const uint32 t_new = s + t - s_new; FERMAT_ASSERT(s_new < s + t); // NOTE: it should be <= s + t, but we don't support t = 0 FERMAT_ASSERT(s_new + t_new == s + t); // for now, make sure we never generate a technique with less than 2 eye vertices if (t < 2) return; // write the proposed s,t coordinates out context.st[chain_id].z = s_new; context.st[chain_id].w = t_new; } } /// implement plain MMLT swaps /// void mmlt_swap(CMLTContext context, RendererView renderer) { const uint32 blockSize(128); const dim3 gridSize(cugar::divide_ri(context.n_chains, blockSize)); mmlt_swap_kernel << < gridSize, blockSize >> > (context, renderer); } //------------------------------------------------------------------------------ /// implement chart swaps /// __global__ void chart_swap_kernel(CMLTContext context, RendererView renderer) { const uint32 chain_id = threadIdx.x + blockIdx.x * blockDim.x; if (chain_id < context.n_chains) { const uint32 s = context.st[chain_id].x; const uint32 t = context.st[chain_id].y; // setup the random stream for this chain StridedRandoms random(context.mut_u + chain_id, context.n_chains); // generate a candidate (s_new,t_new) pair with s_new + t_new = s + t FERMAT_ASSERT(s + t > 1); #if 0 // select a technique at random const uint32 s_new = cugar::quantize( random.next(), s + t - 1 ); const float st_change_ratio = 1.0f; #elif 0 // perform a random walk on s const uint32 s_new = random.next() > 0.5f ? (t > 2 ? s + 1 : 0) : (s > 0 ? 
s - 1 : s + t - 2); const float st_change_ratio = 1.0f; #else const float one = cugar::binary_cast<float>(FERMAT_ALMOST_ONE_AS_INT); // propose a technique with the same path length k = s + t - 1 according to their CDF const uint32 k = s + t - 1; const float* st_norms_cdf = context.st_norms_cdf + k * (context.options.max_path_length + 2); const uint32 s_new = cugar::min( cugar::upper_bound_index( fminf( random.next(), one ), st_norms_cdf, k + 2 ), k /*+ 1*/ ); // note: it should be k + 1, but we don't support t = 0 const float st_change_ratio = (bounded_access(st_norms_cdf, s) - bounded_access(st_norms_cdf, s - 1)) / (bounded_access(st_norms_cdf, s_new) - bounded_access(st_norms_cdf, s_new - 1)); #endif const uint32 t_new = s + t - s_new; FERMAT_ASSERT(s_new < s + t); // NOTE: it should be <= s + t, but we don't support t = 0 FERMAT_ASSERT(s_new + t_new == s + t); // for now, make sure we never generate a technique with less than 2 eye vertices if (t_new < 2) return; // setup the path wrapper BidirPath path( s, t, context.vertices_l + chain_id, context.vertices_e + chain_id, context.n_chains ); float u_new[32]; float u_old[32]; float pdf_new; const float st_norms_ratio = (context.st_norms[s_new + t_new * (context.options.max_path_length + 2)] / context.st_norms[s + t * (context.options.max_path_length + 2)]) * st_change_ratio; if (s_new > s) { // invert the [s,s_new) vertices of the light subpath if (invert_light_subpath(path, s, s_new, u_new, &pdf_new, renderer, random) == false) return; // compute the eye subpath inversion pdf for the vertices [t_new,t) for (uint32 i = t_new; i < t; ++i) { u_old[(i - t_new) * 3 + 0] = context.u_E(chain_id, i * 3 + 0); u_old[(i - t_new) * 3 + 1] = context.u_E(chain_id, i * 3 + 1); u_old[(i - t_new) * 3 + 2] = context.u_E(chain_id, i * 3 + 2); } const float pdf_old = eye_subpath_inversion_pdf(path, t_new, t, u_old, renderer); //if (random.next() < (pdf_new / pdf_old) * st_norms_ratio) if (random.next() < (pdf_old / pdf_new) * st_norms_ratio) { // accept the proposal context.st[chain_id] = make_char4( s_new, t_new, s_new, t_new ); for (uint32 i = s; i < s_new; ++i) { context.u_L(chain_id, i * 3 + 0) = u_new[(i - s) * 3 + 0]; context.u_L(chain_id, i * 3 + 1) = u_new[(i - s) * 3 + 1]; context.u_L(chain_id, i * 3 + 2) = u_new[(i - s) * 3 + 2]; } } } else if (t_new > t) { // invert the [t,t_new) vertices of the eye subpath if (invert_eye_subpath(path, t, t_new, u_new, &pdf_new, renderer, random) == false) return; // compute the light subpath inversion pdf for the vertices [s_new,s) for (uint32 i = s_new; i < s; ++i) { u_old[(i - s_new) * 3 + 0] = context.u_L(chain_id, i * 3 + 0); u_old[(i - s_new) * 3 + 1] = context.u_L(chain_id, i * 3 + 1); u_old[(i - s_new) * 3 + 2] = context.u_L(chain_id, i * 3 + 2); } const float pdf_old = light_subpath_inversion_pdf(path, s_new, s, u_old, renderer); //if (random.next() < (pdf_new / pdf_old) * st_norms_ratio) if (random.next() < (pdf_old / pdf_new) * st_norms_ratio) { // accept the proposal context.st[chain_id] = make_char4(s_new, t_new, s_new, t_new); for (uint32 i = t; i < t_new; ++i) { context.u_E(chain_id, i * 3 + 0) = u_new[(i - t) * 3 + 0]; context.u_E(chain_id, i * 3 + 1) = u_new[(i - t) * 3 + 1]; context.u_E(chain_id, i * 3 + 2) = u_new[(i - t) * 3 + 2]; } } } } } /// implement chart swaps /// void chart_swap(CMLTContext context, RendererView renderer) { const uint32 blockSize(128); const dim3 gridSize(cugar::divide_ri(context.n_chains, blockSize)); chart_swap_kernel << < gridSize, blockSize >> > (context, 
renderer); } //------------------------------------------------------------------------------ /// sample the seed paths /// __global__ void sample_seeds_kernel(const float random_shift, const uint32 n_connections, const float* connections_cdf, const uint4* connections_index, const uint32 n_seeds, uint32* seeds, uint32* st_counters, const uint32 max_path_length) { const uint32 seed_id = threadIdx.x + blockIdx.x * blockDim.x; if (seed_id < n_seeds) { const float r = (seed_id + random_shift) / float(n_seeds); const uint32 seed = cugar::upper_bound_index( r * connections_cdf[n_connections-1], connections_cdf, n_connections); const uint4 connection = connections_index[seed]; seeds[seed_id] = seed | (connection.z << 24) | (connection.w << 28); // keep stats atomicAdd(st_counters + connection.z + connection.w*(max_path_length + 2), 1u); } } /// sample the seed paths /// void sample_seeds(const float random_shift, const uint32 n_connections, const float* connections_cdf, const uint4* connections_index, const uint32 n_seeds, uint32* seeds, uint32* st_counters, const uint32 max_path_length) { dim3 blockSize(128); dim3 gridSize(cugar::divide_ri(n_seeds, blockSize.x)); hipLaunchKernelGGL(( sample_seeds_kernel), dim3(gridSize), dim3(blockSize) , 0, 0, random_shift, n_connections, connections_cdf, connections_index, n_seeds, seeds, st_counters, max_path_length); } //------------------------------------------------------------------------------ /// recover the primary space coordinates of the sampled seed paths /// __global__ void recover_primary_coordinates_kernel(const uint32 n_seeds, const uint32* seeds, const uint32 n_lights, CMLTContext context, RendererView renderer) { const uint32 seed_id = threadIdx.x + blockIdx.x * blockDim.x; if (seed_id < n_seeds) { const uint32 seed = seeds[seed_id]; const uint4 connection = context.connections_index[seed & 0xFFFFFF]; const uint32 light_idx = connection.x; const uint32 eye_idx = connection.y; const uint32 s = connection.z; const uint32 t = connection.w; TiledLightSubpathPrimaryCoords light_primary_coords(context.sequence); for (uint32 i = 0; i < s; ++i) { if (i == 0) { // the first vertex might be somewhat special if (context.options.use_vpls) context.u_L(seed_id, i * 3 + 0) = (float(light_idx) + light_primary_coords.sample(light_idx, i, 0)) / float(n_lights); else context.u_L(seed_id, i * 3 + 0) = light_primary_coords.sample(light_idx, i, 0); context.u_L(seed_id, i * 3 + 1) = light_primary_coords.sample(light_idx, i, 1); context.u_L(seed_id, i * 3 + 2) = light_primary_coords.sample(light_idx, i, 2); } else { // fetch the regular coordinates context.u_L(seed_id, i * 3 + 0) = light_primary_coords.sample(light_idx, i, 0); context.u_L(seed_id, i * 3 + 1) = light_primary_coords.sample(light_idx, i, 1); context.u_L(seed_id, i * 3 + 2) = light_primary_coords.sample(light_idx, i, 2); } } PerPixelEyeSubpathPrimaryCoords eye_primary_coords(context.sequence, renderer.res_x, renderer.res_y); for (uint32 i = 0; i < t; ++i) { if (i == 0) { // we set the lens sample to (0,0,0) context.u_E(seed_id, i * 3 + 0) = 0; context.u_E(seed_id, i * 3 + 1) = 0; context.u_E(seed_id, i * 3 + 2) = 0; } else { // fetch the regular coordinates context.u_E(seed_id, i * 3 + 0) = eye_primary_coords.sample(eye_idx, i, 0); context.u_E(seed_id, i * 3 + 1) = eye_primary_coords.sample(eye_idx, i, 1); context.u_E(seed_id, i * 3 + 2) = eye_primary_coords.sample(eye_idx, i, 2); } } // write the st info context.st[seed_id] = make_char4(s, t, s, t); } } /// recover the primary space coordinates of the 
sampled seed paths /// void recover_primary_coordinates(const uint32 n_seeds, const uint32* seeds, const uint32 n_lights, CMLTContext context, RendererView renderer) { dim3 blockSize(128); dim3 gridSize(cugar::divide_ri(n_seeds, blockSize.x)); hipLaunchKernelGGL(( recover_primary_coordinates_kernel) , dim3(gridSize), dim3(blockSize) , 0, 0, n_seeds, seeds, n_lights, context, renderer); } ///@} CMLT ///@} Fermat } // anonymous namespace CMLT::CMLT() : m_generator(32, cugar::LFSRGeneratorMatrix::GOOD_PROJECTIONS), m_random(&m_generator, 1u, 1351u) { } void CMLT::init(int argc, char** argv, Renderer& renderer) { const uint2 res = renderer.res(); const uint32 n_pixels = res.x * res.y; // parse the options m_options.parse(argc, argv); // if we perform a single connection, RR must be enabled if (m_options.single_connection) m_options.rr = true; // TODO: re-enable when light tracing is implemented m_options.light_tracing = 0.0f; // compute how long our chains are const uint32 chain_length = (m_options.spp * n_pixels) / m_options.n_chains; // check whether how many actual skips we can afford m_options.startup_skips = cugar::min(m_options.startup_skips, chain_length - 1); fprintf(stderr, " CMLT settings:\n"); fprintf(stderr, " spp : %u\n", m_options.spp); fprintf(stderr, " chains : %u\n", m_options.n_chains); fprintf(stderr, " chain-length : %u\n", chain_length); fprintf(stderr, " startup-skips : %u\n", m_options.startup_skips); fprintf(stderr, " charted-swaps : %u\n", m_options.swap_frequency); fprintf(stderr, " mmlt-swaps : %u\n", m_options.mmlt_frequency); fprintf(stderr, " single-conn : %u\n", m_options.single_connection); fprintf(stderr, " RR : %u\n", m_options.rr); fprintf(stderr, " path-length : %u\n", m_options.max_path_length); fprintf(stderr, " direct-nee : %u\n", m_options.direct_lighting_nee ? 1 : 0); fprintf(stderr, " direct-bsdf : %u\n", m_options.direct_lighting_bsdf ? 1 : 0); fprintf(stderr, " indirect-nee : %u\n", m_options.indirect_lighting_nee ? 1 : 0); fprintf(stderr, " indirect-bsdf : %u\n", m_options.indirect_lighting_bsdf ? 1 : 0); fprintf(stderr, " visible-lights : %u\n", m_options.visible_lights ? 1 : 0); fprintf(stderr, " light-tracing : %u\n", m_options.light_tracing ? 1 : 0); // compute the number of light paths const uint32 n_light_paths = n_pixels; const uint32 n_chains = m_options.n_chains; fprintf(stderr, " creatign mesh lights... started\n"); // initialize the mesh lights sampler renderer.m_mesh_lights.init( n_light_paths, renderer.m_mesh.view(), renderer.m_mesh_d.view(), renderer.m_texture_views_h.ptr(), renderer.m_texture_views_d.ptr() ); fprintf(stderr, " creatign mesh lights... done\n"); const uint32 queue_size = ::max(n_pixels, n_light_paths) * (2 + m_options.max_path_length); // pre-alloc all buffers m_queues.alloc(n_pixels, n_light_paths, m_options.max_path_length); // build the set of shifts const uint32 n_dimensions = (m_options.max_path_length + 1) * 2 * 6; fprintf(stderr, " initializing sampler: %u dimensions\n", n_dimensions); m_sequence.setup(n_dimensions, SHIFT_RES); fprintf(stderr, " allocating light vertex storage... started (%u paths, %u vertices)\n", n_light_paths, n_light_paths * m_options.max_path_length); m_light_vertices.alloc(n_light_paths, n_light_paths * m_options.max_path_length); fprintf(stderr, " allocating light vertex storage... 
done\n"); if (m_options.use_vpls) hipMemcpy(m_light_vertices.vertex.ptr(), cugar::raw_pointer(renderer.m_mesh_lights.vpls), sizeof(VPL)*n_light_paths, hipMemcpyDeviceToDevice); fprintf(stderr, " allocating bpt connections... started\n"); #if SINGLE_CONNECTION m_connections_value.alloc(n_pixels * (m_options.max_path_length + 1)); m_connections_index.alloc(n_pixels * (m_options.max_path_length + 1)); m_connections_cdf.alloc(n_pixels * (m_options.max_path_length + 1)); #else m_connections_value.alloc(n_pixels * m_options.max_path_length * (m_options.max_path_length + 1)); m_connections_index.alloc(n_pixels * m_options.max_path_length * (m_options.max_path_length + 1)); m_connections_cdf.alloc(n_pixels * m_options.max_path_length * (m_options.max_path_length + 1)); #endif m_connections_counter.alloc(1); m_seeds.alloc(n_chains); fprintf(stderr, " allocating bpt connections... done\n"); fprintf(stderr, " allocating chain storage... started\n"); m_mut_u.alloc(n_chains*((m_options.max_path_length + 1) * 3 * 2 + 2)); m_light_u.alloc(n_chains*(m_options.max_path_length + 1) * 3); m_eye_u.alloc(n_chains*(m_options.max_path_length + 1) * 3); m_path_value.alloc(n_chains * 2); m_path_pdf.alloc(n_chains * 2); m_rejections.alloc(n_chains + 1); m_vertices.alloc(n_chains*(m_options.max_path_length + 1) * 4); m_st_counters.alloc((m_options.max_path_length + 2)*(m_options.max_path_length + 2)); m_st_norms.alloc((m_options.max_path_length + 2)*(m_options.max_path_length + 2)); m_st_norms_cdf.alloc((m_options.max_path_length + 2)*(m_options.max_path_length + 2)); fprintf(stderr, " allocating chain storage... done\n"); m_n_init_light_paths = n_light_paths; m_n_init_paths = n_pixels; } struct max_comp_functor { typedef float4 argument_type; typedef float result_type; FERMAT_HOST_DEVICE float operator() (const argument_type c) { return cugar::max3(c.x, c.y, c.z); } }; // sample seed paths from the stored bidirectional connections // void CMLT::sample_seeds(const uint32 n_chains) { cugar::device_vector<uint8> temp_storage; // compute the connections CDF cugar::inclusive_scan<cugar::device_tag>( m_n_connections, thrust::make_transform_iterator(m_connections_value.ptr(), max_comp_functor()), m_connections_cdf.ptr(), thrust::plus<float>(), temp_storage); // zero out the stats hipMemset(m_st_counters.ptr(), 0x00, sizeof(uint32)*(m_options.max_path_length + 2)*(m_options.max_path_length + 2)); // resample n_chains of them ::sample_seeds(m_random.next(), m_n_connections, m_connections_cdf.ptr(), m_connections_index.ptr(), n_chains, m_seeds.ptr(), m_st_counters.ptr(), m_options.max_path_length); // and sort them cugar::radix_sort<cugar::device_tag>(n_chains, m_seeds.ptr(), temp_storage); m_image_brightness = m_connections_cdf[m_n_connections - 1] / float(m_n_init_paths); } // recover primary sample space coordinates of the sampled light and eye subpaths // void CMLT::recover_primary_coordinates(CMLTContext& context, RendererView& renderer_view) { ::recover_primary_coordinates(context.n_chains, m_seeds.ptr(), m_n_init_light_paths, context, renderer_view ); } // build a CDF on st_norms void CMLT::build_st_norms_cdf() { float st_norms[1024]; float st_norms_cdf[1024] = { 0 }; hipMemcpy(st_norms, m_st_norms.ptr(), sizeof(float)*(m_options.max_path_length + 2)*(m_options.max_path_length + 2), hipMemcpyDeviceToHost); for (uint32 k = 1; k <= m_options.max_path_length; ++k) { // consider all the (k+2) paths of length k for (uint32 s = 0; s < k + 2; ++s) { const uint32 t = k + 1 - s; const float norm = st_norms[s + 
t*(m_options.max_path_length + 2)] / float(m_n_init_paths); st_norms_cdf[k * (m_options.max_path_length + 2) + s] = norm; } // compute the cumulative sum for (uint32 i = 1; i < k + 2; ++i) st_norms_cdf[k * (m_options.max_path_length + 2) + i] += st_norms_cdf[k * (m_options.max_path_length + 2) + i - 1]; // and normalize it const float inv_sum = 1.0f / st_norms_cdf[k * (m_options.max_path_length + 2) + k + 1]; for (uint32 i = 0; i < k + 2; ++i) st_norms_cdf[k * (m_options.max_path_length + 2) + i] *= inv_sum; //for (uint32 i = 0; i < k + 2; ++i) // fprintf(stderr, "cdf[%u][%u] = %f\n", k, i, st_norms_cdf[k * (m_options.max_path_length + 2) + i]); //fgetc(stdin); } hipMemcpy(m_st_norms_cdf.ptr(), st_norms_cdf, sizeof(float)*(m_options.max_path_length + 2)*(m_options.max_path_length + 2), hipMemcpyHostToDevice); } void CMLT::render(const uint32 instance, Renderer& renderer) { // clear the global timer at instance zero if (instance == 0) m_time = 0.0f; // fetch the renderer view RendererView renderer_view = renderer.view(instance); // pre-multiply the previous frame for blending renderer.multiply_frame(float(instance) / float(instance + 1)); cugar::Timer timer; float chart_swap_time = 0.0f; float presampling_time = 0.0f; float seed_resampling_time = 0.0f; const uint2 res = renderer.res(); const uint32 n_pixels = res.x * res.y; const uint32 n_chains = m_options.n_chains; const uint32 chain_length = (m_options.spp * n_pixels) / n_chains; // initialize the sampling sequence for this frame m_sequence.set_instance(instance); // setup our BPT context CMLTContext context(*this, renderer_view); bool do_reseeding = (instance % m_options.reseeding) == 0; // perform the BPT presampling pass if (do_reseeding) { timer.start(); // reset the connections counter hipMemset(context.connections_counter, 0x00, sizeof(uint32)); // zero out the stats hipMemset(m_st_norms.ptr(), 0x00, sizeof(float)*(context.options.max_path_length + 2)*(context.options.max_path_length + 2)); TiledLightSubpathPrimaryCoords light_primary_coords(context.sequence); PerPixelEyeSubpathPrimaryCoords eye_primary_coords(context.sequence, renderer.m_res_x, renderer.m_res_y); CMLTPresamplingBPTConfig config(context); ConnectionsSink connections_sink; bpt::sample_paths( m_n_init_paths, m_n_init_light_paths, eye_primary_coords, light_primary_coords, connections_sink, context, config, renderer, renderer_view); timer.stop(); presampling_time = timer.seconds(); timer.start(); // fetch the number of connections we found hipMemcpy(&m_n_connections, context.connections_counter, sizeof(uint32), hipMemcpyDeviceToHost); //fprintf(stderr, " n_connections: %u\n", m_n_connections); // exit if we didn't find any valid path if (m_n_connections == 0) return; sample_seeds(n_chains); // print out the chain stats if (instance == 0) { fprintf(stderr, " image brightness: %f\n", m_image_brightness); fprintf(stderr, " st chains\n"); uint32 st_counters[1024]; float st_norms[1024]; hipMemcpy(st_counters, m_st_counters.ptr(), sizeof(uint32)*(m_options.max_path_length + 2)*(m_options.max_path_length + 2), hipMemcpyDeviceToHost); hipMemcpy(st_norms, m_st_norms.ptr(), sizeof(float)*(m_options.max_path_length + 2)*(m_options.max_path_length + 2), hipMemcpyDeviceToHost); for (uint32 s = 0; s < m_options.max_path_length; ++s) { for (uint32 t = 0; t < m_options.max_path_length + 2; ++t) { if (s + t <= m_options.max_path_length + 1) fprintf(stderr, " [%u,%u] : %7u, %f\n", s, t, st_counters[s + t*(m_options.max_path_length + 2)], st_norms[s + t*(m_options.max_path_length + 2)] / 
float(m_n_init_paths)); } } } // build a CDF on st_norms build_st_norms_cdf(); // setup the normalization constant context.pdf_norm = m_image_brightness * float(res.x * res.y) / float((chain_length - context.options.startup_skips) * n_chains); // recover the primary coordinates of the selected seed paths recover_primary_coordinates(context, renderer_view); CUDA_CHECK(cugar::cuda::sync_and_check_error("recover primary coordinates")); // initialize the initial path pdfs to zero hipMemset(context.path_pdf, 0x00, sizeof(float)*n_chains); // initialize the rejection counters to zero hipMemset(context.rejections, 0x00, sizeof(uint32)*n_chains); timer.stop(); seed_resampling_time = timer.seconds(); } timer.start(); // initialize the random generator DeviceRandomSequence sequence( cugar::hash(instance) ); for (uint32 l = 0; l < chain_length; ++l) { // disable any kind of mutations for the first step, and enable them later on context.mutation_type = (l == 0 && do_reseeding) ? PerturbedPrimaryCoords::Null : PerturbedPrimaryCoords::CauchyPerturbation; // enable accumulation only after a few steps context.enable_accumulation = (l >= context.options.startup_skips) ? true : false; if (!do_reseeding || l > 0) { // generate all the random numbers needed for mutations sequence.next(n_chains*((context.options.max_path_length + 1) * 2 * 3 + 2), context.mut_u); } if (m_options.mmlt_frequency && l && ((l % m_options.mmlt_frequency) == 0)) { // set all mutation components to zero, in order to keep path coordinates unchanged (being careful to leave the one used for the acceptance/rejection test out) //hipMemset(context.mut_u, 0x00, n_chains*((m_options.max_path_length + 1) * 3)); // propose a swap mmlt_swap(context, renderer_view); } // reset the output queue counters hipMemset(context.shadow_queue.size, 0x00, sizeof(uint32)); hipMemset(context.scatter_queue.size, 0x00, sizeof(uint32)); // sample a set of bidirectional paths corresponding to our current primary coordinates CMLTChainBPTConfig config(context); PerturbedPrimaryCoords light_primary_coords = context.light_primary_coords(); PerturbedPrimaryCoords eye_primary_coords = context.eye_primary_coords(); AccumulateRejectSink accept_reject_sink; // initialize the initial new path values to zero hipMemset(context.new_path_value, 0x00, sizeof(float4)*n_chains); bpt::sample_paths( context.n_chains, context.n_chains, eye_primary_coords, light_primary_coords, accept_reject_sink, context, config, renderer, renderer_view, true); // lazy shadows accept_reject_mlt( context, renderer_view ); if (m_options.swap_frequency && l && ((l % m_options.swap_frequency) == 0)) { cugar::ScopedTimer<float> chart_swap_timer( &chart_swap_time ); // generate all the random numbers needed for mutations sequence.next(n_chains*((m_options.max_path_length + 1) * 3 * 2 + 2), context.mut_u); // perform a technique swap chart_swap(context, renderer_view); CUDA_CHECK(cugar::cuda::sync_and_check_error("technique swap")); } } uint32 max_iter_stuck = 0; hipMemcpy(&max_iter_stuck, context.rejections + n_chains, sizeof(uint32), hipMemcpyDeviceToHost); timer.stop(); const float mlt_time = timer.seconds(); m_time += presampling_time + seed_resampling_time + mlt_time; fprintf(stderr, "\r %.1fs (%.1fms = init: %.1fms, seed: %.1fms, mut: %.1fms, c-swaps: %.1fms, stuck: %u) ", m_time, (presampling_time + seed_resampling_time + mlt_time) * 1000.0f, presampling_time * 1000.0f, seed_resampling_time * 1000.0f, mlt_time * 1000.0f, chart_swap_time * 1000.0f, max_iter_stuck); }
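// ---------------------------------------------------------------------------
// [Illustrative sketch, not part of the original hipified file] The
// accept_reject_accumulate() device function above implements the
// "expectations" form of Metropolis-Hastings on the GPU: both the current and
// the proposed path are splatted into the framebuffer, weighted by (1 - a) and
// a respectively, and the chain moves to the proposal with probability a. The
// real kernel additionally scales the acceptance test by the relative (s,t)
// technique normalization (st_norms) and blends over frames; those factors are
// omitted in this minimal host-side equivalent, and all names below
// (ChainSample, Framebuffer, mh_accumulate_step) are hypothetical.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <cstdint>
#include <random>
#include <vector>

struct ChainSample { float value; float pdf; uint32_t pixel; };   // one scalar channel for brevity
using Framebuffer = std::vector<float>;

void mh_accumulate_step(ChainSample&       chain,      // current state, updated in place
                        const ChainSample& proposal,   // newly generated bidirectional path
                        float              pdf_norm,   // image-brightness normalization constant
                        Framebuffer&       fb,
                        std::mt19937&      rng)
{
    std::uniform_real_distribution<float> U(0.0f, 1.0f);

    // acceptance probability a = min(1, f(new) / f(old)), using the scalar importance as pdf proxy
    const float a = chain.pdf > 0.0f ? std::min(1.0f, proposal.pdf / chain.pdf) : 1.0f;

    // accumulate both samples, weighted by their rejection / acceptance probabilities
    if (chain.pdf    > 0.0f) fb[chain.pixel]    += pdf_norm * (1.0f - a) * chain.value    / chain.pdf;
    if (proposal.pdf > 0.0f) fb[proposal.pixel] += pdf_norm * a          * proposal.value / proposal.pdf;

    // move the chain to the proposal with probability a
    if (U(rng) < a)
        chain = proposal;
}
// Design note: splatting both states with weights (1 - a) and a has the same
// expectation as plain MH accumulation but lower variance, which is why the
// kernel accumulates the rejected state instead of discarding it.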
63282317eada3600e846074a02a5df9440a4de5b.cu
/* * Fermat * * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <cmlt.h> #include <renderer.h> #include <optix_prime/optix_primepp.h> #include <mesh/MeshStorage.h> #include <cugar/basic/timer.h> #include <cugar/basic/cuda/timer.h> #include <cugar/basic/primitives.h> #include <cugar/sampling/random.h> #include <cugar/sampling/distributions.h> #include <bsdf.h> #include <edf.h> #include <bpt_context.h> #include <bpt_control.h> #include <bpt_samplers.h> #include <random_sequence.h> #include <path_inversion.h> #include <ray_queues.h> #include <vector> #define SHIFT_RES 256u #define DEBUG_PIXEL (714 + 66*1600) namespace { ///@addtogroup Fermat ///@{ ///@addtogroup CMLTModule ///@{ FERMAT_HOST_DEVICE FERMAT_FORCEINLINE float bounded_access(const float* vec, const uint32 i) { return (i == uint32(-1)) ? 0.0f : vec[i]; } FERMAT_HOST_DEVICE uint32 chain_coordinate_index(const uint32 n_chains, const uint32 idx, const uint32 dim) { return dim*n_chains + idx; } struct StridedRandoms { FERMAT_HOST_DEVICE StridedRandoms(const float* _u, const uint32 _stride) : u(_u), stride(_stride) {} FERMAT_HOST_DEVICE float next() { const float r = *u; u += stride; return r; } const float* u; uint32 stride; }; ///@} CMLT ///@} Fermat } // anonymous namespace ///@addtogroup Fermat ///@{ ///@addtogroup CMLTModule ///@{ /// The CUDA context class for the CMLT renderer /// struct CMLTContext : BPTContextBase { CMLTContext( CMLT& _cmlt, const RendererView& _renderer) : BPTContextBase( _renderer, _cmlt.m_light_vertices.view(), _cmlt.m_queues.view(_cmlt.m_n_init_paths, _cmlt.m_n_init_light_paths)), options (_cmlt.m_options), sequence (_cmlt.m_sequence.view()), connections_value (_cmlt.m_connections_value.ptr()), connections_index (_cmlt.m_connections_index.ptr()), connections_counter (_cmlt.m_connections_counter.ptr()), st_norms (_cmlt.m_st_norms.ptr()), st_norms_cdf (_cmlt.m_st_norms_cdf.ptr()), seeds (_cmlt.m_seeds.ptr()), st (reinterpret_cast<char4*>(_cmlt.m_seeds.ptr())), // NOTE: aliased to seeds! 
mut_u (_cmlt.m_mut_u.ptr()), light_u (_cmlt.m_light_u.ptr()), eye_u (_cmlt.m_eye_u.ptr()), vertices_l (_cmlt.m_vertices.ptr()), vertices_e (vertices_l + _cmlt.m_options.n_chains * (_cmlt.m_options.max_path_length + 1)), mut_vertices_l (vertices_e + _cmlt.m_options.n_chains * (_cmlt.m_options.max_path_length + 1)), mut_vertices_e (mut_vertices_l + _cmlt.m_options.n_chains * (_cmlt.m_options.max_path_length + 1)), path_value (_cmlt.m_path_value.ptr()), path_pdf (_cmlt.m_path_pdf.ptr()), new_path_value (_cmlt.m_path_value.ptr() + _cmlt.m_options.n_chains), new_path_st (reinterpret_cast<char2*>(_cmlt.m_path_pdf.ptr() + _cmlt.m_options.n_chains)), rejections (_cmlt.m_rejections.ptr()), n_chains (_cmlt.m_options.n_chains), mutation_type (PerturbedPrimaryCoords::CauchyPerturbation), enable_accumulation (true) {} CMLTOptions options; TiledSequenceView sequence; float4* connections_value; uint4* connections_index; uint32* connections_counter; float* st_norms; float* st_norms_cdf; uint32* seeds; char4* st; float* mut_u; float* light_u; float* eye_u; VertexGeometryId* vertices_l; VertexGeometryId* vertices_e; VertexGeometryId* mut_vertices_l; VertexGeometryId* mut_vertices_e; float4* new_path_value; char2* new_path_st; float4* path_value; float* path_pdf; uint32* rejections; uint32 n_chains; float pdf_norm; PerturbedPrimaryCoords::Type mutation_type; bool enable_accumulation; FERMAT_HOST_DEVICE FERMAT_FORCEINLINE float u_ar(const uint32 chain_id) const { return mut_u ? mut_u[chain_coordinate_index(n_chains, chain_id, (options.max_path_length + 1) * 3 * 2)] : 0.5f; } FERMAT_HOST_DEVICE FERMAT_FORCEINLINE float& u_L(const uint32 chain_id, const uint32 dim) { return light_u[chain_coordinate_index(n_chains, chain_id, dim)]; } FERMAT_HOST_DEVICE FERMAT_FORCEINLINE float& u_E(const uint32 chain_id, const uint32 dim) { return eye_u[chain_coordinate_index(n_chains, chain_id, dim)]; } FERMAT_HOST_DEVICE FERMAT_FORCEINLINE PerturbedPrimaryCoords light_primary_coords() const { return PerturbedPrimaryCoords( n_chains, light_u, 0u, mut_u, 0u, options.light_perturbations ? mutation_type : PerturbedPrimaryCoords::Null, options.perturbation_radius); } FERMAT_HOST_DEVICE FERMAT_FORCEINLINE PerturbedPrimaryCoords eye_primary_coords() const { return PerturbedPrimaryCoords( n_chains, eye_u, 0u, mut_u, (options.max_path_length + 1), options.eye_perturbations ? mutation_type : PerturbedPrimaryCoords::Null, options.perturbation_radius); } }; ///@} CMLTModule ///@} Fermat namespace { // anonymous namespace ///@addtogroup Fermat ///@{ ///@addtogroup CMLTModule ///@{ struct CMLTPresamplingBPTConfig : BPTConfigBase { FERMAT_HOST_DEVICE FERMAT_FORCEINLINE CMLTPresamplingBPTConfig(CMLTContext& _context) : BPTConfigBase( _context.options, VertexSampling::kAll, _context.options.single_connection ? 
VertexOrdering::kRandomOrdering : VertexOrdering::kPathOrdering, VertexSampling::kAll, _context.options.rr) {} }; struct CMLTChainBPTConfig : BPTConfigBase { FERMAT_HOST_DEVICE FERMAT_FORCEINLINE CMLTChainBPTConfig(CMLTContext& _context) : BPTConfigBase( _context.options, VertexSampling::kEnd, VertexOrdering::kPathOrdering, VertexSampling::kEnd, _context.options.rr), st(_context.st) {} FERMAT_HOST_DEVICE FERMAT_FORCEINLINE bool terminate_light_subpath(const uint32 path_id, const uint32 s) const { return s == st[path_id].z; } //bool terminate_light_subpath(const uint32 path_id, const uint32 s) const { return (st[path_id].z == 0) || s >= BPTConfigBase::max_path_length + 1; } FERMAT_HOST_DEVICE FERMAT_FORCEINLINE bool terminate_eye_subpath(const uint32 path_id, const uint32 t) const { return t == st[path_id].w; } //bool terminate_eye_subpath(const uint32 path_id, const uint32 t) const { return t + st[path_id].z >= BPTConfigBase::max_path_length + 1; } FERMAT_HOST_DEVICE FERMAT_FORCEINLINE bool store_light_vertex(const uint32 path_id, const uint32 s, const bool absorbed) const { return (s == st[path_id].z); } /*bool store_light_vertex(const uint32 path_id, const uint32 s, const bool absorbed) const { if (absorbed) { // store the new path length st[path_id].z = s; } return absorbed; }*/ FERMAT_HOST_DEVICE FERMAT_FORCEINLINE bool perform_connection(const uint32 path_id, const uint32 t, const bool absorbed) const { return t == st[path_id].w && st[path_id].z > 0; } //bool perform_connection(const uint32 path_id, const uint32 t, const bool absorbed) const { return absorbed == true && st[path_id].z > 0; } FERMAT_HOST_DEVICE FERMAT_FORCEINLINE bool accumulate_emissive(const uint32 path_id, const uint32 t, const bool absorbed) const { return t == st[path_id].w && st[path_id].z == 0; } //bool accumulate_emissive(const uint32 path_id, const uint32 t, const bool absorbed) const { return absorbed == true && st[path_id].z == 0; } // store the compact vertex information FERMAT_HOST_DEVICE FERMAT_FORCEINLINE void visit_light_vertex( const uint32 path_id, const uint32 depth, const VertexGeometryId v, CMLTContext& context, RendererView& renderer) const { context.mut_vertices_l[path_id + depth * context.n_chains] = v; } // store the compact vertex information FERMAT_HOST_DEVICE FERMAT_FORCEINLINE void visit_eye_vertex( const uint32 path_id, const uint32 depth, const VertexGeometryId v_id, const EyeVertex& v, CMLTContext& context, RendererView& renderer) const { context.mut_vertices_e[path_id + depth * context.n_chains] = v_id; } /*const*/ char4* st; }; FERMAT_DEVICE FERMAT_FORCEINLINE void reject_accumulate(const uint32 chain_id, CMLTContext& context, RendererView& renderer) { // TODO: keep track of the old sink! 
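	// reject_accumulate() handles the zero-acceptance case: the proposal contributed nothing
	// (acceptance probability a = 0), so the chain keeps its current state, the current sample
	// is re-splatted at its pixel with full weight pdf_norm * value / pdf (blended over frames),
	// and the per-chain rejection counter is incremented; the atomicMax at the end tracks the
	// most-stuck chain across the whole set.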
const float old_pdf = context.path_pdf[chain_id]; // perform the MH acceptance-rejection test const cugar::Vector2f old_uv( context.u_E(chain_id, 3 + 0), context.u_E(chain_id, 3 + 1)); const uint32 old_pixel_x = cugar::quantize(old_uv.x, renderer.res_x); const uint32 old_pixel_y = cugar::quantize(old_uv.y, renderer.res_y); const uint32 old_pixel = old_pixel_x + old_pixel_y*renderer.res_x; if (context.enable_accumulation) { if (old_pdf > 0) { const float4 old_value = context.path_value[chain_id]; cugar::atomic_add(&renderer.fb(FBufferDesc::COMPOSITED_C, old_pixel).x, context.pdf_norm * (old_value.x / old_pdf) / float(renderer.instance + 1)); cugar::atomic_add(&renderer.fb(FBufferDesc::COMPOSITED_C, old_pixel).y, context.pdf_norm * (old_value.y / old_pdf) / float(renderer.instance + 1)); cugar::atomic_add(&renderer.fb(FBufferDesc::COMPOSITED_C, old_pixel).z, context.pdf_norm * (old_value.z / old_pdf) / float(renderer.instance + 1)); } } // increase the rejections counter context.rejections[chain_id]++; //if ((context.rejections[chain_id] % 20) == 0) //if (context.rejections[chain_id] > 20 && context.rejections[chain_id] > context.rejections[context.n_chains]) // printf("chain[%u,%u:%u] stuck for %u iterations (val: %f)\n", context.st[chain_id].z, context.st[chain_id].w, chain_id, context.rejections[chain_id], old_pdf); atomicMax( &context.rejections[context.n_chains], context.rejections[chain_id] ); } FERMAT_DEVICE FERMAT_FORCEINLINE void accept_reject_accumulate(const uint32 chain_id, const uint32 s, const uint32 t, const cugar::Vector4f w, CMLTContext& context, RendererView& renderer) { // perform the MH acceptance-rejection test const float new_pdf = cugar::max_comp(w.xyz()); const float old_pdf = context.path_pdf[chain_id]; if (new_pdf == 0.0f) { reject_accumulate(chain_id, context, renderer); return; } PerturbedPrimaryCoords eye_primary_coords = context.eye_primary_coords(); const cugar::Vector2f old_uv( eye_primary_coords.u(chain_id, 3 + 0), eye_primary_coords.u(chain_id, 3 + 1)); const cugar::Vector2f new_uv( eye_primary_coords.perturbed_u(chain_id, 3 + 0), eye_primary_coords.perturbed_u(chain_id, 3 + 1)); const uint32 old_pixel_x = cugar::quantize(old_uv.x, renderer.res_x); const uint32 old_pixel_y = cugar::quantize(old_uv.y, renderer.res_y); const uint32 old_pixel = old_pixel_x + old_pixel_y*renderer.res_x; const uint32 new_pixel_x = cugar::quantize(new_uv.x, renderer.res_x); const uint32 new_pixel_y = cugar::quantize(new_uv.y, renderer.res_y); const uint32 new_pixel = new_pixel_x + new_pixel_y*renderer.res_x; const float ar = old_pdf ? 
fminf(1.0f, new_pdf / old_pdf) : 1.0f; if (context.enable_accumulation) { if (old_pdf > 0) { const float4 old_value = context.path_value[chain_id]; cugar::atomic_add(&renderer.fb(FBufferDesc::COMPOSITED_C, old_pixel).x, context.pdf_norm * (1.0f - ar) * (old_value.x / old_pdf) / float(renderer.instance + 1)); cugar::atomic_add(&renderer.fb(FBufferDesc::COMPOSITED_C, old_pixel).y, context.pdf_norm * (1.0f - ar) * (old_value.y / old_pdf) / float(renderer.instance + 1)); cugar::atomic_add(&renderer.fb(FBufferDesc::COMPOSITED_C, old_pixel).z, context.pdf_norm * (1.0f - ar) * (old_value.z / old_pdf) / float(renderer.instance + 1)); } if (new_pdf > 0) { const float4 new_value = w; cugar::atomic_add(&renderer.fb(FBufferDesc::COMPOSITED_C, new_pixel).x, context.pdf_norm * ar * (new_value.x / new_pdf) / float(renderer.instance + 1)); cugar::atomic_add(&renderer.fb(FBufferDesc::COMPOSITED_C, new_pixel).y, context.pdf_norm * ar * (new_value.y / new_pdf) / float(renderer.instance + 1)); cugar::atomic_add(&renderer.fb(FBufferDesc::COMPOSITED_C, new_pixel).z, context.pdf_norm * ar * (new_value.z / new_pdf) / float(renderer.instance + 1)); } } // fetch the old st const uint32 s_old = context.st[chain_id].x; const uint32 t_old = context.st[chain_id].y; const float st_change_ratio = 1.0f; // assume it's symmetric const float st_norms_ratio = (context.st_norms[s + t * (context.options.max_path_length + 2)] / context.st_norms[s_old + t_old * (context.options.max_path_length + 2)]) * st_change_ratio; if (context.u_ar(chain_id) * old_pdf < new_pdf * st_norms_ratio) { context.path_value[chain_id] = w; context.path_pdf[chain_id] = new_pdf; // write out the successful st context.st[chain_id] = make_char4(s, t, s, t); PerturbedPrimaryCoords light_primary_coords = context.light_primary_coords(); // copy the successful mutation coordinates for (uint32 i = 0; i < (context.options.max_path_length + 1) * 3; ++i) light_primary_coords.u(chain_id, i) = light_primary_coords.perturbed_u(chain_id, i); // copy the successful mutation coordinates for (uint32 i = 0; i < (context.options.max_path_length + 1) * 3; ++i) eye_primary_coords.u(chain_id, i) = eye_primary_coords.perturbed_u(chain_id, i); // copy the compact vertex information for (uint32 i = 0; i < s; ++i) context.vertices_l[chain_id + i * context.n_chains] = context.mut_vertices_l[chain_id + i * context.n_chains]; for (uint32 i = 0; i < t; ++i) context.vertices_e[chain_id + i * context.n_chains] = context.mut_vertices_e[chain_id + i * context.n_chains]; // reset the rejections counter context.rejections[chain_id] = 0; } else { // increase the rejections counter context.rejections[chain_id]++; //if ((context.rejections[chain_id] % 20) == 0) //if (context.rejections[chain_id] > 20 && context.rejections[chain_id] > context.rejections[context.n_chains]) // printf("chain[%u,%u:%u] stuck for %u iterations (val: %f, %f)\n", s, t, chain_id, context.rejections[chain_id], old_pdf, new_pdf); atomicMax( &context.rejections[context.n_chains], context.rejections[chain_id] ); } } /// /// The \ref SampleSinkAnchor "Sample Sink" used by the BPT presampling/seeding pass /// struct ConnectionsSink : SampleSinkBase { FERMAT_HOST_DEVICE ConnectionsSink() {} FERMAT_HOST_DEVICE void sink( const uint32 channel, const cugar::Vector4f value, const uint32 light_path_id, const uint32 eye_path_id, const uint32 s, const uint32 t, CMLTContext& context, RendererView& renderer) { if (cugar::max_comp(value.xyz()) > 0.0f) { const uint32 slot = cugar::atomic_add(context.connections_counter, 1); 
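	// lock-free append: the atomic counter above reserves a unique slot in the global
	// connections buffer so that all non-zero (s,t) connections found by the presampling
	// pass can be recorded concurrently; st_norms accumulates the per-technique luminance
	// that is later normalized into the per-path-length CDFs used by the swap kernels.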
context.connections_value[slot] = value; context.connections_index[slot] = make_uint4(light_path_id, eye_path_id, s, t); cugar::atomic_add(context.st_norms + s + t * (context.options.max_path_length + 2), cugar::max_comp(value.xyz())); } } }; /// /// The \ref SampleSinkAnchor "Sample Sink" used the MLT pass /// struct AccumulateRejectSink : SampleSinkBase { FERMAT_HOST_DEVICE AccumulateRejectSink() {} FERMAT_HOST_DEVICE void sink( const uint32 channel, const cugar::Vector4f value, const uint32 light_path_id, const uint32 eye_path_id, const uint32 s, const uint32 t, CMLTContext& context, RendererView& renderer) { context.new_path_value[ eye_path_id ] = value; context.new_path_st[ eye_path_id ] = make_char2(s,t); } }; //------------------------------------------------------------------------------ __global__ void accept_reject_mlt_kernel(CMLTContext context, RendererView renderer) { const uint32 chain_id = threadIdx.x + blockIdx.x * blockDim.x; if (chain_id < context.n_chains) accept_reject_accumulate(chain_id, context.new_path_st[chain_id].x, context.new_path_st[chain_id].y, context.new_path_value[chain_id], context, renderer); } void accept_reject_mlt(CMLTContext context, RendererView renderer) { const uint32 blockSize(128); const dim3 gridSize(cugar::divide_ri(context.n_chains, blockSize)); accept_reject_mlt_kernel << < gridSize, blockSize >> > (context, renderer); } //------------------------------------------------------------------------------ /// implement plain MMLT swaps /// __global__ void mmlt_swap_kernel(CMLTContext context, RendererView renderer) { const uint32 chain_id = threadIdx.x + blockIdx.x * blockDim.x; if (chain_id < context.n_chains) { const uint32 s = context.st[chain_id].x; const uint32 t = context.st[chain_id].y; // setup the random stream for this chain StridedRandoms random(context.mut_u + chain_id, context.n_chains); // generate a candidate (s_new,t_new) pair with s_new + t_new = s + t FERMAT_ASSERT(s + t > 1); #if 0 // select a technique at random const uint32 s_new = cugar::quantize(random.next(), s + t - 1); const float st_change_ratio = 1.0f; #elif 1 // perform a random walk on s const uint32 s_new = random.next() > 0.5f ? (t > 2 ? s + 1 : 0) : (s > 0 ? 
s - 1 : s + t - 2); const float st_change_ratio = 1.0f; #else const float one = cugar::binary_cast<float>(FERMAT_ALMOST_ONE_AS_INT); // propose a technique with the same path length k = s + t - 1 according to their CDF const uint32 k = s + t - 1; const float* st_norms_cdf = context.st_norms_cdf + k * (context.options.max_path_length + 2); const uint32 s_new = cugar::upper_bound_index( fminf( random.next(), one ), st_norms_cdf, k + 2); const float st_change_ratio = (bounded_access(st_norms_cdf, s) - bounded_access(st_norms_cdf, s - 1)) / (bounded_access(st_norms_cdf, s_new) - bounded_access(st_norms_cdf, s_new - 1)); #endif const uint32 t_new = s + t - s_new; FERMAT_ASSERT(s_new < s + t); // NOTE: it should be <= s + t, but we don't support t = 0 FERMAT_ASSERT(s_new + t_new == s + t); // for now, make sure we never generate a technique with less than 2 eye vertices if (t < 2) return; // write the proposed s,t coordinates out context.st[chain_id].z = s_new; context.st[chain_id].w = t_new; } } /// implement plain MMLT swaps /// void mmlt_swap(CMLTContext context, RendererView renderer) { const uint32 blockSize(128); const dim3 gridSize(cugar::divide_ri(context.n_chains, blockSize)); mmlt_swap_kernel << < gridSize, blockSize >> > (context, renderer); } //------------------------------------------------------------------------------ /// implement chart swaps /// __global__ void chart_swap_kernel(CMLTContext context, RendererView renderer) { const uint32 chain_id = threadIdx.x + blockIdx.x * blockDim.x; if (chain_id < context.n_chains) { const uint32 s = context.st[chain_id].x; const uint32 t = context.st[chain_id].y; // setup the random stream for this chain StridedRandoms random(context.mut_u + chain_id, context.n_chains); // generate a candidate (s_new,t_new) pair with s_new + t_new = s + t FERMAT_ASSERT(s + t > 1); #if 0 // select a technique at random const uint32 s_new = cugar::quantize( random.next(), s + t - 1 ); const float st_change_ratio = 1.0f; #elif 0 // perform a random walk on s const uint32 s_new = random.next() > 0.5f ? (t > 2 ? s + 1 : 0) : (s > 0 ? 
s - 1 : s + t - 2); const float st_change_ratio = 1.0f; #else const float one = cugar::binary_cast<float>(FERMAT_ALMOST_ONE_AS_INT); // propose a technique with the same path length k = s + t - 1 according to their CDF const uint32 k = s + t - 1; const float* st_norms_cdf = context.st_norms_cdf + k * (context.options.max_path_length + 2); const uint32 s_new = cugar::min( cugar::upper_bound_index( fminf( random.next(), one ), st_norms_cdf, k + 2 ), k /*+ 1*/ ); // note: it should be k + 1, but we don't support t = 0 const float st_change_ratio = (bounded_access(st_norms_cdf, s) - bounded_access(st_norms_cdf, s - 1)) / (bounded_access(st_norms_cdf, s_new) - bounded_access(st_norms_cdf, s_new - 1)); #endif const uint32 t_new = s + t - s_new; FERMAT_ASSERT(s_new < s + t); // NOTE: it should be <= s + t, but we don't support t = 0 FERMAT_ASSERT(s_new + t_new == s + t); // for now, make sure we never generate a technique with less than 2 eye vertices if (t_new < 2) return; // setup the path wrapper BidirPath path( s, t, context.vertices_l + chain_id, context.vertices_e + chain_id, context.n_chains ); float u_new[32]; float u_old[32]; float pdf_new; const float st_norms_ratio = (context.st_norms[s_new + t_new * (context.options.max_path_length + 2)] / context.st_norms[s + t * (context.options.max_path_length + 2)]) * st_change_ratio; if (s_new > s) { // invert the [s,s_new) vertices of the light subpath if (invert_light_subpath(path, s, s_new, u_new, &pdf_new, renderer, random) == false) return; // compute the eye subpath inversion pdf for the vertices [t_new,t) for (uint32 i = t_new; i < t; ++i) { u_old[(i - t_new) * 3 + 0] = context.u_E(chain_id, i * 3 + 0); u_old[(i - t_new) * 3 + 1] = context.u_E(chain_id, i * 3 + 1); u_old[(i - t_new) * 3 + 2] = context.u_E(chain_id, i * 3 + 2); } const float pdf_old = eye_subpath_inversion_pdf(path, t_new, t, u_old, renderer); //if (random.next() < (pdf_new / pdf_old) * st_norms_ratio) if (random.next() < (pdf_old / pdf_new) * st_norms_ratio) { // accept the proposal context.st[chain_id] = make_char4( s_new, t_new, s_new, t_new ); for (uint32 i = s; i < s_new; ++i) { context.u_L(chain_id, i * 3 + 0) = u_new[(i - s) * 3 + 0]; context.u_L(chain_id, i * 3 + 1) = u_new[(i - s) * 3 + 1]; context.u_L(chain_id, i * 3 + 2) = u_new[(i - s) * 3 + 2]; } } } else if (t_new > t) { // invert the [t,t_new) vertices of the eye subpath if (invert_eye_subpath(path, t, t_new, u_new, &pdf_new, renderer, random) == false) return; // compute the light subpath inversion pdf for the vertices [s_new,s) for (uint32 i = s_new; i < s; ++i) { u_old[(i - s_new) * 3 + 0] = context.u_L(chain_id, i * 3 + 0); u_old[(i - s_new) * 3 + 1] = context.u_L(chain_id, i * 3 + 1); u_old[(i - s_new) * 3 + 2] = context.u_L(chain_id, i * 3 + 2); } const float pdf_old = light_subpath_inversion_pdf(path, s_new, s, u_old, renderer); //if (random.next() < (pdf_new / pdf_old) * st_norms_ratio) if (random.next() < (pdf_old / pdf_new) * st_norms_ratio) { // accept the proposal context.st[chain_id] = make_char4(s_new, t_new, s_new, t_new); for (uint32 i = t; i < t_new; ++i) { context.u_E(chain_id, i * 3 + 0) = u_new[(i - t) * 3 + 0]; context.u_E(chain_id, i * 3 + 1) = u_new[(i - t) * 3 + 1]; context.u_E(chain_id, i * 3 + 2) = u_new[(i - t) * 3 + 2]; } } } } } /// implement chart swaps /// void chart_swap(CMLTContext context, RendererView renderer) { const uint32 blockSize(128); const dim3 gridSize(cugar::divide_ri(context.n_chains, blockSize)); chart_swap_kernel << < gridSize, blockSize >> > (context, 
renderer); } //------------------------------------------------------------------------------ /// sample the seed paths /// __global__ void sample_seeds_kernel(const float random_shift, const uint32 n_connections, const float* connections_cdf, const uint4* connections_index, const uint32 n_seeds, uint32* seeds, uint32* st_counters, const uint32 max_path_length) { const uint32 seed_id = threadIdx.x + blockIdx.x * blockDim.x; if (seed_id < n_seeds) { const float r = (seed_id + random_shift) / float(n_seeds); const uint32 seed = cugar::upper_bound_index( r * connections_cdf[n_connections-1], connections_cdf, n_connections); const uint4 connection = connections_index[seed]; seeds[seed_id] = seed | (connection.z << 24) | (connection.w << 28); // keep stats atomicAdd(st_counters + connection.z + connection.w*(max_path_length + 2), 1u); } } /// sample the seed paths /// void sample_seeds(const float random_shift, const uint32 n_connections, const float* connections_cdf, const uint4* connections_index, const uint32 n_seeds, uint32* seeds, uint32* st_counters, const uint32 max_path_length) { dim3 blockSize(128); dim3 gridSize(cugar::divide_ri(n_seeds, blockSize.x)); sample_seeds_kernel<<< gridSize, blockSize >>>(random_shift, n_connections, connections_cdf, connections_index, n_seeds, seeds, st_counters, max_path_length); } //------------------------------------------------------------------------------ /// recover the primary space coordinates of the sampled seed paths /// __global__ void recover_primary_coordinates_kernel(const uint32 n_seeds, const uint32* seeds, const uint32 n_lights, CMLTContext context, RendererView renderer) { const uint32 seed_id = threadIdx.x + blockIdx.x * blockDim.x; if (seed_id < n_seeds) { const uint32 seed = seeds[seed_id]; const uint4 connection = context.connections_index[seed & 0xFFFFFF]; const uint32 light_idx = connection.x; const uint32 eye_idx = connection.y; const uint32 s = connection.z; const uint32 t = connection.w; TiledLightSubpathPrimaryCoords light_primary_coords(context.sequence); for (uint32 i = 0; i < s; ++i) { if (i == 0) { // the first vertex might be somewhat special if (context.options.use_vpls) context.u_L(seed_id, i * 3 + 0) = (float(light_idx) + light_primary_coords.sample(light_idx, i, 0)) / float(n_lights); else context.u_L(seed_id, i * 3 + 0) = light_primary_coords.sample(light_idx, i, 0); context.u_L(seed_id, i * 3 + 1) = light_primary_coords.sample(light_idx, i, 1); context.u_L(seed_id, i * 3 + 2) = light_primary_coords.sample(light_idx, i, 2); } else { // fetch the regular coordinates context.u_L(seed_id, i * 3 + 0) = light_primary_coords.sample(light_idx, i, 0); context.u_L(seed_id, i * 3 + 1) = light_primary_coords.sample(light_idx, i, 1); context.u_L(seed_id, i * 3 + 2) = light_primary_coords.sample(light_idx, i, 2); } } PerPixelEyeSubpathPrimaryCoords eye_primary_coords(context.sequence, renderer.res_x, renderer.res_y); for (uint32 i = 0; i < t; ++i) { if (i == 0) { // we set the lens sample to (0,0,0) context.u_E(seed_id, i * 3 + 0) = 0; context.u_E(seed_id, i * 3 + 1) = 0; context.u_E(seed_id, i * 3 + 2) = 0; } else { // fetch the regular coordinates context.u_E(seed_id, i * 3 + 0) = eye_primary_coords.sample(eye_idx, i, 0); context.u_E(seed_id, i * 3 + 1) = eye_primary_coords.sample(eye_idx, i, 1); context.u_E(seed_id, i * 3 + 2) = eye_primary_coords.sample(eye_idx, i, 2); } } // write the st info context.st[seed_id] = make_char4(s, t, s, t); } } /// recover the primary space coordinates of the sampled seed paths /// void 
recover_primary_coordinates(const uint32 n_seeds, const uint32* seeds, const uint32 n_lights, CMLTContext context, RendererView renderer) { dim3 blockSize(128); dim3 gridSize(cugar::divide_ri(n_seeds, blockSize.x)); recover_primary_coordinates_kernel <<< gridSize, blockSize >>>(n_seeds, seeds, n_lights, context, renderer); } ///@} CMLT ///@} Fermat } // anonymous namespace CMLT::CMLT() : m_generator(32, cugar::LFSRGeneratorMatrix::GOOD_PROJECTIONS), m_random(&m_generator, 1u, 1351u) { } void CMLT::init(int argc, char** argv, Renderer& renderer) { const uint2 res = renderer.res(); const uint32 n_pixels = res.x * res.y; // parse the options m_options.parse(argc, argv); // if we perform a single connection, RR must be enabled if (m_options.single_connection) m_options.rr = true; // TODO: re-enable when light tracing is implemented m_options.light_tracing = 0.0f; // compute how long our chains are const uint32 chain_length = (m_options.spp * n_pixels) / m_options.n_chains; // check whether how many actual skips we can afford m_options.startup_skips = cugar::min(m_options.startup_skips, chain_length - 1); fprintf(stderr, " CMLT settings:\n"); fprintf(stderr, " spp : %u\n", m_options.spp); fprintf(stderr, " chains : %u\n", m_options.n_chains); fprintf(stderr, " chain-length : %u\n", chain_length); fprintf(stderr, " startup-skips : %u\n", m_options.startup_skips); fprintf(stderr, " charted-swaps : %u\n", m_options.swap_frequency); fprintf(stderr, " mmlt-swaps : %u\n", m_options.mmlt_frequency); fprintf(stderr, " single-conn : %u\n", m_options.single_connection); fprintf(stderr, " RR : %u\n", m_options.rr); fprintf(stderr, " path-length : %u\n", m_options.max_path_length); fprintf(stderr, " direct-nee : %u\n", m_options.direct_lighting_nee ? 1 : 0); fprintf(stderr, " direct-bsdf : %u\n", m_options.direct_lighting_bsdf ? 1 : 0); fprintf(stderr, " indirect-nee : %u\n", m_options.indirect_lighting_nee ? 1 : 0); fprintf(stderr, " indirect-bsdf : %u\n", m_options.indirect_lighting_bsdf ? 1 : 0); fprintf(stderr, " visible-lights : %u\n", m_options.visible_lights ? 1 : 0); fprintf(stderr, " light-tracing : %u\n", m_options.light_tracing ? 1 : 0); // compute the number of light paths const uint32 n_light_paths = n_pixels; const uint32 n_chains = m_options.n_chains; fprintf(stderr, " creatign mesh lights... started\n"); // initialize the mesh lights sampler renderer.m_mesh_lights.init( n_light_paths, renderer.m_mesh.view(), renderer.m_mesh_d.view(), renderer.m_texture_views_h.ptr(), renderer.m_texture_views_d.ptr() ); fprintf(stderr, " creatign mesh lights... done\n"); const uint32 queue_size = std::max(n_pixels, n_light_paths) * (2 + m_options.max_path_length); // pre-alloc all buffers m_queues.alloc(n_pixels, n_light_paths, m_options.max_path_length); // build the set of shifts const uint32 n_dimensions = (m_options.max_path_length + 1) * 2 * 6; fprintf(stderr, " initializing sampler: %u dimensions\n", n_dimensions); m_sequence.setup(n_dimensions, SHIFT_RES); fprintf(stderr, " allocating light vertex storage... started (%u paths, %u vertices)\n", n_light_paths, n_light_paths * m_options.max_path_length); m_light_vertices.alloc(n_light_paths, n_light_paths * m_options.max_path_length); fprintf(stderr, " allocating light vertex storage... done\n"); if (m_options.use_vpls) cudaMemcpy(m_light_vertices.vertex.ptr(), cugar::raw_pointer(renderer.m_mesh_lights.vpls), sizeof(VPL)*n_light_paths, cudaMemcpyDeviceToDevice); fprintf(stderr, " allocating bpt connections... 
started\n"); #if SINGLE_CONNECTION m_connections_value.alloc(n_pixels * (m_options.max_path_length + 1)); m_connections_index.alloc(n_pixels * (m_options.max_path_length + 1)); m_connections_cdf.alloc(n_pixels * (m_options.max_path_length + 1)); #else m_connections_value.alloc(n_pixels * m_options.max_path_length * (m_options.max_path_length + 1)); m_connections_index.alloc(n_pixels * m_options.max_path_length * (m_options.max_path_length + 1)); m_connections_cdf.alloc(n_pixels * m_options.max_path_length * (m_options.max_path_length + 1)); #endif m_connections_counter.alloc(1); m_seeds.alloc(n_chains); fprintf(stderr, " allocating bpt connections... done\n"); fprintf(stderr, " allocating chain storage... started\n"); m_mut_u.alloc(n_chains*((m_options.max_path_length + 1) * 3 * 2 + 2)); m_light_u.alloc(n_chains*(m_options.max_path_length + 1) * 3); m_eye_u.alloc(n_chains*(m_options.max_path_length + 1) * 3); m_path_value.alloc(n_chains * 2); m_path_pdf.alloc(n_chains * 2); m_rejections.alloc(n_chains + 1); m_vertices.alloc(n_chains*(m_options.max_path_length + 1) * 4); m_st_counters.alloc((m_options.max_path_length + 2)*(m_options.max_path_length + 2)); m_st_norms.alloc((m_options.max_path_length + 2)*(m_options.max_path_length + 2)); m_st_norms_cdf.alloc((m_options.max_path_length + 2)*(m_options.max_path_length + 2)); fprintf(stderr, " allocating chain storage... done\n"); m_n_init_light_paths = n_light_paths; m_n_init_paths = n_pixels; } struct max_comp_functor { typedef float4 argument_type; typedef float result_type; FERMAT_HOST_DEVICE float operator() (const argument_type c) { return cugar::max3(c.x, c.y, c.z); } }; // sample seed paths from the stored bidirectional connections // void CMLT::sample_seeds(const uint32 n_chains) { cugar::device_vector<uint8> temp_storage; // compute the connections CDF cugar::inclusive_scan<cugar::device_tag>( m_n_connections, thrust::make_transform_iterator(m_connections_value.ptr(), max_comp_functor()), m_connections_cdf.ptr(), thrust::plus<float>(), temp_storage); // zero out the stats cudaMemset(m_st_counters.ptr(), 0x00, sizeof(uint32)*(m_options.max_path_length + 2)*(m_options.max_path_length + 2)); // resample n_chains of them ::sample_seeds(m_random.next(), m_n_connections, m_connections_cdf.ptr(), m_connections_index.ptr(), n_chains, m_seeds.ptr(), m_st_counters.ptr(), m_options.max_path_length); // and sort them cugar::radix_sort<cugar::device_tag>(n_chains, m_seeds.ptr(), temp_storage); m_image_brightness = m_connections_cdf[m_n_connections - 1] / float(m_n_init_paths); } // recover primary sample space coordinates of the sampled light and eye subpaths // void CMLT::recover_primary_coordinates(CMLTContext& context, RendererView& renderer_view) { ::recover_primary_coordinates(context.n_chains, m_seeds.ptr(), m_n_init_light_paths, context, renderer_view ); } // build a CDF on st_norms void CMLT::build_st_norms_cdf() { float st_norms[1024]; float st_norms_cdf[1024] = { 0 }; cudaMemcpy(st_norms, m_st_norms.ptr(), sizeof(float)*(m_options.max_path_length + 2)*(m_options.max_path_length + 2), cudaMemcpyDeviceToHost); for (uint32 k = 1; k <= m_options.max_path_length; ++k) { // consider all the (k+2) paths of length k for (uint32 s = 0; s < k + 2; ++s) { const uint32 t = k + 1 - s; const float norm = st_norms[s + t*(m_options.max_path_length + 2)] / float(m_n_init_paths); st_norms_cdf[k * (m_options.max_path_length + 2) + s] = norm; } // compute the cumulative sum for (uint32 i = 1; i < k + 2; ++i) st_norms_cdf[k * (m_options.max_path_length + 2) 
+ i] += st_norms_cdf[k * (m_options.max_path_length + 2) + i - 1]; // and normalize it const float inv_sum = 1.0f / st_norms_cdf[k * (m_options.max_path_length + 2) + k + 1]; for (uint32 i = 0; i < k + 2; ++i) st_norms_cdf[k * (m_options.max_path_length + 2) + i] *= inv_sum; //for (uint32 i = 0; i < k + 2; ++i) // fprintf(stderr, "cdf[%u][%u] = %f\n", k, i, st_norms_cdf[k * (m_options.max_path_length + 2) + i]); //fgetc(stdin); } cudaMemcpy(m_st_norms_cdf.ptr(), st_norms_cdf, sizeof(float)*(m_options.max_path_length + 2)*(m_options.max_path_length + 2), cudaMemcpyHostToDevice); } void CMLT::render(const uint32 instance, Renderer& renderer) { // clear the global timer at instance zero if (instance == 0) m_time = 0.0f; // fetch the renderer view RendererView renderer_view = renderer.view(instance); // pre-multiply the previous frame for blending renderer.multiply_frame(float(instance) / float(instance + 1)); cugar::Timer timer; float chart_swap_time = 0.0f; float presampling_time = 0.0f; float seed_resampling_time = 0.0f; const uint2 res = renderer.res(); const uint32 n_pixels = res.x * res.y; const uint32 n_chains = m_options.n_chains; const uint32 chain_length = (m_options.spp * n_pixels) / n_chains; // initialize the sampling sequence for this frame m_sequence.set_instance(instance); // setup our BPT context CMLTContext context(*this, renderer_view); bool do_reseeding = (instance % m_options.reseeding) == 0; // perform the BPT presampling pass if (do_reseeding) { timer.start(); // reset the connections counter cudaMemset(context.connections_counter, 0x00, sizeof(uint32)); // zero out the stats cudaMemset(m_st_norms.ptr(), 0x00, sizeof(float)*(context.options.max_path_length + 2)*(context.options.max_path_length + 2)); TiledLightSubpathPrimaryCoords light_primary_coords(context.sequence); PerPixelEyeSubpathPrimaryCoords eye_primary_coords(context.sequence, renderer.m_res_x, renderer.m_res_y); CMLTPresamplingBPTConfig config(context); ConnectionsSink connections_sink; bpt::sample_paths( m_n_init_paths, m_n_init_light_paths, eye_primary_coords, light_primary_coords, connections_sink, context, config, renderer, renderer_view); timer.stop(); presampling_time = timer.seconds(); timer.start(); // fetch the number of connections we found cudaMemcpy(&m_n_connections, context.connections_counter, sizeof(uint32), cudaMemcpyDeviceToHost); //fprintf(stderr, " n_connections: %u\n", m_n_connections); // exit if we didn't find any valid path if (m_n_connections == 0) return; sample_seeds(n_chains); // print out the chain stats if (instance == 0) { fprintf(stderr, " image brightness: %f\n", m_image_brightness); fprintf(stderr, " st chains\n"); uint32 st_counters[1024]; float st_norms[1024]; cudaMemcpy(st_counters, m_st_counters.ptr(), sizeof(uint32)*(m_options.max_path_length + 2)*(m_options.max_path_length + 2), cudaMemcpyDeviceToHost); cudaMemcpy(st_norms, m_st_norms.ptr(), sizeof(float)*(m_options.max_path_length + 2)*(m_options.max_path_length + 2), cudaMemcpyDeviceToHost); for (uint32 s = 0; s < m_options.max_path_length; ++s) { for (uint32 t = 0; t < m_options.max_path_length + 2; ++t) { if (s + t <= m_options.max_path_length + 1) fprintf(stderr, " [%u,%u] : %7u, %f\n", s, t, st_counters[s + t*(m_options.max_path_length + 2)], st_norms[s + t*(m_options.max_path_length + 2)] / float(m_n_init_paths)); } } } // build a CDF on st_norms build_st_norms_cdf(); // setup the normalization constant context.pdf_norm = m_image_brightness * float(res.x * res.y) / float((chain_length - 
context.options.startup_skips) * n_chains); // recover the primary coordinates of the selected seed paths recover_primary_coordinates(context, renderer_view); CUDA_CHECK(cugar::cuda::sync_and_check_error("recover primary coordinates")); // initialize the initial path pdfs to zero cudaMemset(context.path_pdf, 0x00, sizeof(float)*n_chains); // initialize the rejection counters to zero cudaMemset(context.rejections, 0x00, sizeof(uint32)*n_chains); timer.stop(); seed_resampling_time = timer.seconds(); } timer.start(); // initialize the random generator DeviceRandomSequence sequence( cugar::hash(instance) ); for (uint32 l = 0; l < chain_length; ++l) { // disable any kind of mutations for the first step, and enable them later on context.mutation_type = (l == 0 && do_reseeding) ? PerturbedPrimaryCoords::Null : PerturbedPrimaryCoords::CauchyPerturbation; // enable accumulation only after a few steps context.enable_accumulation = (l >= context.options.startup_skips) ? true : false; if (!do_reseeding || l > 0) { // generate all the random numbers needed for mutations sequence.next(n_chains*((context.options.max_path_length + 1) * 2 * 3 + 2), context.mut_u); } if (m_options.mmlt_frequency && l && ((l % m_options.mmlt_frequency) == 0)) { // set all mutation components to zero, in order to keep path coordinates unchanged (being careful to leave the one used for the acceptance/rejection test out) //cudaMemset(context.mut_u, 0x00, n_chains*((m_options.max_path_length + 1) * 3)); // propose a swap mmlt_swap(context, renderer_view); } // reset the output queue counters cudaMemset(context.shadow_queue.size, 0x00, sizeof(uint32)); cudaMemset(context.scatter_queue.size, 0x00, sizeof(uint32)); // sample a set of bidirectional paths corresponding to our current primary coordinates CMLTChainBPTConfig config(context); PerturbedPrimaryCoords light_primary_coords = context.light_primary_coords(); PerturbedPrimaryCoords eye_primary_coords = context.eye_primary_coords(); AccumulateRejectSink accept_reject_sink; // initialize the initial new path values to zero cudaMemset(context.new_path_value, 0x00, sizeof(float4)*n_chains); bpt::sample_paths( context.n_chains, context.n_chains, eye_primary_coords, light_primary_coords, accept_reject_sink, context, config, renderer, renderer_view, true); // lazy shadows accept_reject_mlt( context, renderer_view ); if (m_options.swap_frequency && l && ((l % m_options.swap_frequency) == 0)) { cugar::ScopedTimer<float> chart_swap_timer( &chart_swap_time ); // generate all the random numbers needed for mutations sequence.next(n_chains*((m_options.max_path_length + 1) * 3 * 2 + 2), context.mut_u); // perform a technique swap chart_swap(context, renderer_view); CUDA_CHECK(cugar::cuda::sync_and_check_error("technique swap")); } } uint32 max_iter_stuck = 0; cudaMemcpy(&max_iter_stuck, context.rejections + n_chains, sizeof(uint32), cudaMemcpyDeviceToHost); timer.stop(); const float mlt_time = timer.seconds(); m_time += presampling_time + seed_resampling_time + mlt_time; fprintf(stderr, "\r %.1fs (%.1fms = init: %.1fms, seed: %.1fms, mut: %.1fms, c-swaps: %.1fms, stuck: %u) ", m_time, (presampling_time + seed_resampling_time + mlt_time) * 1000.0f, presampling_time * 1000.0f, seed_resampling_time * 1000.0f, mlt_time * 1000.0f, chart_swap_time * 1000.0f, max_iter_stuck); }
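// The seed resampling above (sample_seeds) follows a standard pattern: scan the per-connection
// luminances into a CDF, then draw chain seeds proportionally to it; the last CDF entry divided
// by the number of initial paths is the image-brightness estimate that feeds pdf_norm. A minimal
// standalone sketch of that pattern in plain Thrust (max_comp mirrors max_comp_functor above;
// the stratified positions stand in for the renderer's random resampling, and the radix sort of
// the resulting seeds is omitted):
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/scan.h>
#include <thrust/binary_search.h>
#include <thrust/iterator/counting_iterator.h>

struct max_comp {
    __host__ __device__ float operator()(const float4 c) const { return fmaxf(c.x, fmaxf(c.y, c.z)); }
};

struct stratified_pos {
    float total; unsigned int n;
    __host__ __device__ float operator()(unsigned int i) const { return total * (i + 0.5f) / float(n); }
};

void resample_seeds(const thrust::device_vector<float4>& connection_values,
                    thrust::device_vector<unsigned int>& seeds,
                    unsigned int n_chains, unsigned int n_init_paths, float& image_brightness)
{
    const unsigned int n = connection_values.size();

    // per-connection importance, scanned in place into an unnormalized CDF
    thrust::device_vector<float> cdf(n);
    thrust::transform(connection_values.begin(), connection_values.end(), cdf.begin(), max_comp());
    thrust::inclusive_scan(cdf.begin(), cdf.end(), cdf.begin());

    const float total = cdf.back();
    image_brightness = total / float(n_init_paths);

    // place n_chains points along [0, total) and binary-search them in the CDF
    thrust::device_vector<float> u(n_chains);
    thrust::counting_iterator<unsigned int> idx(0);
    thrust::transform(idx, idx + n_chains, u.begin(), stratified_pos{total, n_chains});

    seeds.resize(n_chains);
    thrust::upper_bound(cdf.begin(), cdf.end(), u.begin(), u.end(), seeds.begin());
}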
4e9a996cb521ec113b2fcd1bdff868faa558171d.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <iostream> #include <hip/hip_runtime_api.h> //#include <cutil.h> #include <hip/hip_runtime.h> #include <string> #define SHARED_MEM_ELEMENTS 1024 #define GLOBAL_MEM_ELEMENTS 5660800 // #define GLOBAL_MEM_ELEMENTS 566080 // #define GLOBAL_MEM_ELEMENTS 19660800 int num_blocks; int num_threads_per_block; int num_iterations; int divergence; float* h_A; float* h_B; float* h_C; float* h_res; float* d_A; float* d_B; float* d_C; float* d_res; __global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) { int block_id; int warp_id; int i; int index; int tid = blockDim.x * blockIdx.x + threadIdx.x; void **ptr_array = (void **)my_ptr_array; unsigned long long *array = (unsigned long long *)my_array; if (tid == 0) { int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k; int num_warps_per_block = num_threads_per_block_k / 32; int elements_per_warp = elements_per_block / num_warps_per_block; // int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block; for (block_id = 0; block_id < num_blocks_k; block_id++) { for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) { for (i = 0; i < elements_per_warp; i++) { index = (block_id * elements_per_block) + (warp_id * elements_per_warp); // index = (warp_id * elements_per_warp); ptr_array[index + i] = (void*)&array[(index + ((i + 48) % elements_per_warp))]; } } } /* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) { ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS]; } */ for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) { //array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS]; array[i] = (unsigned long long)ptr_array[i]; } } __syncthreads(); } __global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) { // unsigned long long int start_time, end_time; unsigned long long int sum_time = 0; int i, k; int tid = blockDim.x * blockIdx.x + threadIdx.x; int block_id = blockIdx.x; int warp_id = threadIdx.x / 32; int warp_thread_id = threadIdx.x % 32; int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k; int num_warps_per_block = num_threads_per_block_k / 32; int elements_per_warp = elements_per_block / num_warps_per_block; // int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block; int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id; // int index1 = (warp_id * elements_per_warp) + warp_thread_id; void **ptr_array = (void **)my_ptr_array; unsigned long long int *array = (unsigned long long int *)my_array; void **tmp_ptr; //tmp_ptr = (void *)sdata; //tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS])); //tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS])); //tmp_ptr = (void **)(&(ptr_array[index1])); tmp_ptr = (void **)(&(array[index1])); double f1, f2, f3; f1 = 1.1; f2 = 2.5; if (warp_thread_id < divergence) { /* __asm volatile ( ".reg .f32 %r14;\n\t" "mov.f32 %r14, 2.2;\n\t" ); */ for (int l = 0; l < iterations; l++) { f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = 
(void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = 
(void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); } } // __syncthreads(); // if ((blockDim.x * blockIdx.x + threadIdx.x) == 0) duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid); // __syncthreads(); } void usage() { std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl; } void parametric_measure_shared(int N, int iterations, int stride) { hipProfilerStop(); int i; unsigned 
long long int * h_a; unsigned long long int * d_a; unsigned long long ** h_ptr_a; unsigned long long ** d_ptr_a; unsigned long long * duration; unsigned long long * latency; hipError_t error_id; /* allocate array on CPU */ h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N); h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N); latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks); /* initialize array elements on CPU */ for (i = 0; i < N; i++) { h_ptr_a[i] = (unsigned long long *)&h_a[i]; } for (i = 0; i < N; i++) { h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N]; } /* allocate arrays on GPU */ hipMalloc ((void **) &d_a, sizeof(unsigned long long int) * N ); hipMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N ); hipMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks); hipDeviceSynchronize (); error_id = hipGetLastError(); if (error_id != hipSuccess) { printf("Error 1 is %s\n", hipGetErrorString(error_id)); } /* copy array elements from CPU to GPU */ hipMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, hipMemcpyHostToDevice); hipMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, hipMemcpyHostToDevice); hipMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyHostToDevice); hipDeviceSynchronize (); error_id = hipGetLastError(); if (error_id != hipSuccess) { printf("Error 2 is %s\n", hipGetErrorString(error_id)); } hipLaunchKernelGGL(( init_memory) , dim3(1), dim3(1), 0, 0, d_ptr_a, d_a, stride, num_blocks, num_threads_per_block); hipDeviceSynchronize(); /* launch kernel*/ //dim3 Db = dim3(13); //dim3 Dg = dim3(768,1,1); //printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride); // int sharedMemSize = sizeof(unsigned long long int) * N ; hipEvent_t start, stop; float time; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipProfilerStart(); hipFuncSetCacheConfig(shared_latency, hipFuncCachePreferL1); //shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration); //shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence); hipLaunchKernelGGL(( shared_latency) , dim3(num_blocks), dim3(num_threads_per_block), 0, 0, d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block); hipDeviceSynchronize(); ///hipDeviceSynchronize (); hipProfilerStop(); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); error_id = hipGetLastError(); if (error_id != hipSuccess) { printf("Error 3 is %s\n", hipGetErrorString(error_id)); } /* copy results from GPU to CPU */ hipMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, hipMemcpyDeviceToHost); hipMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyDeviceToHost); hipDeviceSynchronize (); /* print results*/ unsigned long long max_dur = latency[0]; unsigned long long min_dur = latency[0]; unsigned long long avg_lat = latency[0]; for (int i = 1; i < num_threads_per_block * num_blocks; i++) { avg_lat += latency[i]; if (latency[i] > max_dur) { max_dur = latency[i]; } else if (latency[i] < min_dur) { min_dur = latency[i]; } } // printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 
256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time); printf("%f\n", time); /* free memory on GPU */ hipFree(d_a); hipFree(d_ptr_a); hipFree(duration); hipDeviceSynchronize (); /*free memory on CPU */ free(h_a); free(h_ptr_a); free(latency); } int main(int argc, char **argv) { int N; if (argc != 6) { usage(); exit(1); } num_blocks = atoi(argv[1]); num_threads_per_block = atoi(argv[2]); num_iterations = atoi(argv[3]); divergence = atoi(argv[4]); int stride = atoi(argv[5]); N = GLOBAL_MEM_ELEMENTS; parametric_measure_shared(N, 10, stride); return 0; }
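// The benchmark above measures global-memory latency by pointer chasing: init_memory stores in
// each array element the device address of the next element to visit, so every load in
// shared_latency depends on the previous one and cannot be overlapped. The core of the idea,
// reduced to a single-thread sketch (assumes the array has already been linked the way
// init_memory does it above):
__global__ void chase(unsigned long long* array, int hops, unsigned long long* out)
{
    void** p = (void**)&array[0];
    for (int i = 0; i < hops; ++i)
        p = (void**)*p;                  // each dereference must wait for the previous load
    *out = (unsigned long long)p;        // keep the chain live so the loop is not optimized away
}
// launched as chase<<<1, 1>>>(d_array, hops, d_out); the divergence parameter of the real kernel
// simply controls how many lanes of each warp run this dependent-load loop.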
4e9a996cb521ec113b2fcd1bdff868faa558171d.cu
#include <stdio.h> #include <iostream> #include <cuda_profiler_api.h> //#include <cutil.h> #include <cuda_runtime.h> #include <string> #define SHARED_MEM_ELEMENTS 1024 #define GLOBAL_MEM_ELEMENTS 5660800 // #define GLOBAL_MEM_ELEMENTS 566080 // #define GLOBAL_MEM_ELEMENTS 19660800 int num_blocks; int num_threads_per_block; int num_iterations; int divergence; float* h_A; float* h_B; float* h_C; float* h_res; float* d_A; float* d_B; float* d_C; float* d_res; __global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) { int block_id; int warp_id; int i; int index; int tid = blockDim.x * blockIdx.x + threadIdx.x; void **ptr_array = (void **)my_ptr_array; unsigned long long *array = (unsigned long long *)my_array; if (tid == 0) { int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k; int num_warps_per_block = num_threads_per_block_k / 32; int elements_per_warp = elements_per_block / num_warps_per_block; // int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block; for (block_id = 0; block_id < num_blocks_k; block_id++) { for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) { for (i = 0; i < elements_per_warp; i++) { index = (block_id * elements_per_block) + (warp_id * elements_per_warp); // index = (warp_id * elements_per_warp); ptr_array[index + i] = (void*)&array[(index + ((i + 48) % elements_per_warp))]; } } } /* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) { ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS]; } */ for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) { //array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS]; array[i] = (unsigned long long)ptr_array[i]; } } __syncthreads(); } __global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) { // unsigned long long int start_time, end_time; unsigned long long int sum_time = 0; int i, k; int tid = blockDim.x * blockIdx.x + threadIdx.x; int block_id = blockIdx.x; int warp_id = threadIdx.x / 32; int warp_thread_id = threadIdx.x % 32; int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k; int num_warps_per_block = num_threads_per_block_k / 32; int elements_per_warp = elements_per_block / num_warps_per_block; // int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block; int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id; // int index1 = (warp_id * elements_per_warp) + warp_thread_id; void **ptr_array = (void **)my_ptr_array; unsigned long long int *array = (unsigned long long int *)my_array; void **tmp_ptr; //tmp_ptr = (void *)sdata; //tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS])); //tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS])); //tmp_ptr = (void **)(&(ptr_array[index1])); tmp_ptr = (void **)(&(array[index1])); double f1, f2, f3; f1 = 1.1; f2 = 2.5; if (warp_thread_id < divergence) { /* __asm volatile ( ".reg .f32 %r14;\n\t" "mov.f32 %r14, 2.2;\n\t" ); */ for (int l = 0; l < iterations; l++) { f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = 
(void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = 
(void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); } } // __syncthreads(); // if ((blockDim.x * blockIdx.x + threadIdx.x) == 0) duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid); // __syncthreads(); } void usage() { std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl; } void parametric_measure_shared(int N, int iterations, int stride) { cudaProfilerStop(); int i; unsigned long long int * h_a; unsigned long long int * d_a; unsigned long long ** 
h_ptr_a; unsigned long long ** d_ptr_a; unsigned long long * duration; unsigned long long * latency; cudaError_t error_id; /* allocate array on CPU */ h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N); h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N); latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks); /* initialize array elements on CPU */ for (i = 0; i < N; i++) { h_ptr_a[i] = (unsigned long long *)&h_a[i]; } for (i = 0; i < N; i++) { h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N]; } /* allocate arrays on GPU */ cudaMalloc ((void **) &d_a, sizeof(unsigned long long int) * N ); cudaMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N ); cudaMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks); cudaThreadSynchronize (); error_id = cudaGetLastError(); if (error_id != cudaSuccess) { printf("Error 1 is %s\n", cudaGetErrorString(error_id)); } /* copy array elements from CPU to GPU */ cudaMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, cudaMemcpyHostToDevice); cudaMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, cudaMemcpyHostToDevice); cudaMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyHostToDevice); cudaThreadSynchronize (); error_id = cudaGetLastError(); if (error_id != cudaSuccess) { printf("Error 2 is %s\n", cudaGetErrorString(error_id)); } init_memory <<<1, 1>>>(d_ptr_a, d_a, stride, num_blocks, num_threads_per_block); cudaDeviceSynchronize(); /* launch kernel*/ //dim3 Db = dim3(13); //dim3 Dg = dim3(768,1,1); //printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride); // int sharedMemSize = sizeof(unsigned long long int) * N ; cudaEvent_t start, stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); cudaProfilerStart(); cudaFuncSetCacheConfig(shared_latency, cudaFuncCachePreferL1); //shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration); //shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence); shared_latency <<<num_blocks, num_threads_per_block>>>(d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block); cudaDeviceSynchronize(); ///cudaThreadSynchronize (); cudaProfilerStop(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); error_id = cudaGetLastError(); if (error_id != cudaSuccess) { printf("Error 3 is %s\n", cudaGetErrorString(error_id)); } /* copy results from GPU to CPU */ cudaMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, cudaMemcpyDeviceToHost); cudaMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyDeviceToHost); cudaThreadSynchronize (); /* print results*/ unsigned long long max_dur = latency[0]; unsigned long long min_dur = latency[0]; unsigned long long avg_lat = latency[0]; for (int i = 1; i < num_threads_per_block * num_blocks; i++) { avg_lat += latency[i]; if (latency[i] > max_dur) { max_dur = latency[i]; } else if (latency[i] < min_dur) { min_dur = latency[i]; } } // printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * 
num_iterations)), time); printf("%f\n", time); /* free memory on GPU */ cudaFree(d_a); cudaFree(d_ptr_a); cudaFree(duration); cudaThreadSynchronize (); /*free memory on CPU */ free(h_a); free(h_ptr_a); free(latency); } int main(int argc, char **argv) { int N; if (argc != 6) { usage(); exit(1); } num_blocks = atoi(argv[1]); num_threads_per_block = atoi(argv[2]); num_iterations = atoi(argv[3]); divergence = atoi(argv[4]); int stride = atoi(argv[5]); N = GLOBAL_MEM_ELEMENTS; parametric_measure_shared(N, 10, stride); return 0; }
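// Comparing the .hip and .cu versions of this pair shows the mechanical translation hipify
// performs: the triple-chevron launch becomes hipLaunchKernelGGL with explicit grid, block,
// shared-memory and stream arguments, and the deprecated cudaThreadSynchronize() is replaced by
// hipDeviceSynchronize(). Schematic of the mapping on a trivial kernel (names are placeholders):
#include <hip/hip_runtime.h>

__global__ void touch(float* a) { a[blockDim.x * blockIdx.x + threadIdx.x] += 1.0f; }

void launch_touch(float* d_a, int n_blocks, int n_threads, hipStream_t stream)
{
    // CUDA original:  touch<<<n_blocks, n_threads, 0, stream>>>(d_a);
    hipLaunchKernelGGL(touch, dim3(n_blocks), dim3(n_threads), 0, stream, d_a);
    hipDeviceSynchronize();              // stands in for the deprecated cudaThreadSynchronize()
}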
3d2649f79009eb6682b484b5221dd3fe24e32ad3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * \file main.cu * \brief Kernels to study the effects of coalescing on misaligned and strided * memory operations. * * Invocation: * ./coalescing <operation> <size> * * Parameters: * - operation: 'offset' or 'strided'. * - size: size of the vector in megabytes */ #include <iostream> #include <string> #include <vector> #include "common.h" /** * \brief Do some computation with element pointed by \p A. */ template <typename T> __device__ void do_operation(T *A) { *A = *A + 1; } /** * \brief CUDA kernel which access misaligned memory positions. */ template <typename T> __global__ void offset_access(T *A, const size_t offset) { int i = blockDim.x * blockIdx.x + threadIdx.x + offset; do_operation(&A[i]); } /** * \brief CUDA kernel which access strided memory positions. */ template <typename T> __global__ void stride_access(T *A, const size_t stride) { int i = (blockDim.x * blockIdx.x + threadIdx.x) * stride; do_operation(&A[i]); } /** * \brief Test GPU misaligned access. * * \param mbs size of the buffer in megabytes */ template <typename T> int test_offset(size_t mbs) { float ms, mean_time; hipEvent_t start, stop; T *d_A; // Create events CUDACHECK(hipEventCreate(&start)); CUDACHECK(hipEventCreate(&stop)); // Get the actual size in number of elements size_t N = (mbs * 1024 * 1024) / sizeof(T); // Allocate host and device data // Needs 33 times more data because of far strides CUDACHECK(hipMalloc(&d_A, sizeof(T) * N * 33)); CUDACHECK(hipMemset(d_A, 0.0, N * sizeof(T))); // Vary offset, execute kernel and measure time for (int offset = 0; offset <= 32; offset++) { mean_time = 0.0f; // Sample the kernel a couple of times for (int i = 0; i < 10; i++) { CUDACHECK(hipEventRecord(start)); hipLaunchKernelGGL(( offset_access), dim3((N + 255) / 256), dim3(256), 0, 0, d_A, offset); CUDACHECK(hipEventRecord(stop)); CUDACHECK(hipEventSynchronize(stop)); CUDACHECK(hipEventElapsedTime(&ms, start, stop)); mean_time += ms; } // Take the average of 10 runs mean_time /= 10; // Calculate effective bandwidth auto ebw = 2 * mbs / mean_time; // Output to stdout std::printf(" %2d\t%6.4f\t%6.2f\n", offset, mean_time, ebw); } // Cleanup CUDACHECK(hipFree(d_A)); CUDACHECK(hipEventDestroy(start)); CUDACHECK(hipEventDestroy(stop)); return EXIT_SUCCESS; } /** * \brief Test GPU strided access. 
* * \param mbs size of the buffer in megabytes */ template <typename T> int test_stride(size_t mbs) { float ms, mean_time; hipEvent_t start, stop; T *d_A; // Create events CUDACHECK(hipEventCreate(&start)); CUDACHECK(hipEventCreate(&stop)); // Get the actual size in number of elements size_t N = (mbs * 1024 * 1024) / sizeof(T); // Allocate host and device data // Needs 33 times more data because of far strides CUDACHECK(hipMalloc(&d_A, sizeof(T) * N * 33)); CUDACHECK(hipMemset(d_A, 0.0, N * sizeof(T))); // Vary stride, execute kernel and measure time for (int stride = 1; stride <= 32; stride++) { mean_time = 0.0f; // Sample the kernel a couple of times for (int i = 0; i < 10; i++) { CUDACHECK(hipEventRecord(start)); hipLaunchKernelGGL(( stride_access), dim3((N + 255) / 256), dim3(256), 0, 0, d_A, stride); CUDACHECK(hipEventRecord(stop)); CUDACHECK(hipEventSynchronize(stop)); CUDACHECK(hipEventElapsedTime(&ms, start, stop)); mean_time += ms; } // Take the average of 10 runs mean_time /= 10; // Calculate effective bandwidth auto ebw = 2 * mbs / mean_time; // Output to stdout std::printf(" %2d\t%6.4f\t%6.2f\n", stride, mean_time, ebw); } // Cleanup CUDACHECK(hipFree(d_A)); CUDACHECK(hipEventDestroy(start)); CUDACHECK(hipEventDestroy(stop)); return EXIT_SUCCESS; } /** * \brief Profile stride kernel. */ template <typename T> int prof_stride(size_t mbs, int stride) { auto N = mbs * 1024 * 1024 / sizeof(T); T *d_A; CUDACHECK(hipMalloc(&d_A, N * sizeof(T) * 33)); CUDACHECK(hipMemset(d_A, 0, N * sizeof(T) * 33)); hipLaunchKernelGGL(( stride_access), dim3((N + 255) / 256), dim3(256), 0, 0, d_A, stride); CUDACHECK(hipDeviceSynchronize()); CUDACHECK(hipFree(d_A)); return EXIT_SUCCESS; } /** * \brief Program entry-point. */ int main(int argc, char **argv) { if (argc < 3) { std::fprintf(stderr, "Error: two command-line parameters expected!\n"); std::exit(EXIT_FAILURE); } std::string type = argv[1]; size_t N = atol(argv[2]); hipDeviceProp_t prop; CUDACHECK(hipGetDeviceProperties(&prop, 0)); std::fprintf(stderr, "Device name: %s\n", prop.name); if (type == "offset") return test_offset<float>(N); if (type == "stride") return test_stride<float>(N); if (type == "prof-stride") return prof_stride<float>(N, (argc >= 4) ? atoi(argv[3]) : 1); std::fprintf(stderr, "Error: Unrecognized operation.\n"); return EXIT_FAILURE; }
3d2649f79009eb6682b484b5221dd3fe24e32ad3.cu
/** * \file main.cu * \brief Kernels to study the effects of coalescing on misaligned and strided * memory operations. * * Invocation: * ./coalescing <operation> <size> * * Parameters: * - operation: 'offset' or 'strided'. * - size: size of the vector in megabytes */ #include <iostream> #include <string> #include <vector> #include "common.h" /** * \brief Do some computation with element pointed by \p A. */ template <typename T> __device__ void do_operation(T *A) { *A = *A + 1; } /** * \brief CUDA kernel which access misaligned memory positions. */ template <typename T> __global__ void offset_access(T *A, const size_t offset) { int i = blockDim.x * blockIdx.x + threadIdx.x + offset; do_operation(&A[i]); } /** * \brief CUDA kernel which access strided memory positions. */ template <typename T> __global__ void stride_access(T *A, const size_t stride) { int i = (blockDim.x * blockIdx.x + threadIdx.x) * stride; do_operation(&A[i]); } /** * \brief Test GPU misaligned access. * * \param mbs size of the buffer in megabytes */ template <typename T> int test_offset(size_t mbs) { float ms, mean_time; cudaEvent_t start, stop; T *d_A; // Create events CUDACHECK(cudaEventCreate(&start)); CUDACHECK(cudaEventCreate(&stop)); // Get the actual size in number of elements size_t N = (mbs * 1024 * 1024) / sizeof(T); // Allocate host and device data // Needs 33 times more data because of far strides CUDACHECK(cudaMalloc(&d_A, sizeof(T) * N * 33)); CUDACHECK(cudaMemset(d_A, 0.0, N * sizeof(T))); // Vary offset, execute kernel and measure time for (int offset = 0; offset <= 32; offset++) { mean_time = 0.0f; // Sample the kernel a couple of times for (int i = 0; i < 10; i++) { CUDACHECK(cudaEventRecord(start)); offset_access<<<(N + 255) / 256, 256>>>(d_A, offset); CUDACHECK(cudaEventRecord(stop)); CUDACHECK(cudaEventSynchronize(stop)); CUDACHECK(cudaEventElapsedTime(&ms, start, stop)); mean_time += ms; } // Take the average of 10 runs mean_time /= 10; // Calculate effective bandwidth auto ebw = 2 * mbs / mean_time; // Output to stdout std::printf(" %2d\t%6.4f\t%6.2f\n", offset, mean_time, ebw); } // Cleanup CUDACHECK(cudaFree(d_A)); CUDACHECK(cudaEventDestroy(start)); CUDACHECK(cudaEventDestroy(stop)); return EXIT_SUCCESS; } /** * \brief Test GPU strided access. 
* * \param mbs size of the buffer in megabytes */ template <typename T> int test_stride(size_t mbs) { float ms, mean_time; cudaEvent_t start, stop; T *d_A; // Create events CUDACHECK(cudaEventCreate(&start)); CUDACHECK(cudaEventCreate(&stop)); // Get the actual size in number of elements size_t N = (mbs * 1024 * 1024) / sizeof(T); // Allocate host and device data // Needs 33 times more data because of far strides CUDACHECK(cudaMalloc(&d_A, sizeof(T) * N * 33)); CUDACHECK(cudaMemset(d_A, 0.0, N * sizeof(T))); // Vary stride, execute kernel and measure time for (int stride = 1; stride <= 32; stride++) { mean_time = 0.0f; // Sample the kernel a couple of times for (int i = 0; i < 10; i++) { CUDACHECK(cudaEventRecord(start)); stride_access<<<(N + 255) / 256, 256>>>(d_A, stride); CUDACHECK(cudaEventRecord(stop)); CUDACHECK(cudaEventSynchronize(stop)); CUDACHECK(cudaEventElapsedTime(&ms, start, stop)); mean_time += ms; } // Take the average of 10 runs mean_time /= 10; // Calculate effective bandwidth auto ebw = 2 * mbs / mean_time; // Output to stdout std::printf(" %2d\t%6.4f\t%6.2f\n", stride, mean_time, ebw); } // Cleanup CUDACHECK(cudaFree(d_A)); CUDACHECK(cudaEventDestroy(start)); CUDACHECK(cudaEventDestroy(stop)); return EXIT_SUCCESS; } /** * \brief Profile stride kernel. */ template <typename T> int prof_stride(size_t mbs, int stride) { auto N = mbs * 1024 * 1024 / sizeof(T); T *d_A; CUDACHECK(cudaMalloc(&d_A, N * sizeof(T) * 33)); CUDACHECK(cudaMemset(d_A, 0, N * sizeof(T) * 33)); stride_access<<<(N + 255) / 256, 256>>>(d_A, stride); CUDACHECK(cudaDeviceSynchronize()); CUDACHECK(cudaFree(d_A)); return EXIT_SUCCESS; } /** * \brief Program entry-point. */ int main(int argc, char **argv) { if (argc < 3) { std::fprintf(stderr, "Error: two command-line parameters expected!\n"); std::exit(EXIT_FAILURE); } std::string type = argv[1]; size_t N = atol(argv[2]); cudaDeviceProp prop; CUDACHECK(cudaGetDeviceProperties(&prop, 0)); std::fprintf(stderr, "Device name: %s\n", prop.name); if (type == "offset") return test_offset<float>(N); if (type == "stride") return test_stride<float>(N); if (type == "prof-stride") return prof_stride<float>(N, (argc >= 4) ? atoi(argv[3]) : 1); std::fprintf(stderr, "Error: Unrecognized operation.\n"); return EXIT_FAILURE; }
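// The effective-bandwidth column printed by test_offset/test_stride is 2 * mbs / mean_time: each
// launched thread reads and writes one element (hence the factor 2), there is one thread per
// element of the mbs-megabyte buffer, and mean_time is in milliseconds, so MB/ms is numerically
// a bandwidth in GB/s. The same computation with the units spelled out (illustrative helper):
static inline float effective_bandwidth_gbs(size_t mbs, float elapsed_ms)
{
    const float megabytes_moved = 2.0f * float(mbs);   // one read + one write of the buffer
    return megabytes_moved / elapsed_ms;               // MB / ms == GB / s
}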
04b23965299c273cf69fec3327c77fbf84e252ef.hip
// !!! This is a file automatically generated by hipify!!! #if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using LayoutDst = cutlass::layout::TensorNCxHWx<4>; using ThreadBlockShape = cutlass::gemm::GemmShape<128, 32, 32>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp< int8_t, 4, int32_t, int32_t, float>; using Convolution = cutlass::convolution::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t, LayoutDst, int32_t, cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle< cutlass::convolution::ConvType::kConvolution>, 2, 4, 16, true, cutlass::arch::OpMultiplyAddSaturate>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, hipStream_t stream); #pragma GCC diagnostic pop #endif
04b23965299c273cf69fec3327c77fbf84e252ef.cu
#if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using LayoutDst = cutlass::layout::TensorNCxHWx<4>; using ThreadBlockShape = cutlass::gemm::GemmShape<128, 32, 32>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp< int8_t, 4, int32_t, int32_t, float>; using Convolution = cutlass::convolution::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t, LayoutDst, int32_t, cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle< cutlass::convolution::ConvType::kConvolution>, 2, 4, 16, true, cutlass::arch::OpMultiplyAddSaturate>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, cudaStream_t stream); #pragma GCC diagnostic pop #endif
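// Apart from the hipify banner, the HIP and CUDA versions of this generated wrapper differ only
// in the stream type of the last parameter (hipStream_t vs cudaStream_t); the rest is an explicit
// instantiation of one cutlass convolution configuration so that each tile shape can be compiled
// in its own translation unit. One common way to keep such instantiations platform-neutral is a
// single stream alias selected at compile time (illustrative sketch, not part of MegDNN):
#if defined(__HIP_PLATFORM_AMD__)
  #include <hip/hip_runtime.h>
  using gpuStream_t = hipStream_t;
#else
  #include <cuda_runtime.h>
  using gpuStream_t = cudaStream_t;
#endif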
375fac79a672849991ffb75424db16058a23e6e0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #include "cuda_utils.h" #include "timer.c" #define TILE_SIZE (32 * 32) typedef float dtype; __global__ void matTrans(dtype* AT, dtype* A, int N) { /* Fill your code here */ int tile_dim = 32; int block_row = 8; int x = blockIdx.x * tile_dim + threadIdx.x; int y = blockIdx.y * tile_dim + threadIdx.y; int width = gridDim.x * tile_dim; for (int j = 0; j < tile_dim; j+= block_row) { AT[x*width + (y+j)]= A[(y+j) * width + x]; } } void parseArg (int argc, char** argv, int* N) { if(argc == 2) { *N = atoi (argv[1]); assert (*N > 0); } else { fprintf (stderr, "usage: %s <N>\n", argv[0]); exit (EXIT_FAILURE); } } void initArr (dtype* in, int N) { int i; for(i = 0; i < N; i++) { in[i] = (dtype) rand () / RAND_MAX; } } void cpuTranspose (dtype* A, dtype* AT, int N) { int i, j; for(i = 0; i < N; i++) { for(j = 0; j < N; j++) { AT[j * N + i] = A[i * N + j]; } } } int cmpArr (dtype* a, dtype* b, int N) { int cnt, i; cnt = 0; for(i = 0; i < N; i++) { if(abs(a[i] - b[i]) > 1e-6) cnt++; } return cnt; } void gpuTranspose (dtype* A, dtype* AT, int N) { struct stopwatch_t* timer = NULL; long double t_gpu; dtype *i_data, *o_data; //input data and outdata int TILE_DIM = 32; //defining the block and number of threads dim3 gb(N/TILE_DIM, N/TILE_DIM, 1); dim3 tb(TILE_DIM, 8, 1); //Allocating the memory for the input and output matrix CUDA_CHECK_ERROR(hipMalloc(&i_data, N*N*sizeof(dtype))); CUDA_CHECK_ERROR(hipMalloc(&o_data, N*N*sizeof(dtype))); CUDA_CHECK_ERROR(hipMemcpy(i_data, A, N*N*sizeof(dtype), hipMemcpyHostToDevice)); /* Setup timers */ stopwatch_init (); timer = stopwatch_create (); stopwatch_start (timer); /* run your kernel here */ hipLaunchKernelGGL(( matTrans) , dim3(gb), dim3(tb), 0, 0, o_data, i_data, N); hipDeviceSynchronize (); t_gpu = stopwatch_stop (timer); fprintf (stderr, "GPU transpose: %Lg secs ==> %Lg billion elements/second\n", t_gpu, (N * N) / t_gpu * 1e-9 ); CUDA_CHECK_ERROR(hipMemcpy (AT, o_data, N*N*sizeof(dtype),hipMemcpyDeviceToHost)); } int main(int argc, char** argv) { /* variables */ dtype *A, *ATgpu, *ATcpu; int err; int N; struct stopwatch_t* timer = NULL; long double t_cpu; N = -1; parseArg (argc, argv, &N); /* input and output matrices on host */ /* output */ ATcpu = (dtype*) malloc (N * N * sizeof (dtype)); ATgpu = (dtype*) malloc (N * N * sizeof (dtype)); /* input */ A = (dtype*) malloc (N * N * sizeof (dtype)); initArr (A, N * N); /* GPU transpose kernel */ gpuTranspose (A, ATgpu, N); /* Setup timers */ stopwatch_init (); timer = stopwatch_create (); stopwatch_start (timer); /* compute reference array */ cpuTranspose (A, ATcpu, N); t_cpu = stopwatch_stop (timer); fprintf (stderr, "Time to execute CPU transpose kernel: %Lg secs\n", t_cpu); /* check correctness */ err = cmpArr (ATgpu, ATcpu, N * N); if(err) { fprintf (stderr, "Transpose failed: %d\n", err); } else { fprintf (stderr, "Transpose successful\n"); } free (A); free (ATgpu); free (ATcpu); return 0; }
375fac79a672849991ffb75424db16058a23e6e0.cu
#include <stdlib.h> #include <stdio.h> #include "cuda_utils.h" #include "timer.c" #define TILE_SIZE (32 * 32) typedef float dtype; __global__ void matTrans(dtype* AT, dtype* A, int N) { /* Fill your code here */ int tile_dim = 32; int block_row = 8; int x = blockIdx.x * tile_dim + threadIdx.x; int y = blockIdx.y * tile_dim + threadIdx.y; int width = gridDim.x * tile_dim; for (int j = 0; j < tile_dim; j+= block_row) { AT[x*width + (y+j)]= A[(y+j) * width + x]; } } void parseArg (int argc, char** argv, int* N) { if(argc == 2) { *N = atoi (argv[1]); assert (*N > 0); } else { fprintf (stderr, "usage: %s <N>\n", argv[0]); exit (EXIT_FAILURE); } } void initArr (dtype* in, int N) { int i; for(i = 0; i < N; i++) { in[i] = (dtype) rand () / RAND_MAX; } } void cpuTranspose (dtype* A, dtype* AT, int N) { int i, j; for(i = 0; i < N; i++) { for(j = 0; j < N; j++) { AT[j * N + i] = A[i * N + j]; } } } int cmpArr (dtype* a, dtype* b, int N) { int cnt, i; cnt = 0; for(i = 0; i < N; i++) { if(abs(a[i] - b[i]) > 1e-6) cnt++; } return cnt; } void gpuTranspose (dtype* A, dtype* AT, int N) { struct stopwatch_t* timer = NULL; long double t_gpu; dtype *i_data, *o_data; //input data and outdata int TILE_DIM = 32; //defining the block and number of threads dim3 gb(N/TILE_DIM, N/TILE_DIM, 1); dim3 tb(TILE_DIM, 8, 1); //Allocating the memory for the input and output matrix CUDA_CHECK_ERROR(cudaMalloc(&i_data, N*N*sizeof(dtype))); CUDA_CHECK_ERROR(cudaMalloc(&o_data, N*N*sizeof(dtype))); CUDA_CHECK_ERROR(cudaMemcpy(i_data, A, N*N*sizeof(dtype), cudaMemcpyHostToDevice)); /* Setup timers */ stopwatch_init (); timer = stopwatch_create (); stopwatch_start (timer); /* run your kernel here */ matTrans <<<gb, tb>>> (o_data, i_data, N); cudaThreadSynchronize (); t_gpu = stopwatch_stop (timer); fprintf (stderr, "GPU transpose: %Lg secs ==> %Lg billion elements/second\n", t_gpu, (N * N) / t_gpu * 1e-9 ); CUDA_CHECK_ERROR(cudaMemcpy (AT, o_data, N*N*sizeof(dtype),cudaMemcpyDeviceToHost)); } int main(int argc, char** argv) { /* variables */ dtype *A, *ATgpu, *ATcpu; int err; int N; struct stopwatch_t* timer = NULL; long double t_cpu; N = -1; parseArg (argc, argv, &N); /* input and output matrices on host */ /* output */ ATcpu = (dtype*) malloc (N * N * sizeof (dtype)); ATgpu = (dtype*) malloc (N * N * sizeof (dtype)); /* input */ A = (dtype*) malloc (N * N * sizeof (dtype)); initArr (A, N * N); /* GPU transpose kernel */ gpuTranspose (A, ATgpu, N); /* Setup timers */ stopwatch_init (); timer = stopwatch_create (); stopwatch_start (timer); /* compute reference array */ cpuTranspose (A, ATcpu, N); t_cpu = stopwatch_stop (timer); fprintf (stderr, "Time to execute CPU transpose kernel: %Lg secs\n", t_cpu); /* check correctness */ err = cmpArr (ATgpu, ATcpu, N * N); if(err) { fprintf (stderr, "Transpose failed: %d\n", err); } else { fprintf (stderr, "Transpose successful\n"); } free (A); free (ATgpu); free (ATcpu); return 0; }
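// matTrans above transposes straight between global buffers, so its writes to AT are strided and
// uncoalesced even though the reads from A are coalesced (TILE_SIZE is defined but no shared
// memory is used). The classic improvement is to stage a 32x32 tile in shared memory, padded by
// one column to avoid bank conflicts, so both the loads and the stores are coalesced. Sketch of
// that variant, matching the 32x8 thread block used by gpuTranspose (assumes N is a multiple of
// the tile size, as the grid computation above already does):
#define TILE_DIM 32
#define BLOCK_ROWS 8

__global__ void matTransShared(dtype* AT, const dtype* A, int N)
{
    __shared__ dtype tile[TILE_DIM][TILE_DIM + 1];                 // +1 removes bank conflicts

    int x = blockIdx.x * TILE_DIM + threadIdx.x;
    int y = blockIdx.y * TILE_DIM + threadIdx.y;
    for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)                 // coalesced loads from A
        tile[threadIdx.y + j][threadIdx.x] = A[(y + j) * N + x];

    __syncthreads();

    x = blockIdx.y * TILE_DIM + threadIdx.x;                       // swap block indices
    y = blockIdx.x * TILE_DIM + threadIdx.y;
    for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)                 // coalesced stores to AT
        AT[(y + j) * N + x] = tile[threadIdx.x][threadIdx.y + j];
}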
5a3d5d35cad8f030029615ec79abbfb3b0b24578.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "sub0.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *div0 = NULL; hipMalloc(&div0, XSIZE*YSIZE); float *div = NULL; hipMalloc(&div, XSIZE*YSIZE); float *g = NULL; hipMalloc(&g, XSIZE*YSIZE); float lambda = 1; int nx = 1; int ny = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( sub0), dim3(gridBlock),dim3(threadBlock), 0, 0, div0,div,g,lambda,nx,ny); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( sub0), dim3(gridBlock),dim3(threadBlock), 0, 0, div0,div,g,lambda,nx,ny); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( sub0), dim3(gridBlock),dim3(threadBlock), 0, 0, div0,div,g,lambda,nx,ny); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
5a3d5d35cad8f030029615ec79abbfb3b0b24578.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "sub0.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *div0 = NULL; cudaMalloc(&div0, XSIZE*YSIZE); float *div = NULL; cudaMalloc(&div, XSIZE*YSIZE); float *g = NULL; cudaMalloc(&g, XSIZE*YSIZE); float lambda = 1; int nx = 1; int ny = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); sub0<<<gridBlock,threadBlock>>>(div0,div,g,lambda,nx,ny); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { sub0<<<gridBlock,threadBlock>>>(div0,div,g,lambda,nx,ny); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { sub0<<<gridBlock,threadBlock>>>(div0,div,g,lambda,nx,ny); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
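// Note on the timing loop in main above: kernel launches are asynchronous, so reading
// steady_clock immediately after the 1000-launch loop measures mostly enqueue overhead (plus
// whatever backpressure the launch queue applies), not the kernels' execution time. Synchronizing
// before the second timestamp makes the window cover the GPU work itself; sketch of the adjusted
// loop, using the same variables as the .cu version above:
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    sub0<<<gridBlock, threadBlock>>>(div0, div, g, lambda, nx, ny);
}
cudaDeviceSynchronize();                       // wait for all queued launches to finish
auto end = steady_clock::now();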
09b04dcb458aa9f78b5e73f7d071fd4ac87f3199.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "HSwish.hpp" #include <cuda_fp16.hpp> static __global__ void hswish_kernel_fp32(float* input, float* output, int edge) { KernelPositionBlock; float x = input[position]; float a = x + 3; a = a < 0 ? 0 : (a >= 6 ? 6 : a); output[position] = x * a / 6; } static __global__ void hswish_kernel_fp16(__half* input, __half* output, int edge) { KernelPositionBlock; __half _six = 6.0f; __half x = input[position]; __half a = x + __half(3.0f); __half _zero = 0.0f; a = a < _zero ? _zero : (a >= _six ? _six : a); output[position] = x * a / _six; } void HSwishConfig::init(){ INFO("init hswish config: %s", info_.c_str()); INFO("weights = %d", this->weights_.size()); for(int i = 0; i < this->weights_.size(); ++i){ auto& w = this->weights_[i]; if(w->type() == TRT::DataType::Float16){ INFO("Weight[%d] shape is %s, dtype = %s, value[0] = %f", i, w->shape_string(), data_type_string(w->type()), float(w->at<__half>(0))); }else{ INFO("Weight[%d] shape is %s, dtype = %s, value[0] = %f", i, w->shape_string(), data_type_string(w->type()), w->at<float>(0)); } } } nvinfer1::DimsExprs HSwish::getOutputDimensions( int32_t outputIndex, const nvinfer1::DimsExprs* inputs, int32_t nbInputs, nvinfer1::IExprBuilder& exprBuilder) noexcept{ return inputs[0]; } std::shared_ptr<LayerConfig> HSwish::config(const std::string& layerName) { auto cfg = std::shared_ptr<LayerConfig>(new HSwishConfig()); //cfg->supportDataType_ = {nvinfer1::DataType::kHALF, nvinfer1::DataType::kFLOAT}; //cfg->supportDataType_ = {nvinfer1::DataType::kHALF}; cfg->supportDataType_ = {nvinfer1::DataType::kFLOAT}; return cfg; } int HSwish::enqueue(const std::vector<GTensor>& inputs, std::vector<GTensor>& outputs, const std::vector<GTensor>& weights, void* workspace, hipStream_t stream) { int count = inputs[0].count(); auto grid = CUDATools::grid_dims(count); auto block = CUDATools::block_dims(count); if (config_->configDataType_ == TRT::DataType::Float) { INFO("enqueue for float"); hipLaunchKernelGGL(( hswish_kernel_fp32) , dim3(grid), dim3(block), 0, stream , inputs[0].ptr<float>(), outputs[0].ptr<float>(), count); } else if (config_->configDataType_ == TRT::DataType::Float16) { INFO("enqueue for half"); hipLaunchKernelGGL(( hswish_kernel_fp16) , dim3(grid), dim3(block), 0, stream , inputs[0].ptr<__half>(), outputs[0].ptr<__half>(), count); } else{ INFOF("not implement function"); } return 0; } RegisterPlugin(HSwish);
09b04dcb458aa9f78b5e73f7d071fd4ac87f3199.cu
#include "HSwish.hpp" #include <cuda_fp16.hpp> static __global__ void hswish_kernel_fp32(float* input, float* output, int edge) { KernelPositionBlock; float x = input[position]; float a = x + 3; a = a < 0 ? 0 : (a >= 6 ? 6 : a); output[position] = x * a / 6; } static __global__ void hswish_kernel_fp16(__half* input, __half* output, int edge) { KernelPositionBlock; __half _six = 6.0f; __half x = input[position]; __half a = x + __half(3.0f); __half _zero = 0.0f; a = a < _zero ? _zero : (a >= _six ? _six : a); output[position] = x * a / _six; } void HSwishConfig::init(){ INFO("init hswish config: %s", info_.c_str()); INFO("weights = %d", this->weights_.size()); for(int i = 0; i < this->weights_.size(); ++i){ auto& w = this->weights_[i]; if(w->type() == TRT::DataType::Float16){ INFO("Weight[%d] shape is %s, dtype = %s, value[0] = %f", i, w->shape_string(), data_type_string(w->type()), float(w->at<__half>(0))); }else{ INFO("Weight[%d] shape is %s, dtype = %s, value[0] = %f", i, w->shape_string(), data_type_string(w->type()), w->at<float>(0)); } } } nvinfer1::DimsExprs HSwish::getOutputDimensions( int32_t outputIndex, const nvinfer1::DimsExprs* inputs, int32_t nbInputs, nvinfer1::IExprBuilder& exprBuilder) noexcept{ return inputs[0]; } std::shared_ptr<LayerConfig> HSwish::config(const std::string& layerName) { auto cfg = std::shared_ptr<LayerConfig>(new HSwishConfig()); //cfg->supportDataType_ = {nvinfer1::DataType::kHALF, nvinfer1::DataType::kFLOAT}; //cfg->supportDataType_ = {nvinfer1::DataType::kHALF}; cfg->supportDataType_ = {nvinfer1::DataType::kFLOAT}; return cfg; } int HSwish::enqueue(const std::vector<GTensor>& inputs, std::vector<GTensor>& outputs, const std::vector<GTensor>& weights, void* workspace, cudaStream_t stream) { int count = inputs[0].count(); auto grid = CUDATools::grid_dims(count); auto block = CUDATools::block_dims(count); if (config_->configDataType_ == TRT::DataType::Float) { INFO("enqueue for float"); hswish_kernel_fp32 <<<grid, block, 0, stream >>> (inputs[0].ptr<float>(), outputs[0].ptr<float>(), count); } else if (config_->configDataType_ == TRT::DataType::Float16) { INFO("enqueue for half"); hswish_kernel_fp16 <<<grid, block, 0, stream >>> (inputs[0].ptr<__half>(), outputs[0].ptr<__half>(), count); } else{ INFOF("not implement function"); } return 0; } RegisterPlugin(HSwish);
7863b872d81801568d0cedc1b8bb459dfa0e3861.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <errno.h>
#include <omp.h>
#include <semaphore.h>
#ifdef MPI_ON
#include <mpi.h>
#endif
#include "chooseV.h"
#include "signal.h"
#ifdef MPI_ON
MPI_Datatype MPI_DMDRAGTYPE;
MPI_Datatype MPI_RAGPMLTYPE;
MPI_Datatype MPI_HLFRAGTYPE;
#endif
int* mapNodeSize;
//=============================================
ftype* __restrict__ hostKpmlx1; ftype* __restrict__ hostKpmlx2;
ftype* __restrict__ hostKpmly1; ftype* __restrict__ hostKpmly2;
ftype* __restrict__ hostKpmlz1; ftype* __restrict__ hostKpmlz2;
GeoParamsHost parsHost;
__constant__ GeoParams pars;
__constant__ int devNStripe[NDev] = STRIPES;
__constant__ ftype Kpmlx1[(KNpmlx==0)?1:KNpmlx];
__constant__ ftype Kpmlx2[(KNpmlx==0)?1:KNpmlx];
__constant__ ftype Kpmly1[(KNpmly==0)?1:KNpmly];
__constant__ ftype Kpmly2[(KNpmly==0)?1:KNpmly];
__constant__ ftype Kpmlz1[(KNpmlz==0)?1:KNpmlz];
__constant__ ftype Kpmlz2[(KNpmlz==0)?1:KNpmlz];
//__shared__ ftype2 shared_fld[2][7][Nz];
//__shared__ ftype2 shared_fld[(FTYPESIZE*Nv*28>0xc000)?7:14][Nv];
__shared__ ftype2 shared_fld[SHARED_SIZE][NzMax];
texture<char, hipTextureType3D> index_tex;
hipArray* index_texArray=0;
#include "window.hpp"
struct AsyncMPIexch{
  int even,ix,t0,Nt,mpirank;
  bool do_run;
  double exch_time;
  sem_t sem_mpi, sem_calc;
  void exch(const int _even, const int _ix, const int _t0, const int _Nt, const int _mpirank) {
    even=_even; ix=_ix; t0=_t0; Nt=_Nt, mpirank=_mpirank; exch_time=0;
    if(sem_post(&sem_mpi)<0) printf("exch sem_post error %d\n",errno);
  }
  void exch_sync(){ if(sem_wait(&sem_calc)<0) printf("exch_sync sem error %d\n",errno); }
  void run() {
    if(sem_wait(&sem_mpi)<0) printf("run sem_wait error %d\n",errno);
    if(do_run==0) return;
    double start_time = omp_get_wtime();
    if(even==0) DiamondRag::bufSendMPIp(mpirank, t0,Nt);
    if(even==1) DiamondRag::bufSendMPIm(mpirank, t0,Nt);
    exch_time = omp_get_wtime()-start_time;
    if(sem_post(&sem_calc)<0) printf("run sem_post error %d\n",errno);
  }
} ampi_exch;
// Launch helper: runs the PMLS-prefixed variant of func when isPMLs is set, the plain variant otherwise
// (the chevron launch syntax is accepted by hipcc).
#ifdef TIMERS_ON
#define IFPMLS(func,a,b,c,d,TIMER,args) {\
  /*printf(#func" idev=%d ix=%d iym=%d Nblocks=%d\n", idev,ix, iym, a);*/ TIMER.init(d); \
  if(isPMLs) PMLS##func<<<a,b,c,d>>>args; else func<<<a,b,c,d>>>args; TIMER.record(); }
#else
#define IFPMLS(func,a,b,c,d,EVENT,args) {\
  /*printf(#func" PMLS=%d idev=%d w0=%d ix=%d iym=%d Nblocks=%d\n", isPMLs, idev,w0,ix, iym, a);*/\
  for(int iz=0   ; iz<Nv; iz+=2*Nw+6) { if(isPMLs) PMLS##func<0><<<a,b,c,d>>>args; else func<0><<<a,b,c,d>>>args; } \
  for(int iz=Nw+3; iz<Nv; iz+=2*Nw+6) { if(isPMLs) PMLS##func<1><<<a,b,c,d>>>args; else func<1><<<a,b,c,d>>>args; } }
#endif
//#define IFPMLS(func,a,b,c,d,args) { if(!isPMLs) func<<<a,b,c,d>>>args; }
//#define IFPMLS(func,a,b,c,d,args) func<<<a,b,c,d>>>args;
template<int even> inline void Window::Dtorre(int ix, int Nt, int t0, double disbal[NDev], bool isPMLs, bool isTFSF) {
  if(Nt<=t0 || Nt<=0) return;
  DEBUG_PRINT(("Dtorre%d isPMLs=%d isTFSF=%d ix=%d, t0=%d Nt=%d wleft=%d\n", even, isPMLs, isTFSF, ix,t0,Nt, parsHost.wleft));
  const int Nw=Nzw-10;/*min(Nv-10,Nzw-10);*//*Nv/2*/; const int Nth=Nw+10;
  CHECK_ERROR( hipSetDevice(0) );
  #ifdef TIMERS_ON
  cuTimer ttDm[NDev], ttDo[NDev];
  hipStream_t stPMLbot; CHECK_ERROR( hipStreamCreate(&stPMLbot) );
  hipStream_t stI;      CHECK_ERROR( hipStreamCreate(&stI     ) ); cuTimer ttPMLtop, ttI;
  hipStream_t stDm[NDev],stDo[NDev];
  for(int i=0;i<NDev;i++) {
if(i!=0) CHECK_ERROR( hipSetDevice(i) ); CHECK_ERROR( hipStreamCreate(&stDm[i]) ); CHECK_ERROR( hipStreamCreate(&stDo[i]) ); ttDm[i].created=0; ttDo[i].created=0; } hipStream_t stPMLtop; CHECK_ERROR( hipStreamCreate(&stPMLtop) ); hipStream_t stX; CHECK_ERROR( hipStreamCreate(&stX ) ); cuTimer ttPMLbot, ttX; hipStream_t stP; cuTimer ttP,ttPmpi; if(even==0) { hipSetDevice(NDev-1); CHECK_ERROR( hipStreamCreate(&stP ) ); } else if(even==1) { hipSetDevice(0 ); CHECK_ERROR( hipStreamCreate(&stP ) ); } cuTimer ttMPIa; #else//TIMER_S_ON not def hipStream_t stPMLbot; CHECK_ERROR( hipStreamCreate(&stPMLbot) ); hipStream_t stI; CHECK_ERROR( hipStreamCreate(&stI ) ); hipStream_t stDm[NDev],stDo[NDev]; for(int i=0;i<NDev;i++) { if(i!=0) CHECK_ERROR( hipSetDevice(i) ); CHECK_ERROR( hipStreamCreate(&stDm[i]) ); CHECK_ERROR( hipStreamCreate(&stDo[i]) ); } hipStream_t stPMLtop; CHECK_ERROR( hipStreamCreate(&stPMLtop) ); hipStream_t stX; CHECK_ERROR( hipStreamCreate(&stX ) ); hipStream_t stP ; if(even==0) { hipSetDevice(NDev-1); CHECK_ERROR( hipStreamCreate(&stP ) ); } else if(even==1) { hipSetDevice(0 ); CHECK_ERROR( hipStreamCreate(&stP ) ); } #endif//TIMERS_ON CHECK_ERROR( hipSetDevice(0) ); int iym=0, iyp=0; int Nblk=0; iyp++; int Iy=iym, Xy, D1oy[NDev], D0oy[NDev], Dmy[NDev], DmBlk[NDev], Syb,Syt, SybBlk,SytBlk; int is_oneL[NDev], is_oneU[NDev], is_many[NDev], is_I[NDev], is_X[NDev], is_Sb[NDev], is_St[NDev], is_P[NDev]; for(int i=0; i<NDev; i++) { is_oneL[i]=0; is_oneU[i]=0; is_many[i]=0; is_I[i]=0; is_X[i]=0; is_Sb[i]=0; is_St[i]=0; is_P[i]=0; } is_I[0]=1; iym=iyp; Nblk=0; while(iyp<Npmly/2) { iyp++; Nblk++; } if(Nblk>0) is_Sb[0]=1; Syb=iym; SybBlk=Nblk; for(int idev=0,nextY=0; idev<NDev; idev++) { nextY+=NStripe[idev]; if(idev==NDev-1) nextY-=max(1,Npmly/2); if(idev!=0) { // Dtorre1 only if(iyp<nextY && even==1) is_oneL[idev]=1; D1oy[idev]=iyp; if(iyp<nextY) iyp++; } iym=iyp; Nblk=0; while(iyp<nextY-(idev==NDev-1?0:1)) { iyp++; Nblk++; } // Main Region if(Nblk>0) is_many[idev]=1; Dmy[idev]=iym, DmBlk[idev]=Nblk; if(idev!=NDev-1) { // Dtorre0 only if(iyp<nextY && even==0) is_oneU[idev]=1; D0oy[idev]=iyp; if(iyp<nextY) iyp++; } } iym=iyp; Nblk=0; while(iyp<Na-1) { iyp++; Nblk++; } if(Nblk>0) is_St[NDev-1]=1; is_X[NDev-1]=1; Syt=iym; SytBlk=Nblk; Xy=iyp; if(subnode!=0) { is_I [0]=0; if(even==1) is_P[0]=1; is_Sb[0]=0; DmBlk[0]+=SybBlk; Dmy[0]=Syb; } if(subnode!=NasyncNodes-1) { is_X [NDev-1]=0; if(even==0) is_P[NDev-1]=1; is_St[NDev-1]=0; DmBlk[NDev-1]+=SytBlk; } int mpirank = node*NasyncNodes+subnode; for(int idev=0; idev<NDev; idev++) { if(idev!=0) CHECK_ERROR( hipSetDevice(idev) ); if(is_oneL[idev] && even==1 && isTFSF ) IFPMLS(torreTFSF1 ,1 ,Nth,0,stDo[idev],ttDo[idev],(ix,D1oy[idev],iz,iz+Nw,Nt,t0)) if(is_oneL[idev] && even==1 && !isTFSF ) IFPMLS(torreD1 ,1 ,Nth,0,stDo[idev],ttDo[idev],(ix,D1oy[idev],iz,iz+Nw,Nt,t0)) if(is_oneL[idev] && even==1 )hipLaunchKernelGGL(( bufsave<1>), dim3((Nv+Nw-1)/Nw),dim3(Nw),0,stDo[idev], ix,D1oy[idev],Nt,t0); if(is_oneU[idev] && even==0 && isTFSF ) IFPMLS(torreTFSF0 ,1 ,Nth,0,stDo[idev],ttDo[idev],(ix,D0oy[idev],iz,iz+Nw,Nt,t0)) if(is_oneU[idev] && even==0 && !isTFSF ) IFPMLS(torreD0 ,1 ,Nth,0,stDo[idev],ttDo[idev],(ix,D0oy[idev],iz,iz+Nw,Nt,t0)) if(is_oneU[idev] && even==0 )hipLaunchKernelGGL(( bufsave<0>), dim3((Nv+Nw-1)/Nw),dim3(Nw),0,stDo[idev], ix,D0oy[idev],Nt,t0); if(is_I[idev] && even==0 && Npmly==0) IFPMLS(torreId0 ,1 ,Nth,0,stI ,ttI ,(ix,Iy ,iz,iz+Nw,Nt,t0)) if(is_I[idev] && even==0 && Npmly!=0) IFPMLS(torreIs0 ,1 ,Nth,0,stI ,ttI ,(ix,Iy ,iz,iz+Nw,Nt,t0)) 
if(is_I[idev] && even==1 && Npmly==0) IFPMLS(torreId1 ,1 ,Nth,0,stI ,ttI ,(ix,Iy ,iz,iz+Nw,Nt,t0)) if(is_I[idev] && even==1 && Npmly!=0) IFPMLS(torreIs1 ,1 ,Nth,0,stI ,ttI ,(ix,Iy ,iz,iz+Nw,Nt,t0)) if(is_X[idev] && even==0 && Npmly==0) IFPMLS(torreXd0 ,1 ,Nth,0,stX ,ttX ,(ix,Xy ,iz,iz+Nw,Nt,t0)) if(is_X[idev] && even==0 && Npmly!=0) IFPMLS(torreXs0 ,1 ,Nth,0,stX ,ttX ,(ix,Xy ,iz,iz+Nw,Nt,t0)) if(is_X[idev] && even==1 && Npmly==0) IFPMLS(torreXd1 ,1 ,Nth,0,stX ,ttX ,(ix,Xy ,iz,iz+Nw,Nt,t0)) if(is_X[idev] && even==1 && Npmly!=0) IFPMLS(torreXs1 ,1 ,Nth,0,stX ,ttX ,(ix,Xy ,iz,iz+Nw,Nt,t0)) if(is_P[idev] && even==0 ) IFPMLS(torreD0 ,1 ,Nth,0,stP ,ttP ,(ix,Xy ,iz,iz+Nw,Nt,t0)) if(is_P[idev] && even==1 ) IFPMLS(torreD1 ,1 ,Nth,0,stP ,ttP ,(ix,Iy ,iz,iz+Nw,Nt,t0)) if(is_P[idev] && even==0 )hipLaunchKernelGGL(( bufsave<0>), dim3((Nv+Nw-1)/Nw),dim3(Nw),0,stP , ix,Xy ,Nt,t0); if(is_P[idev] && even==1 )hipLaunchKernelGGL(( bufsave<1>), dim3((Nv+Nw-1)/Nw),dim3(Nw),0,stP , ix,Iy ,Nt,t0); if(is_Sb[idev] && even==0 ) IFPMLS(torreS0 ,SybBlk ,Nth,0,stPMLbot ,ttPMLbot ,(ix,Syb ,iz,iz+Nw,Nt,t0)) if(is_Sb[idev] && even==1 ) IFPMLS(torreS1 ,SybBlk ,Nth,0,stPMLbot ,ttPMLbot ,(ix,Syb ,iz,iz+Nw,Nt,t0)) if(is_St[idev] && even==0 ) IFPMLS(torreS0 ,SytBlk ,Nth,0,stPMLtop ,ttPMLtop ,(ix,Syt ,iz,iz+Nw,Nt,t0)) if(is_St[idev] && even==1 ) IFPMLS(torreS1 ,SytBlk ,Nth,0,stPMLtop ,ttPMLtop ,(ix,Syt ,iz,iz+Nw,Nt,t0)) if(is_many[idev] && even==0 && isTFSF ) IFPMLS(torreTFSF0 ,DmBlk[idev],Nth,0,stDm[idev],ttDm[idev],(ix,Dmy[idev] ,iz,iz+Nw,Nt,t0)) if(is_many[idev] && even==1 && isTFSF ) IFPMLS(torreTFSF1 ,DmBlk[idev],Nth,0,stDm[idev],ttDm[idev],(ix,Dmy[idev] ,iz,iz+Nw,Nt,t0)) if(is_many[idev] && even==0 && !isTFSF ) IFPMLS(torreD0 ,DmBlk[idev],Nth,0,stDm[idev],ttDm[idev],(ix,Dmy[idev] ,iz,iz+Nw,Nt,t0)) if(is_many[idev] && even==1 && !isTFSF ) IFPMLS(torreD1 ,DmBlk[idev],Nth,0,stDm[idev],ttDm[idev],(ix,Dmy[idev] ,iz,iz+Nw,Nt,t0)) if(is_oneL[idev] && even==1 ) DiamondRag::copyMbuf(idev, t0,Nt, stDo[idev]); if(is_oneU[idev] && even==0 ) DiamondRag::copyPbuf(idev, t0,Nt, stDo[idev]); #ifdef TIMERS_ON if(is_oneL[idev] && even==1 || is_oneU[idev] && even==0) ttDo[idev].record(); #endif } if(NasyncNodes>1 && even==1 ) DiamondRag::prepTransM(mpirank, t0,Nt, stP); if(NasyncNodes>1 && even==0 ) DiamondRag::prepTransP(mpirank, t0,Nt, stP); #ifdef TIMERS_ON for(int idev=0; idev<NDev; idev++) { CHECK_ERROR(hipSetDevice(idev)); if(is_P[idev]) ttP.record(); } CHECK_ERROR(hipSetDevice(0)); #endif CHECK_ERROR( hipSetDevice(0) ); float copytime=0; bool doSynccopy=0; if(!doneMemcopy) { doSynccopy=1; #ifdef TIMERS_ON for(int idev=0; idev<NDev; idev++) { CHECK_ERROR( hipSetDevice(idev) ); CHECK_ERROR( hipEventRecord(copyEventStart[idev], streamCopy[idev]) ); } CHECK_ERROR( hipSetDevice(0) ); if(even==0) MemcopyDtH(ix4copy); if(even==1) MemcopyHtD(ix4copy); for(int idev=0; idev<NDev; idev++) { CHECK_ERROR( hipSetDevice(idev) ); CHECK_ERROR( hipEventRecord(copyEventEnd[idev], streamCopy[idev]) ); } CHECK_ERROR( hipSetDevice(0) ); #else if(even==0) MemcopyDtH(ix4copy); if(even==1) MemcopyHtD(ix4copy); #endif if(even==1) doneMemcopy=true; } CHECK_ERROR( hipStreamSynchronize(stP ) ); #ifdef TIMERS_ON timerP += ttP.gettime_rec(); #endif if(NasyncNodes>1) ampi_exch.exch(even, ix, t0, Nt, mpirank); if(NasyncNodes>1) ampi_exch.exch_sync(); #ifdef TIMERS_ON for(int idev=0; idev<NDev; idev++) { CHECK_ERROR(hipSetDevice(idev)); if(is_P[idev]) ttPmpi.init(stP); } CHECK_ERROR(hipSetDevice(0)); #endif if(NasyncNodes>1 && even==1 ) 
DiamondRag::postTransM(mpirank, t0,Nt, stP); if(NasyncNodes>1 && even==0 ) DiamondRag::postTransP(mpirank, t0,Nt, stP); if(NasyncNodes>1) CHECK_ERROR( hipStreamSynchronize(stP) ); #ifdef TIMERS_ON for(int idev=0; idev<NDev; idev++) { CHECK_ERROR(hipSetDevice(idev)); if(is_P[idev]) ttPmpi.record(); } CHECK_ERROR(hipSetDevice(0)); #endif if(doSynccopy) for(int idev=0; idev<NDev; idev++) CHECK_ERROR( hipStreamSynchronize(streamCopy[idev]) ); #ifdef TIMERS_ON if(doSynccopy) for(int idev=0; idev<NDev; idev++) { float copytime_idev; CHECK_ERROR( hipEventElapsedTime(&copytime_idev, copyEventStart[idev], copyEventEnd[idev]) ); copytime=max(copytime,copytime_idev); } timerCopy+= copytime; #endif //if(even==1) parsHost.drop.save(stPMLm); CHECK_ERROR( hipStreamSynchronize(stPMLbot) ); CHECK_ERROR( hipStreamSynchronize(stPMLtop) ); CHECK_ERROR( hipStreamSynchronize(stI ) ); CHECK_ERROR( hipStreamSynchronize(stX ) ); //CHECK_ERROR( hipStreamSynchronize(stB ) ); for(int i=0;i<NDev;i++) CHECK_ERROR( hipStreamSynchronize(stDo[i]) ); int firsti=parsHost.iStep%NDev; double tt=omp_get_wtime(); CHECK_ERROR( hipStreamSynchronize(stDm[firsti]) ); disbal[0]+=omp_get_wtime()-tt; for(int j=1;j<NDev;j++) { int i=(j+parsHost.iStep)%NDev; double tt=omp_get_wtime(); CHECK_ERROR( hipStreamSynchronize(stDm[i]) ); disbal[j]+=omp_get_wtime()-tt; } CHECK_ERROR( hipStreamDestroy(stPMLbot) ); CHECK_ERROR( hipStreamDestroy(stPMLtop) ); CHECK_ERROR( hipStreamDestroy(stI ) ); CHECK_ERROR( hipStreamDestroy(stX ) ); //CHECK_ERROR( hipStreamDestroy(stB ) ); CHECK_ERROR( hipStreamDestroy(stP ) ); for(int i=0;i<NDev;i++) CHECK_ERROR( hipStreamDestroy(stDo[i]) ); for(int i=0;i<NDev;i++) CHECK_ERROR( hipStreamDestroy(stDm[i]) ); #ifdef TIMERS_ON timerPMLtop+= ttPMLtop.gettime_rec(); timerI+= ttI.gettime_rec(); for(int i=0;i<NDev;i++) timerDm[i]+= ttDm[i].gettime_rec(); timerPMLbot+= ttPMLbot.gettime_rec(); timerX+= ttX.gettime_rec(); for(int i=0;i<NDev;i++) timerDo[i]+= ttDo[i].gettime_rec(); timerP += ttPmpi.gettime_rec(); timerP += ampi_exch.exch_time*1e3; float calctime = max(ttPMLtop.diftime,max(ttPMLbot.diftime,max(ttI.diftime,max(ttX.diftime,ttP.diftime+ttPmpi.diftime+ampi_exch.exch_time*1e3)))); for(int i=0;i<NDev;i++) calctime=max(calctime,max(ttDm[i].diftime,ttDo[i].diftime)); timerExec+= max(copytime, calctime); #endif } inline void Window::Dtorres(int ix, int Nt, int t0, double disbal[NDev], bool isPMLs, bool isTFSF) { Dtorre<0>(ix,Nt,t0,disbal,isPMLs,isTFSF); //hipDeviceSynchronize(); CHECK_ERROR( hipGetLastError() ); Dtorre<1>(ix,Nt,t0,disbal,isPMLs,isTFSF); //hipDeviceSynchronize(); CHECK_ERROR( hipGetLastError() ); } #ifdef MPI_ON MPI_Request reqSp, reqSm, reqRp, reqRm, reqSp_pml, reqSm_pml, reqRp_pml, reqRm_pml; MPI_Request reqSM_p2pbuf[NDev],reqSP_p2pbuf[NDev],reqRM_p2pbuf[NDev],reqRP_p2pbuf[NDev]; MPI_Status status; int flagSp,flagRp,flagSm,flagRm,flagSp_pml,flagRp_pml,flagSm_pml,flagRm_pml; mpi_message Window::mes[8]; int doWaitM,doWaitP; //#define BLOCK_SEND //#define MPI_NUDGE //#define USE_MPI_THREADING #ifndef USE_MPI_THREADING #define WaitMPI(nreq,req,st) MPI_Wait(req,st) //#define SendMPI(p,sz,tp,rnk,tag,world,req,nreq) MPI_Isend(p,sz,tp,rnk,tag,world,req); #define SendMPI(p,sz,tp,rnk,tag,world,req,nreq) MPI_Send(p,sz,tp,rnk,tag,world); #define RecvMPI(p,sz,tp,rnk,tag,world,req,nreq) MPI_Irecv(p,sz,tp,rnk,tag,world,req); #else #define WaitMPI(nreq,req,st) { mpi_message* mes = &window.mes[nreq]; \ int s=pthread_join(mes->mpith,0); if(s!=0) printf("node %d: Error joining thread %ld 
retcode=%d\n",window.node,mes->mpith,s); } static void* send_func(void* args){ mpi_message *mes = (mpi_message*)args; MPI_Send(mes->buf,mes->count,mes->datatype,mes->dest,mes->tag,mes->comm); return 0; } static void* recv_func(void* args){ mpi_message *mes = (mpi_message*)args; MPI_Status stat; MPI_Recv(mes->buf,mes->count,mes->datatype,mes->dest,mes->tag,mes->comm,&stat); return 0; } #define SendMPI(p,sz,tp,rnk,tag,world,req,nreq) {mpi_message* mes = &window.mes[nreq]; mes->set(p,sz,tp,rnk,tag,world); \ if(pthread_create(&mes->mpith,0,send_func,(void*)mes)!=0) {printf("Error: cannot create thread for MPI_send %d node=%d\n",nreq,window.node); MPI_Abort(MPI_COMM_WORLD, 1);};} #define RecvMPI(p,sz,tp,rnk,tag,world,req,nreq) {mpi_message* mes = &window.mes[nreq]; mes->set(p,sz,tp,rnk,tag,world); \ if(pthread_create(&mes->mpith,0,recv_func,(void*)mes)!=0) {printf("Error: cannot create thread for MPI_recv %d node=%d\n",nreq,window.node); MPI_Abort(MPI_COMM_WORLD, 1);};} #endif//USE_MPI_THREADING #endif// MPI_ON int calcStep(){ // CHECK_ERROR( hipDeviceSetSharedMemConfig ( hipSharedMemBankSizeEightByte ) ); if(parsHost.iStep==0) printf("Starting...\n"); cuTimer t0; t0.init(); int torreNum=0; double dropTime=0; CHECK_ERROR(hipDeviceSynchronize()); #ifdef TEST_RATE for(int ix=Ns-Ntime; ix>0; ix--) { // printf("ix=%d\n",ix); const int block_spacing = TEST_RATE; hipLaunchKernelGGL(( torreD0), dim3((Na-2)/block_spacing),dim3(Nv), 0, 0, ix, 1, Ntime, 0); hipDeviceSynchronize(); CHECK_ERROR( hipGetLastError() ); hipLaunchKernelGGL(( torreD1), dim3((Na-2)/block_spacing),dim3(Nv), 0, 0, ix, 1, Ntime, 0); hipDeviceSynchronize(); CHECK_ERROR( hipGetLastError() ); torreNum++; } #else Window window; window.prepare(); int node_shift=0; for(int inode=0; inode<window.node; inode++) node_shift+= mapNodeSize[inode]; node_shift-= Ns*window.node; int nsize=mapNodeSize[window.node]; int nL=node_shift; int nR=nL+nsize; #ifdef MPI_ON if(parsHost.iStep==0) { int wleftP=nR-Ns; int wleftM=nL; doWaitP=0; doWaitM=0; if(window.node!=window.Nprocs-1) { #ifndef MPI_TEST DEBUG_MPI(("timestamp %10.2f: Recv P (node %d) wleft=%d / tag %d, req %p\n", omp_get_wtime(), window.node, wleftP, 2, &reqRp)); for(int idev=0; idev<NDev; idev++) { RecvMPI( parsHost.p2pBufM_host_rcv[idev], Ntime , MPI_HLFRAGTYPE, (window.node+1)*NasyncNodes+window.subnode, 2+0+window.Nprocs*10+2*idev+0, MPI_COMM_WORLD, &reqRM_p2pbuf[idev],); RecvMPI( parsHost.p2pBufP_host_rcv[idev], Ntime , MPI_HLFRAGTYPE, (window.node+1)*NasyncNodes+window.subnode, 2+0+window.Nprocs*10+2*idev+1, MPI_COMM_WORLD, &reqRP_p2pbuf[idev],); } RecvMPI(&window.data [wleftP*Na ], Ns*Na , MPI_DMDRAGTYPE, (window.node+1)*NasyncNodes+window.subnode, 2+0, MPI_COMM_WORLD, &reqRp , 2);flagRp =0; RecvMPI(&window.dataPMLa[wleftP*Npmly], Ns*Npmly , MPI_RAGPMLTYPE, (window.node+1)*NasyncNodes+window.subnode, 2+1, MPI_COMM_WORLD, &reqRp_pml, 6);flagRp_pml=0; doWaitP=1; #endif } } #endif//MPI_ON while(window.w0+Ns>=0) { #ifdef MPI_ON if( true ) { #ifdef DROP_DATA if(parsHost.wleft==nR-Ns-Ns-1) { cuTimer tdrop; tdrop.init(); parsHost.drop.drop( nsize-Ns ,nsize ,window.data,parsHost.iStep); dropTime+= tdrop.gettime(); } if(parsHost.wleft==nL-Ns-1 ) { cuTimer tdrop; tdrop.init(); parsHost.drop.drop((window.node==0)?0:Ns,nsize-Ns,window.data,parsHost.iStep); dropTime+= tdrop.gettime(); } #endif bool doSend[2] = {1,1}; bool doRecv[2] = {1,1}; #ifdef MPI_TEST if(parsHost.iStep -window.node<=0) { doSend[0]=0; doSend[1]=0; doRecv[1]=0; } if(parsHost.iStep+1-window.node<=0) { doRecv[0]=0; } #endif 
if(doWaitP && parsHost.wleft==nR+(Ns-Ntime-1) ) { if(window.node!=window.Nprocs-1) DEBUG_MPI(("timestamp %10.2f: waiting P (node %d) wleft=%d / requests %p %p\n", omp_get_wtime(), window.node, parsHost.wleft, &reqRp,&reqSp)); if(window.node!=window.Nprocs-1) { WaitMPI(2,&reqRp, &status);WaitMPI(6,&reqRp_pml, &status); flagRp=1;flagRp_pml=1;} if(window.node!=window.Nprocs-1) for(int idev=0;idev<NDev;idev++) {WaitMPI(,&reqRM_p2pbuf[idev], &status);WaitMPI(,&reqRP_p2pbuf[idev], &status);} if(window.node!=window.Nprocs-1) for(int idev=0;idev<NDev;idev++) { CHECK_ERROR(hipMemcpy(parsHost.p2pBufM[idev],parsHost.p2pBufM_host_rcv[idev],Ntime*sizeof(halfRag),hipMemcpyHostToDevice)); CHECK_ERROR(hipMemcpy(parsHost.p2pBufP[idev],parsHost.p2pBufP_host_rcv[idev],Ntime*sizeof(halfRag),hipMemcpyHostToDevice)); } } if(parsHost.wleft==nR-Ns-Ns-1 && window.node!=window.Nprocs-1) { if(doSend[1]) { DEBUG_MPI(("timestamp %10.2f: Send&Recv P(%d) (node %d) wleft=%d (tags %d,%d, reqs %p,%p)\n", omp_get_wtime(), parsHost.wleft+Ns, window.node, parsHost.wleft, 2+(parsHost.iStep+1)*2+0, 2+(parsHost.iStep+1)*2+1, &reqSp, &reqRp)); SendMPI(&window.data [(nR-Ns)*Na ], doSend[1]*Ns*Na , MPI_DMDRAGTYPE, (window.node+1)*NasyncNodes+window.subnode, 2+(parsHost.iStep+1)*2+0, MPI_COMM_WORLD, &reqSp ,0);flagSp =0; SendMPI(&window.dataPMLa[(nR-Ns)*Npmly], doSend[1]*Ns*Npmly, MPI_RAGPMLTYPE, (window.node+1)*NasyncNodes+window.subnode, 2+(parsHost.iStep+1)*2+1, MPI_COMM_WORLD, &reqSp_pml,4);flagSp_pml=0; DEBUG_MPI(("timestamp %10.2f: ok Send P(%d) (node %d) wleft=%d (tags %d,%d, reqs %p,%p)\n", omp_get_wtime(), parsHost.wleft+Ns, window.node, parsHost.wleft, 2+(parsHost.iStep+1)*2+0, 2+(parsHost.iStep+1)*2+0, &reqSp, &reqRp)); } if(doRecv[1]) { for(int idev=0; idev<NDev; idev++) { RecvMPI( parsHost.p2pBufM_host_rcv[idev], doRecv[1]*Ntime, MPI_HLFRAGTYPE, (window.node+1)*NasyncNodes+window.subnode, 2+(parsHost.iStep+1)*2+window.Nprocs*10+2*idev+0, MPI_COMM_WORLD, &reqRM_p2pbuf[idev],); RecvMPI( parsHost.p2pBufP_host_rcv[idev], doRecv[1]*Ntime, MPI_HLFRAGTYPE, (window.node+1)*NasyncNodes+window.subnode, 2+(parsHost.iStep+1)*2+window.Nprocs*10+2*idev+1, MPI_COMM_WORLD, &reqRP_p2pbuf[idev],); } RecvMPI(&window.data [(nR-Ns)*Na ], doRecv[1]*Ns*Na , MPI_DMDRAGTYPE, (window.node+1)*NasyncNodes+window.subnode, 2+(parsHost.iStep+1)*2+0, MPI_COMM_WORLD, &reqRp ,2);flagRp =0; RecvMPI(&window.dataPMLa[(nR-Ns)*Npmly], doRecv[1]*Ns*Npmly, MPI_RAGPMLTYPE, (window.node+1)*NasyncNodes+window.subnode, 2+(parsHost.iStep+1)*2+1, MPI_COMM_WORLD, &reqRp_pml,6);flagRp_pml=0; doWaitP=1; } } // if(doWaitM && parsHost.wleft==nL+Ns+(Ns-Ntime-1) && parsHost.iStep!=0) { if(doWaitM && parsHost.wleft==nR-1-Ns-((window.node==window.Nprocs-1)?(Ntime/2+1):Ns) && parsHost.iStep!=0) { if(window.node!=0) DEBUG_MPI(("timestamp %10.2f: waiting M (node %d) wleft=%d / requests %p %p\n", omp_get_wtime(), window.node, parsHost.wleft, &reqRm, &reqSm)); if(window.node!=0) { WaitMPI(3,&reqRm, &status);WaitMPI(7,&reqRm_pml, &status); flagRm=1;flagRm_pml=1;} } #ifdef MPI_NUDGE if((parsHost.wleft+Ns)%1==0) { if(doWaitP) if( window.node!=window.Nprocs-1) { if(!flagRp_pml || !flagRp) DEBUG_MPI(("timestamp %10.2f: testing recvP(%p) (node %d) wleft=%d\n", omp_get_wtime(), &reqRp, window.node, parsHost.wleft)); if(!flagRp) MPI_Test(&reqRp, &flagRp, &status); if(!flagRp_pml)MPI_Test(&reqRp_pml, &flagRp_pml, &status); } if(doWaitM) if(parsHost.iStep!=0 && window.node!=0 ) { if(!flagRm_pml || !flagRm) DEBUG_MPI(("timestamp %10.2f: testing recvM(%p) (node %d) wleft=%d\n", 
omp_get_wtime(), &reqRm, window.node, parsHost.wleft)); if(!flagRm) MPI_Test(&reqRm, &flagRm, &status); if(!flagRm_pml)MPI_Test(&reqRm_pml, &flagRm_pml, &status); } } #endif #ifdef MPI_TEST if(parsHost.iStep-window.node>0) { #endif ampi_exch.do_run=1; if(NasyncNodes>1) { if(sem_init(&ampi_exch.sem_calc, 0,0)==-1) printf("Error semaphore init errno=%d\n", errno); if(sem_init(&ampi_exch.sem_mpi , 0,0)==-1) printf("Error semaphore init errno=%d\n", errno); } #pragma omp parallel num_threads(2) { if(omp_get_thread_num()==1) { window.calcDtorres(nL,nR, parsHost.wleft<nL && window.node!=0, parsHost.wleft>=nR-Ns && window.node!=window.Nprocs-1); ampi_exch.do_run=0; if(NasyncNodes>1) if(sem_post(&ampi_exch.sem_mpi)<0) printf("sem_post_mpi end error %d\n",errno); } #pragma omp master if(NasyncNodes>1) { while(ampi_exch.do_run) ampi_exch.run(); if(sem_post(&ampi_exch.sem_calc)<0) printf("sem_post_calc end error %d\n",errno); } } if(NasyncNodes>1) { if(sem_destroy(&ampi_exch.sem_mpi )<0) printf("sem_destroy error %d\n",errno); if(sem_destroy(&ampi_exch.sem_calc)<0) printf("sem_destroy error %d\n",errno); } #ifdef MPI_TEST } #endif if(parsHost.wleft==nL-Ns-1 && window.node!=0 ) { if(doSend[0]) { DEBUG_MPI(("timestamp %10.2f: Send&Recv M(%d) (node %d) wleft=%d (tags %d,%d, reqs %p,%p)\n", omp_get_wtime(), parsHost.wleft+Ns+1, window.node, parsHost.wleft, 2+(parsHost.iStep )*2+0, 2+(parsHost.iStep+1)*2+0, &reqSm, &reqRm)); for(int idev=0; idev<NDev; idev++) { CHECK_ERROR(hipMemcpy(parsHost.p2pBufM_host_snd[idev],parsHost.p2pBufM[idev],Ntime*sizeof(halfRag),hipMemcpyDeviceToHost)); CHECK_ERROR(hipMemcpy(parsHost.p2pBufP_host_snd[idev],parsHost.p2pBufP[idev],Ntime*sizeof(halfRag),hipMemcpyDeviceToHost)); SendMPI( parsHost.p2pBufM_host_snd[idev] , doSend[0]*Ntime , MPI_HLFRAGTYPE, (window.node-1)*NasyncNodes+window.subnode, 2+(parsHost.iStep )*2+window.Nprocs*10+2*idev+0, MPI_COMM_WORLD, &reqSM_p2pbuf[idev],); SendMPI( parsHost.p2pBufP_host_snd[idev] , doSend[0]*Ntime , MPI_HLFRAGTYPE, (window.node-1)*NasyncNodes+window.subnode, 2+(parsHost.iStep )*2+window.Nprocs*10+2*idev+1, MPI_COMM_WORLD, &reqSP_p2pbuf[idev],); } SendMPI(&window.data [ nL *Na ], doSend[0]*Ns*Na , MPI_DMDRAGTYPE, (window.node-1)*NasyncNodes+window.subnode, 2+(parsHost.iStep )*2+0, MPI_COMM_WORLD, &reqSm ,1);flagSm =0; SendMPI(&window.dataPMLa[ nL *Npmly], doSend[0]*Ns*Npmly, MPI_RAGPMLTYPE, (window.node-1)*NasyncNodes+window.subnode, 2+(parsHost.iStep )*2+1, MPI_COMM_WORLD, &reqSm_pml,5);flagSm_pml=0; DEBUG_MPI(("timestamp %10.2f: ok Send M(%d) (node %d) wleft=%d (tags %d,%d, reqs %p,%p)\n", omp_get_wtime(), parsHost.wleft+Ns+1, window.node, parsHost.wleft, 2+(parsHost.iStep )*2+0, 2+(parsHost.iStep+1)*2+0, &reqSm, &reqRm)); } if(doRecv[0]) { RecvMPI(&window.data [ nL *Na ], doRecv[0]*Ns*Na , MPI_DMDRAGTYPE, (window.node-1)*NasyncNodes+window.subnode, 2+(parsHost.iStep+1)*2+0, MPI_COMM_WORLD, &reqRm, 3);flagRm =0; RecvMPI(&window.dataPMLa[ nL *Npmly], doRecv[0]*Ns*Npmly, MPI_RAGPMLTYPE, (window.node-1)*NasyncNodes+window.subnode, 2+(parsHost.iStep+1)*2+1, MPI_COMM_WORLD, &reqRm_pml,7);flagRm_pml=0; doWaitM=1; } } } #else//MPI_ON not def window.calcDtorres(); #endif//MPI_ON window.synchronize(); } window.finalize(); #endif//TEST_RATE #if not defined MPI_ON && defined DROP_DATA cuTimer tdrop; tdrop.init(); parsHost.drop.drop(0,Np,parsHost.data,parsHost.iStep); dropTime+= tdrop.gettime(); /* parsHost.drop.dump(); #ifndef MPI_TEST if(0 && parsHost.iStep%(10*window.Nprocs)==0) parsHost.drop.sync(); #endif */ #endif double 
calcTime=t0.gettime(); unsigned long int yee_cells = 0; double overhead=0; #ifndef TEST_RATE yee_cells = NDT*NDT*Ntime*(unsigned long long)(Nv*((Na+1-NDev)*NasyncNodes+1-NasyncNodes))*Np; overhead = window.RAMcopytime/window.GPUcalctime; printf("Step %d /node %d/ subnode %d/: Time %9.09f ms |drop %3.03f%% ||rate %9.09f GYee_cells/sec |total grid %dx%dx%d=%ld cells | isTFSF=%d\n", parsHost.iStep, window.node, window.subnode, calcTime, 100*dropTime/calcTime, 1.e-9*yee_cells/(calcTime*1.e-3), NDT*Np,NDT*((Na+1-NDev)*NasyncNodes+1-NasyncNodes),Nv,yee_cells/Ntime, (parsHost.iStep+1)*Ntime*dt<shotpoint.tStop ); // for(int idev=0;idev<NDev;idev++) printf("%3.03f%% ", 100*window.disbal[idev]/window.GPUcalctime); #ifdef TIMERS_ON printf(" |waitings%d %5.05f",(parsHost.iStep)%NDev,1.e3*window.disbal[0]); for(int idev=1; idev<NDev; idev++) printf(", %5.05f", 1.e3*window.disbal[idev]); printf("\n"); for(int idev=0; idev<NDev; idev++) printf(" |timers(Step,node,subnode,device): %d %d %d %d | PMLbot PMLtop I X Do Dmi P Copy Exec:| %.02f %.02f %.02f %.02f %.02f %.02f %.02f %.02f %.02f\n", parsHost.iStep,window.node,window.subnode,idev, window.timerPMLbot, window.timerPMLtop, window.timerI, window.timerX, window.timerDo[idev], window.timerDm[idev], window.timerP, window.timerCopy, window.timerExec); #endif//TIMERS_ON #else//if def TEST_RATE yee_cells = NDT*NDT*Ntime*(unsigned long long)(Nv*((Na-2)/TEST_RATE))*torreNum; printf("Step %d: Time %9.09f ms |drop %3.03f%% |rate %9.09f %d %d %d %d (GYee cells/sec,Np,Na,Nv,Ntime) |isTFSF=%d \n", parsHost.iStep, calcTime, 100*dropTime/calcTime, 1.e-9*yee_cells/(calcTime*1.e-3), Np,Na,Nv,Ntime, (parsHost.iStep+1)*Ntime*dt<shotpoint.tStop ); #endif//TEST_RATE #ifdef MPI_ON double AllCalcTime; MPI_Reduce(&calcTime, &AllCalcTime, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); if(window.node==0 && 0) printf("===(%3d)===AllCalcTime %9.09f sec |rate %9.09f GYee_cells/sec\n", parsHost.iStep, AllCalcTime*1e-3, 1.e-9*yee_cells/(AllCalcTime*1.e-3) ); #endif fflush(stdout); parsHost.iStep++; copy2dev(parsHost, pars); return 0; }
7863b872d81801568d0cedc1b8bb459dfa0e3861.cu
#include <stdio.h> #include <errno.h> #include <omp.h> #include <semaphore.h> #ifdef MPI_ON #include <mpi.h> #endif #include "chooseV.h" #include "signal.h" #ifdef MPI_ON MPI_Datatype MPI_DMDRAGTYPE; MPI_Datatype MPI_RAGPMLTYPE; MPI_Datatype MPI_HLFRAGTYPE; #endif int* mapNodeSize; //============================================= ftype* __restrict__ hostKpmlx1; ftype* __restrict__ hostKpmlx2; ftype* __restrict__ hostKpmly1; ftype* __restrict__ hostKpmly2; ftype* __restrict__ hostKpmlz1; ftype* __restrict__ hostKpmlz2; GeoParamsHost parsHost; __constant__ GeoParams pars; __constant__ int devNStripe[NDev] = STRIPES; __constant__ ftype Kpmlx1[(KNpmlx==0)?1:KNpmlx]; __constant__ ftype Kpmlx2[(KNpmlx==0)?1:KNpmlx]; __constant__ ftype Kpmly1[(KNpmly==0)?1:KNpmly]; __constant__ ftype Kpmly2[(KNpmly==0)?1:KNpmly]; __constant__ ftype Kpmlz1[(KNpmlz==0)?1:KNpmlz]; __constant__ ftype Kpmlz2[(KNpmlz==0)?1:KNpmlz]; //__shared__ ftype2 shared_fld[2][7][Nz]; //__shared__ ftype2 shared_fld[(FTYPESIZE*Nv*28>0xc000)?7:14][Nv]; __shared__ ftype2 shared_fld[SHARED_SIZE][NzMax]; texture<char, cudaTextureType3D> index_tex; cudaArray* index_texArray=0; #include "window.hpp" struct AsyncMPIexch{ int even,ix,t0,Nt,mpirank; bool do_run; double exch_time; sem_t sem_mpi, sem_calc; void exch(const int _even, const int _ix, const int _t0, const int _Nt, const int _mpirank) { even=_even; ix=_ix; t0=_t0; Nt=_Nt, mpirank=_mpirank; exch_time=0; if(sem_post(&sem_mpi)<0) printf("exch sem_post error %d\n",errno); } void exch_sync(){ if(sem_wait(&sem_calc)<0) printf("exch_sync sem error %d\n",errno); } void run() { if(sem_wait(&sem_mpi)<0) printf("run sem_wait error %d\n",errno); if(do_run==0) return; double start_time = omp_get_wtime(); if(even==0) DiamondRag::bufSendMPIp(mpirank, t0,Nt); if(even==1) DiamondRag::bufSendMPIm(mpirank, t0,Nt); exch_time = omp_get_wtime()-start_time; if(sem_post(&sem_calc)<0) printf("run sem_post error %d\n",errno);; } } ampi_exch; #ifdef TIMERS_ON #define IFPMLS(func,a,b,c,d,TIMER,args) {\ /*printf(#func" idev=%d ix=%d iym=%d Nblocks=%d\n", idev,ix, iym, a);*/ TIMER.init(d); \ if(isPMLs) PMLS##func<<<a,b,c,d>>>args; else func<<<a,b,c,d>>>args; TIMER.record(); } #else #define IFPMLS(func,a,b,c,d,EVENT,args) {\ /*printf(#func" PMLS=%d idev=%d w0=%d ix=%d iym=%d Nblocks=%d\n", isPMLs, idev,w0,ix, iym, a);*/\ for(int iz=0 ; iz<Nv; iz+=2*Nw+6) { if(isPMLs) PMLS##func<0><<<a,b,c,d>>>args; else func<0><<<a,b,c,d>>>args; } \ for(int iz=Nw+3; iz<Nv; iz+=2*Nw+6) { if(isPMLs) PMLS##func<1><<<a,b,c,d>>>args; else func<1><<<a,b,c,d>>>args; } } #endif //#define IFPMLS(func,a,b,c,d,args) { if(!isPMLs) func<<<a,b,c,d>>>args; } //#define IFPMLS(func,a,b,c,d,args) func<<<a,b,c,d>>>args; template<int even> inline void Window::Dtorre(int ix, int Nt, int t0, double disbal[NDev], bool isPMLs, bool isTFSF) { if(Nt<=t0 || Nt<=0) return; DEBUG_PRINT(("Dtorre%d isPMLs=%d isTFSF=%d ix=%d, t0=%d Nt=%d wleft=%d\n", even, isPMLs, isTFSF, ix,t0,Nt, parsHost.wleft)); const int Nw=Nzw-10;/*min(Nv-10,Nzw-10);*//*Nv/2*/; const int Nth=Nw+10; CHECK_ERROR( cudaSetDevice(0) ); #ifdef TIMERS_ON cuTimer ttDm[NDev], ttDo[NDev]; cudaStream_t stPMLbot; CHECK_ERROR( cudaStreamCreate(&stPMLbot) ); cudaStream_t stI; CHECK_ERROR( cudaStreamCreate(&stI ) ); cuTimer ttPMLtop, ttI; cudaStream_t stDm[NDev],stDo[NDev]; for(int i=0;i<NDev;i++) { if(i!=0) CHECK_ERROR( cudaSetDevice(i) ); CHECK_ERROR( cudaStreamCreate(&stDm[i]) ); CHECK_ERROR( cudaStreamCreate(&stDo[i]) ); ttDm[i].created=0; ttDo[i].created=0; } cudaStream_t stPMLtop; CHECK_ERROR( 
cudaStreamCreate(&stPMLtop) ); cudaStream_t stX; CHECK_ERROR( cudaStreamCreate(&stX ) ); cuTimer ttPMLbot, ttX; cudaStream_t stP; cuTimer ttP,ttPmpi; if(even==0) { cudaSetDevice(NDev-1); CHECK_ERROR( cudaStreamCreate(&stP ) ); } else if(even==1) { cudaSetDevice(0 ); CHECK_ERROR( cudaStreamCreate(&stP ) ); } cuTimer ttMPIa; #else//TIMER_S_ON not def cudaStream_t stPMLbot; CHECK_ERROR( cudaStreamCreate(&stPMLbot) ); cudaStream_t stI; CHECK_ERROR( cudaStreamCreate(&stI ) ); cudaStream_t stDm[NDev],stDo[NDev]; for(int i=0;i<NDev;i++) { if(i!=0) CHECK_ERROR( cudaSetDevice(i) ); CHECK_ERROR( cudaStreamCreate(&stDm[i]) ); CHECK_ERROR( cudaStreamCreate(&stDo[i]) ); } cudaStream_t stPMLtop; CHECK_ERROR( cudaStreamCreate(&stPMLtop) ); cudaStream_t stX; CHECK_ERROR( cudaStreamCreate(&stX ) ); cudaStream_t stP ; if(even==0) { cudaSetDevice(NDev-1); CHECK_ERROR( cudaStreamCreate(&stP ) ); } else if(even==1) { cudaSetDevice(0 ); CHECK_ERROR( cudaStreamCreate(&stP ) ); } #endif//TIMERS_ON CHECK_ERROR( cudaSetDevice(0) ); int iym=0, iyp=0; int Nblk=0; iyp++; int Iy=iym, Xy, D1oy[NDev], D0oy[NDev], Dmy[NDev], DmBlk[NDev], Syb,Syt, SybBlk,SytBlk; int is_oneL[NDev], is_oneU[NDev], is_many[NDev], is_I[NDev], is_X[NDev], is_Sb[NDev], is_St[NDev], is_P[NDev]; for(int i=0; i<NDev; i++) { is_oneL[i]=0; is_oneU[i]=0; is_many[i]=0; is_I[i]=0; is_X[i]=0; is_Sb[i]=0; is_St[i]=0; is_P[i]=0; } is_I[0]=1; iym=iyp; Nblk=0; while(iyp<Npmly/2) { iyp++; Nblk++; } if(Nblk>0) is_Sb[0]=1; Syb=iym; SybBlk=Nblk; for(int idev=0,nextY=0; idev<NDev; idev++) { nextY+=NStripe[idev]; if(idev==NDev-1) nextY-=max(1,Npmly/2); if(idev!=0) { // Dtorre1 only if(iyp<nextY && even==1) is_oneL[idev]=1; D1oy[idev]=iyp; if(iyp<nextY) iyp++; } iym=iyp; Nblk=0; while(iyp<nextY-(idev==NDev-1?0:1)) { iyp++; Nblk++; } // Main Region if(Nblk>0) is_many[idev]=1; Dmy[idev]=iym, DmBlk[idev]=Nblk; if(idev!=NDev-1) { // Dtorre0 only if(iyp<nextY && even==0) is_oneU[idev]=1; D0oy[idev]=iyp; if(iyp<nextY) iyp++; } } iym=iyp; Nblk=0; while(iyp<Na-1) { iyp++; Nblk++; } if(Nblk>0) is_St[NDev-1]=1; is_X[NDev-1]=1; Syt=iym; SytBlk=Nblk; Xy=iyp; if(subnode!=0) { is_I [0]=0; if(even==1) is_P[0]=1; is_Sb[0]=0; DmBlk[0]+=SybBlk; Dmy[0]=Syb; } if(subnode!=NasyncNodes-1) { is_X [NDev-1]=0; if(even==0) is_P[NDev-1]=1; is_St[NDev-1]=0; DmBlk[NDev-1]+=SytBlk; } int mpirank = node*NasyncNodes+subnode; for(int idev=0; idev<NDev; idev++) { if(idev!=0) CHECK_ERROR( cudaSetDevice(idev) ); if(is_oneL[idev] && even==1 && isTFSF ) IFPMLS(torreTFSF1 ,1 ,Nth,0,stDo[idev],ttDo[idev],(ix,D1oy[idev],iz,iz+Nw,Nt,t0)) if(is_oneL[idev] && even==1 && !isTFSF ) IFPMLS(torreD1 ,1 ,Nth,0,stDo[idev],ttDo[idev],(ix,D1oy[idev],iz,iz+Nw,Nt,t0)) if(is_oneL[idev] && even==1 ) bufsave<1><<<(Nv+Nw-1)/Nw,Nw,0,stDo[idev]>>>(ix,D1oy[idev],Nt,t0); if(is_oneU[idev] && even==0 && isTFSF ) IFPMLS(torreTFSF0 ,1 ,Nth,0,stDo[idev],ttDo[idev],(ix,D0oy[idev],iz,iz+Nw,Nt,t0)) if(is_oneU[idev] && even==0 && !isTFSF ) IFPMLS(torreD0 ,1 ,Nth,0,stDo[idev],ttDo[idev],(ix,D0oy[idev],iz,iz+Nw,Nt,t0)) if(is_oneU[idev] && even==0 ) bufsave<0><<<(Nv+Nw-1)/Nw,Nw,0,stDo[idev]>>>(ix,D0oy[idev],Nt,t0); if(is_I[idev] && even==0 && Npmly==0) IFPMLS(torreId0 ,1 ,Nth,0,stI ,ttI ,(ix,Iy ,iz,iz+Nw,Nt,t0)) if(is_I[idev] && even==0 && Npmly!=0) IFPMLS(torreIs0 ,1 ,Nth,0,stI ,ttI ,(ix,Iy ,iz,iz+Nw,Nt,t0)) if(is_I[idev] && even==1 && Npmly==0) IFPMLS(torreId1 ,1 ,Nth,0,stI ,ttI ,(ix,Iy ,iz,iz+Nw,Nt,t0)) if(is_I[idev] && even==1 && Npmly!=0) IFPMLS(torreIs1 ,1 ,Nth,0,stI ,ttI ,(ix,Iy ,iz,iz+Nw,Nt,t0)) if(is_X[idev] && even==0 && 
Npmly==0) IFPMLS(torreXd0 ,1 ,Nth,0,stX ,ttX ,(ix,Xy ,iz,iz+Nw,Nt,t0)) if(is_X[idev] && even==0 && Npmly!=0) IFPMLS(torreXs0 ,1 ,Nth,0,stX ,ttX ,(ix,Xy ,iz,iz+Nw,Nt,t0)) if(is_X[idev] && even==1 && Npmly==0) IFPMLS(torreXd1 ,1 ,Nth,0,stX ,ttX ,(ix,Xy ,iz,iz+Nw,Nt,t0)) if(is_X[idev] && even==1 && Npmly!=0) IFPMLS(torreXs1 ,1 ,Nth,0,stX ,ttX ,(ix,Xy ,iz,iz+Nw,Nt,t0)) if(is_P[idev] && even==0 ) IFPMLS(torreD0 ,1 ,Nth,0,stP ,ttP ,(ix,Xy ,iz,iz+Nw,Nt,t0)) if(is_P[idev] && even==1 ) IFPMLS(torreD1 ,1 ,Nth,0,stP ,ttP ,(ix,Iy ,iz,iz+Nw,Nt,t0)) if(is_P[idev] && even==0 ) bufsave<0><<<(Nv+Nw-1)/Nw,Nw,0,stP >>>(ix,Xy ,Nt,t0); if(is_P[idev] && even==1 ) bufsave<1><<<(Nv+Nw-1)/Nw,Nw,0,stP >>>(ix,Iy ,Nt,t0); if(is_Sb[idev] && even==0 ) IFPMLS(torreS0 ,SybBlk ,Nth,0,stPMLbot ,ttPMLbot ,(ix,Syb ,iz,iz+Nw,Nt,t0)) if(is_Sb[idev] && even==1 ) IFPMLS(torreS1 ,SybBlk ,Nth,0,stPMLbot ,ttPMLbot ,(ix,Syb ,iz,iz+Nw,Nt,t0)) if(is_St[idev] && even==0 ) IFPMLS(torreS0 ,SytBlk ,Nth,0,stPMLtop ,ttPMLtop ,(ix,Syt ,iz,iz+Nw,Nt,t0)) if(is_St[idev] && even==1 ) IFPMLS(torreS1 ,SytBlk ,Nth,0,stPMLtop ,ttPMLtop ,(ix,Syt ,iz,iz+Nw,Nt,t0)) if(is_many[idev] && even==0 && isTFSF ) IFPMLS(torreTFSF0 ,DmBlk[idev],Nth,0,stDm[idev],ttDm[idev],(ix,Dmy[idev] ,iz,iz+Nw,Nt,t0)) if(is_many[idev] && even==1 && isTFSF ) IFPMLS(torreTFSF1 ,DmBlk[idev],Nth,0,stDm[idev],ttDm[idev],(ix,Dmy[idev] ,iz,iz+Nw,Nt,t0)) if(is_many[idev] && even==0 && !isTFSF ) IFPMLS(torreD0 ,DmBlk[idev],Nth,0,stDm[idev],ttDm[idev],(ix,Dmy[idev] ,iz,iz+Nw,Nt,t0)) if(is_many[idev] && even==1 && !isTFSF ) IFPMLS(torreD1 ,DmBlk[idev],Nth,0,stDm[idev],ttDm[idev],(ix,Dmy[idev] ,iz,iz+Nw,Nt,t0)) if(is_oneL[idev] && even==1 ) DiamondRag::copyMbuf(idev, t0,Nt, stDo[idev]); if(is_oneU[idev] && even==0 ) DiamondRag::copyPbuf(idev, t0,Nt, stDo[idev]); #ifdef TIMERS_ON if(is_oneL[idev] && even==1 || is_oneU[idev] && even==0) ttDo[idev].record(); #endif } if(NasyncNodes>1 && even==1 ) DiamondRag::prepTransM(mpirank, t0,Nt, stP); if(NasyncNodes>1 && even==0 ) DiamondRag::prepTransP(mpirank, t0,Nt, stP); #ifdef TIMERS_ON for(int idev=0; idev<NDev; idev++) { CHECK_ERROR(cudaSetDevice(idev)); if(is_P[idev]) ttP.record(); } CHECK_ERROR(cudaSetDevice(0)); #endif CHECK_ERROR( cudaSetDevice(0) ); float copytime=0; bool doSynccopy=0; if(!doneMemcopy) { doSynccopy=1; #ifdef TIMERS_ON for(int idev=0; idev<NDev; idev++) { CHECK_ERROR( cudaSetDevice(idev) ); CHECK_ERROR( cudaEventRecord(copyEventStart[idev], streamCopy[idev]) ); } CHECK_ERROR( cudaSetDevice(0) ); if(even==0) MemcopyDtH(ix4copy); if(even==1) MemcopyHtD(ix4copy); for(int idev=0; idev<NDev; idev++) { CHECK_ERROR( cudaSetDevice(idev) ); CHECK_ERROR( cudaEventRecord(copyEventEnd[idev], streamCopy[idev]) ); } CHECK_ERROR( cudaSetDevice(0) ); #else if(even==0) MemcopyDtH(ix4copy); if(even==1) MemcopyHtD(ix4copy); #endif if(even==1) doneMemcopy=true; } CHECK_ERROR( cudaStreamSynchronize(stP ) ); #ifdef TIMERS_ON timerP += ttP.gettime_rec(); #endif if(NasyncNodes>1) ampi_exch.exch(even, ix, t0, Nt, mpirank); if(NasyncNodes>1) ampi_exch.exch_sync(); #ifdef TIMERS_ON for(int idev=0; idev<NDev; idev++) { CHECK_ERROR(cudaSetDevice(idev)); if(is_P[idev]) ttPmpi.init(stP); } CHECK_ERROR(cudaSetDevice(0)); #endif if(NasyncNodes>1 && even==1 ) DiamondRag::postTransM(mpirank, t0,Nt, stP); if(NasyncNodes>1 && even==0 ) DiamondRag::postTransP(mpirank, t0,Nt, stP); if(NasyncNodes>1) CHECK_ERROR( cudaStreamSynchronize(stP) ); #ifdef TIMERS_ON for(int idev=0; idev<NDev; idev++) { CHECK_ERROR(cudaSetDevice(idev)); if(is_P[idev]) ttPmpi.record(); } 
CHECK_ERROR(cudaSetDevice(0)); #endif if(doSynccopy) for(int idev=0; idev<NDev; idev++) CHECK_ERROR( cudaStreamSynchronize(streamCopy[idev]) ); #ifdef TIMERS_ON if(doSynccopy) for(int idev=0; idev<NDev; idev++) { float copytime_idev; CHECK_ERROR( cudaEventElapsedTime(&copytime_idev, copyEventStart[idev], copyEventEnd[idev]) ); copytime=max(copytime,copytime_idev); } timerCopy+= copytime; #endif //if(even==1) parsHost.drop.save(stPMLm); CHECK_ERROR( cudaStreamSynchronize(stPMLbot) ); CHECK_ERROR( cudaStreamSynchronize(stPMLtop) ); CHECK_ERROR( cudaStreamSynchronize(stI ) ); CHECK_ERROR( cudaStreamSynchronize(stX ) ); //CHECK_ERROR( cudaStreamSynchronize(stB ) ); for(int i=0;i<NDev;i++) CHECK_ERROR( cudaStreamSynchronize(stDo[i]) ); int firsti=parsHost.iStep%NDev; double tt=omp_get_wtime(); CHECK_ERROR( cudaStreamSynchronize(stDm[firsti]) ); disbal[0]+=omp_get_wtime()-tt; for(int j=1;j<NDev;j++) { int i=(j+parsHost.iStep)%NDev; double tt=omp_get_wtime(); CHECK_ERROR( cudaStreamSynchronize(stDm[i]) ); disbal[j]+=omp_get_wtime()-tt; } CHECK_ERROR( cudaStreamDestroy(stPMLbot) ); CHECK_ERROR( cudaStreamDestroy(stPMLtop) ); CHECK_ERROR( cudaStreamDestroy(stI ) ); CHECK_ERROR( cudaStreamDestroy(stX ) ); //CHECK_ERROR( cudaStreamDestroy(stB ) ); CHECK_ERROR( cudaStreamDestroy(stP ) ); for(int i=0;i<NDev;i++) CHECK_ERROR( cudaStreamDestroy(stDo[i]) ); for(int i=0;i<NDev;i++) CHECK_ERROR( cudaStreamDestroy(stDm[i]) ); #ifdef TIMERS_ON timerPMLtop+= ttPMLtop.gettime_rec(); timerI+= ttI.gettime_rec(); for(int i=0;i<NDev;i++) timerDm[i]+= ttDm[i].gettime_rec(); timerPMLbot+= ttPMLbot.gettime_rec(); timerX+= ttX.gettime_rec(); for(int i=0;i<NDev;i++) timerDo[i]+= ttDo[i].gettime_rec(); timerP += ttPmpi.gettime_rec(); timerP += ampi_exch.exch_time*1e3; float calctime = max(ttPMLtop.diftime,max(ttPMLbot.diftime,max(ttI.diftime,max(ttX.diftime,ttP.diftime+ttPmpi.diftime+ampi_exch.exch_time*1e3)))); for(int i=0;i<NDev;i++) calctime=max(calctime,max(ttDm[i].diftime,ttDo[i].diftime)); timerExec+= max(copytime, calctime); #endif } inline void Window::Dtorres(int ix, int Nt, int t0, double disbal[NDev], bool isPMLs, bool isTFSF) { Dtorre<0>(ix,Nt,t0,disbal,isPMLs,isTFSF); //cudaDeviceSynchronize(); CHECK_ERROR( cudaGetLastError() ); Dtorre<1>(ix,Nt,t0,disbal,isPMLs,isTFSF); //cudaDeviceSynchronize(); CHECK_ERROR( cudaGetLastError() ); } #ifdef MPI_ON MPI_Request reqSp, reqSm, reqRp, reqRm, reqSp_pml, reqSm_pml, reqRp_pml, reqRm_pml; MPI_Request reqSM_p2pbuf[NDev],reqSP_p2pbuf[NDev],reqRM_p2pbuf[NDev],reqRP_p2pbuf[NDev]; MPI_Status status; int flagSp,flagRp,flagSm,flagRm,flagSp_pml,flagRp_pml,flagSm_pml,flagRm_pml; mpi_message Window::mes[8]; int doWaitM,doWaitP; //#define BLOCK_SEND //#define MPI_NUDGE //#define USE_MPI_THREADING #ifndef USE_MPI_THREADING #define WaitMPI(nreq,req,st) MPI_Wait(req,st) //#define SendMPI(p,sz,tp,rnk,tag,world,req,nreq) MPI_Isend(p,sz,tp,rnk,tag,world,req); #define SendMPI(p,sz,tp,rnk,tag,world,req,nreq) MPI_Send(p,sz,tp,rnk,tag,world); #define RecvMPI(p,sz,tp,rnk,tag,world,req,nreq) MPI_Irecv(p,sz,tp,rnk,tag,world,req); #else #define WaitMPI(nreq,req,st) { mpi_message* mes = &window.mes[nreq]; \ int s=pthread_join(mes->mpith,0); if(s!=0) printf("node %d: Error joining thread %ld retcode=%d\n",window.node,mes->mpith,s); } static void* send_func(void* args){ mpi_message *mes = (mpi_message*)args; MPI_Send(mes->buf,mes->count,mes->datatype,mes->dest,mes->tag,mes->comm); return 0; } static void* recv_func(void* args){ mpi_message *mes = (mpi_message*)args; MPI_Status stat; 
MPI_Recv(mes->buf,mes->count,mes->datatype,mes->dest,mes->tag,mes->comm,&stat); return 0; } #define SendMPI(p,sz,tp,rnk,tag,world,req,nreq) {mpi_message* mes = &window.mes[nreq]; mes->set(p,sz,tp,rnk,tag,world); \ if(pthread_create(&mes->mpith,0,send_func,(void*)mes)!=0) {printf("Error: cannot create thread for MPI_send %d node=%d\n",nreq,window.node); MPI_Abort(MPI_COMM_WORLD, 1);};} #define RecvMPI(p,sz,tp,rnk,tag,world,req,nreq) {mpi_message* mes = &window.mes[nreq]; mes->set(p,sz,tp,rnk,tag,world); \ if(pthread_create(&mes->mpith,0,recv_func,(void*)mes)!=0) {printf("Error: cannot create thread for MPI_recv %d node=%d\n",nreq,window.node); MPI_Abort(MPI_COMM_WORLD, 1);};} #endif//USE_MPI_THREADING #endif// MPI_ON int calcStep(){ // CHECK_ERROR( cudaDeviceSetSharedMemConfig ( cudaSharedMemBankSizeEightByte ) ); if(parsHost.iStep==0) printf("Starting...\n"); cuTimer t0; t0.init(); int torreNum=0; double dropTime=0; CHECK_ERROR(cudaDeviceSynchronize()); #ifdef TEST_RATE for(int ix=Ns-Ntime; ix>0; ix--) { // printf("ix=%d\n",ix); const int block_spacing = TEST_RATE; torreD0<<<(Na-2)/block_spacing,Nv>>>(ix, 1, Ntime, 0); cudaDeviceSynchronize(); CHECK_ERROR( cudaGetLastError() ); torreD1<<<(Na-2)/block_spacing,Nv>>>(ix, 1, Ntime, 0); cudaDeviceSynchronize(); CHECK_ERROR( cudaGetLastError() ); torreNum++; } #else Window window; window.prepare(); int node_shift=0; for(int inode=0; inode<window.node; inode++) node_shift+= mapNodeSize[inode]; node_shift-= Ns*window.node; int nsize=mapNodeSize[window.node]; int nL=node_shift; int nR=nL+nsize; #ifdef MPI_ON if(parsHost.iStep==0) { int wleftP=nR-Ns; int wleftM=nL; doWaitP=0; doWaitM=0; if(window.node!=window.Nprocs-1) { #ifndef MPI_TEST DEBUG_MPI(("timestamp %10.2f: Recv P (node %d) wleft=%d / tag %d, req %p\n", omp_get_wtime(), window.node, wleftP, 2, &reqRp)); for(int idev=0; idev<NDev; idev++) { RecvMPI( parsHost.p2pBufM_host_rcv[idev], Ntime , MPI_HLFRAGTYPE, (window.node+1)*NasyncNodes+window.subnode, 2+0+window.Nprocs*10+2*idev+0, MPI_COMM_WORLD, &reqRM_p2pbuf[idev],); RecvMPI( parsHost.p2pBufP_host_rcv[idev], Ntime , MPI_HLFRAGTYPE, (window.node+1)*NasyncNodes+window.subnode, 2+0+window.Nprocs*10+2*idev+1, MPI_COMM_WORLD, &reqRP_p2pbuf[idev],); } RecvMPI(&window.data [wleftP*Na ], Ns*Na , MPI_DMDRAGTYPE, (window.node+1)*NasyncNodes+window.subnode, 2+0, MPI_COMM_WORLD, &reqRp , 2);flagRp =0; RecvMPI(&window.dataPMLa[wleftP*Npmly], Ns*Npmly , MPI_RAGPMLTYPE, (window.node+1)*NasyncNodes+window.subnode, 2+1, MPI_COMM_WORLD, &reqRp_pml, 6);flagRp_pml=0; doWaitP=1; #endif } } #endif//MPI_ON while(window.w0+Ns>=0) { #ifdef MPI_ON if( true ) { #ifdef DROP_DATA if(parsHost.wleft==nR-Ns-Ns-1) { cuTimer tdrop; tdrop.init(); parsHost.drop.drop( nsize-Ns ,nsize ,window.data,parsHost.iStep); dropTime+= tdrop.gettime(); } if(parsHost.wleft==nL-Ns-1 ) { cuTimer tdrop; tdrop.init(); parsHost.drop.drop((window.node==0)?0:Ns,nsize-Ns,window.data,parsHost.iStep); dropTime+= tdrop.gettime(); } #endif bool doSend[2] = {1,1}; bool doRecv[2] = {1,1}; #ifdef MPI_TEST if(parsHost.iStep -window.node<=0) { doSend[0]=0; doSend[1]=0; doRecv[1]=0; } if(parsHost.iStep+1-window.node<=0) { doRecv[0]=0; } #endif if(doWaitP && parsHost.wleft==nR+(Ns-Ntime-1) ) { if(window.node!=window.Nprocs-1) DEBUG_MPI(("timestamp %10.2f: waiting P (node %d) wleft=%d / requests %p %p\n", omp_get_wtime(), window.node, parsHost.wleft, &reqRp,&reqSp)); if(window.node!=window.Nprocs-1) { WaitMPI(2,&reqRp, &status);WaitMPI(6,&reqRp_pml, &status); flagRp=1;flagRp_pml=1;} 
if(window.node!=window.Nprocs-1) for(int idev=0;idev<NDev;idev++) {WaitMPI(,&reqRM_p2pbuf[idev], &status);WaitMPI(,&reqRP_p2pbuf[idev], &status);} if(window.node!=window.Nprocs-1) for(int idev=0;idev<NDev;idev++) { CHECK_ERROR(cudaMemcpy(parsHost.p2pBufM[idev],parsHost.p2pBufM_host_rcv[idev],Ntime*sizeof(halfRag),cudaMemcpyHostToDevice)); CHECK_ERROR(cudaMemcpy(parsHost.p2pBufP[idev],parsHost.p2pBufP_host_rcv[idev],Ntime*sizeof(halfRag),cudaMemcpyHostToDevice)); } } if(parsHost.wleft==nR-Ns-Ns-1 && window.node!=window.Nprocs-1) { if(doSend[1]) { DEBUG_MPI(("timestamp %10.2f: Send&Recv P(%d) (node %d) wleft=%d (tags %d,%d, reqs %p,%p)\n", omp_get_wtime(), parsHost.wleft+Ns, window.node, parsHost.wleft, 2+(parsHost.iStep+1)*2+0, 2+(parsHost.iStep+1)*2+1, &reqSp, &reqRp)); SendMPI(&window.data [(nR-Ns)*Na ], doSend[1]*Ns*Na , MPI_DMDRAGTYPE, (window.node+1)*NasyncNodes+window.subnode, 2+(parsHost.iStep+1)*2+0, MPI_COMM_WORLD, &reqSp ,0);flagSp =0; SendMPI(&window.dataPMLa[(nR-Ns)*Npmly], doSend[1]*Ns*Npmly, MPI_RAGPMLTYPE, (window.node+1)*NasyncNodes+window.subnode, 2+(parsHost.iStep+1)*2+1, MPI_COMM_WORLD, &reqSp_pml,4);flagSp_pml=0; DEBUG_MPI(("timestamp %10.2f: ok Send P(%d) (node %d) wleft=%d (tags %d,%d, reqs %p,%p)\n", omp_get_wtime(), parsHost.wleft+Ns, window.node, parsHost.wleft, 2+(parsHost.iStep+1)*2+0, 2+(parsHost.iStep+1)*2+0, &reqSp, &reqRp)); } if(doRecv[1]) { for(int idev=0; idev<NDev; idev++) { RecvMPI( parsHost.p2pBufM_host_rcv[idev], doRecv[1]*Ntime, MPI_HLFRAGTYPE, (window.node+1)*NasyncNodes+window.subnode, 2+(parsHost.iStep+1)*2+window.Nprocs*10+2*idev+0, MPI_COMM_WORLD, &reqRM_p2pbuf[idev],); RecvMPI( parsHost.p2pBufP_host_rcv[idev], doRecv[1]*Ntime, MPI_HLFRAGTYPE, (window.node+1)*NasyncNodes+window.subnode, 2+(parsHost.iStep+1)*2+window.Nprocs*10+2*idev+1, MPI_COMM_WORLD, &reqRP_p2pbuf[idev],); } RecvMPI(&window.data [(nR-Ns)*Na ], doRecv[1]*Ns*Na , MPI_DMDRAGTYPE, (window.node+1)*NasyncNodes+window.subnode, 2+(parsHost.iStep+1)*2+0, MPI_COMM_WORLD, &reqRp ,2);flagRp =0; RecvMPI(&window.dataPMLa[(nR-Ns)*Npmly], doRecv[1]*Ns*Npmly, MPI_RAGPMLTYPE, (window.node+1)*NasyncNodes+window.subnode, 2+(parsHost.iStep+1)*2+1, MPI_COMM_WORLD, &reqRp_pml,6);flagRp_pml=0; doWaitP=1; } } // if(doWaitM && parsHost.wleft==nL+Ns+(Ns-Ntime-1) && parsHost.iStep!=0) { if(doWaitM && parsHost.wleft==nR-1-Ns-((window.node==window.Nprocs-1)?(Ntime/2+1):Ns) && parsHost.iStep!=0) { if(window.node!=0) DEBUG_MPI(("timestamp %10.2f: waiting M (node %d) wleft=%d / requests %p %p\n", omp_get_wtime(), window.node, parsHost.wleft, &reqRm, &reqSm)); if(window.node!=0) { WaitMPI(3,&reqRm, &status);WaitMPI(7,&reqRm_pml, &status); flagRm=1;flagRm_pml=1;} } #ifdef MPI_NUDGE if((parsHost.wleft+Ns)%1==0) { if(doWaitP) if( window.node!=window.Nprocs-1) { if(!flagRp_pml || !flagRp) DEBUG_MPI(("timestamp %10.2f: testing recvP(%p) (node %d) wleft=%d\n", omp_get_wtime(), &reqRp, window.node, parsHost.wleft)); if(!flagRp) MPI_Test(&reqRp, &flagRp, &status); if(!flagRp_pml)MPI_Test(&reqRp_pml, &flagRp_pml, &status); } if(doWaitM) if(parsHost.iStep!=0 && window.node!=0 ) { if(!flagRm_pml || !flagRm) DEBUG_MPI(("timestamp %10.2f: testing recvM(%p) (node %d) wleft=%d\n", omp_get_wtime(), &reqRm, window.node, parsHost.wleft)); if(!flagRm) MPI_Test(&reqRm, &flagRm, &status); if(!flagRm_pml)MPI_Test(&reqRm_pml, &flagRm_pml, &status); } } #endif #ifdef MPI_TEST if(parsHost.iStep-window.node>0) { #endif ampi_exch.do_run=1; if(NasyncNodes>1) { if(sem_init(&ampi_exch.sem_calc, 0,0)==-1) printf("Error semaphore init 
errno=%d\n", errno); if(sem_init(&ampi_exch.sem_mpi , 0,0)==-1) printf("Error semaphore init errno=%d\n", errno); } #pragma omp parallel num_threads(2) { if(omp_get_thread_num()==1) { window.calcDtorres(nL,nR, parsHost.wleft<nL && window.node!=0, parsHost.wleft>=nR-Ns && window.node!=window.Nprocs-1); ampi_exch.do_run=0; if(NasyncNodes>1) if(sem_post(&ampi_exch.sem_mpi)<0) printf("sem_post_mpi end error %d\n",errno); } #pragma omp master if(NasyncNodes>1) { while(ampi_exch.do_run) ampi_exch.run(); if(sem_post(&ampi_exch.sem_calc)<0) printf("sem_post_calc end error %d\n",errno); } } if(NasyncNodes>1) { if(sem_destroy(&ampi_exch.sem_mpi )<0) printf("sem_destroy error %d\n",errno); if(sem_destroy(&ampi_exch.sem_calc)<0) printf("sem_destroy error %d\n",errno); } #ifdef MPI_TEST } #endif if(parsHost.wleft==nL-Ns-1 && window.node!=0 ) { if(doSend[0]) { DEBUG_MPI(("timestamp %10.2f: Send&Recv M(%d) (node %d) wleft=%d (tags %d,%d, reqs %p,%p)\n", omp_get_wtime(), parsHost.wleft+Ns+1, window.node, parsHost.wleft, 2+(parsHost.iStep )*2+0, 2+(parsHost.iStep+1)*2+0, &reqSm, &reqRm)); for(int idev=0; idev<NDev; idev++) { CHECK_ERROR(cudaMemcpy(parsHost.p2pBufM_host_snd[idev],parsHost.p2pBufM[idev],Ntime*sizeof(halfRag),cudaMemcpyDeviceToHost)); CHECK_ERROR(cudaMemcpy(parsHost.p2pBufP_host_snd[idev],parsHost.p2pBufP[idev],Ntime*sizeof(halfRag),cudaMemcpyDeviceToHost)); SendMPI( parsHost.p2pBufM_host_snd[idev] , doSend[0]*Ntime , MPI_HLFRAGTYPE, (window.node-1)*NasyncNodes+window.subnode, 2+(parsHost.iStep )*2+window.Nprocs*10+2*idev+0, MPI_COMM_WORLD, &reqSM_p2pbuf[idev],); SendMPI( parsHost.p2pBufP_host_snd[idev] , doSend[0]*Ntime , MPI_HLFRAGTYPE, (window.node-1)*NasyncNodes+window.subnode, 2+(parsHost.iStep )*2+window.Nprocs*10+2*idev+1, MPI_COMM_WORLD, &reqSP_p2pbuf[idev],); } SendMPI(&window.data [ nL *Na ], doSend[0]*Ns*Na , MPI_DMDRAGTYPE, (window.node-1)*NasyncNodes+window.subnode, 2+(parsHost.iStep )*2+0, MPI_COMM_WORLD, &reqSm ,1);flagSm =0; SendMPI(&window.dataPMLa[ nL *Npmly], doSend[0]*Ns*Npmly, MPI_RAGPMLTYPE, (window.node-1)*NasyncNodes+window.subnode, 2+(parsHost.iStep )*2+1, MPI_COMM_WORLD, &reqSm_pml,5);flagSm_pml=0; DEBUG_MPI(("timestamp %10.2f: ok Send M(%d) (node %d) wleft=%d (tags %d,%d, reqs %p,%p)\n", omp_get_wtime(), parsHost.wleft+Ns+1, window.node, parsHost.wleft, 2+(parsHost.iStep )*2+0, 2+(parsHost.iStep+1)*2+0, &reqSm, &reqRm)); } if(doRecv[0]) { RecvMPI(&window.data [ nL *Na ], doRecv[0]*Ns*Na , MPI_DMDRAGTYPE, (window.node-1)*NasyncNodes+window.subnode, 2+(parsHost.iStep+1)*2+0, MPI_COMM_WORLD, &reqRm, 3);flagRm =0; RecvMPI(&window.dataPMLa[ nL *Npmly], doRecv[0]*Ns*Npmly, MPI_RAGPMLTYPE, (window.node-1)*NasyncNodes+window.subnode, 2+(parsHost.iStep+1)*2+1, MPI_COMM_WORLD, &reqRm_pml,7);flagRm_pml=0; doWaitM=1; } } } #else//MPI_ON not def window.calcDtorres(); #endif//MPI_ON window.synchronize(); } window.finalize(); #endif//TEST_RATE #if not defined MPI_ON && defined DROP_DATA cuTimer tdrop; tdrop.init(); parsHost.drop.drop(0,Np,parsHost.data,parsHost.iStep); dropTime+= tdrop.gettime(); /* parsHost.drop.dump(); #ifndef MPI_TEST if(0 && parsHost.iStep%(10*window.Nprocs)==0) parsHost.drop.sync(); #endif */ #endif double calcTime=t0.gettime(); unsigned long int yee_cells = 0; double overhead=0; #ifndef TEST_RATE yee_cells = NDT*NDT*Ntime*(unsigned long long)(Nv*((Na+1-NDev)*NasyncNodes+1-NasyncNodes))*Np; overhead = window.RAMcopytime/window.GPUcalctime; printf("Step %d /node %d/ subnode %d/: Time %9.09f ms |drop %3.03f%% ||rate %9.09f GYee_cells/sec |total grid 
%dx%dx%d=%ld cells | isTFSF=%d\n", parsHost.iStep, window.node, window.subnode, calcTime, 100*dropTime/calcTime, 1.e-9*yee_cells/(calcTime*1.e-3), NDT*Np,NDT*((Na+1-NDev)*NasyncNodes+1-NasyncNodes),Nv,yee_cells/Ntime, (parsHost.iStep+1)*Ntime*dt<shotpoint.tStop ); // for(int idev=0;idev<NDev;idev++) printf("%3.03f%% ", 100*window.disbal[idev]/window.GPUcalctime); #ifdef TIMERS_ON printf(" |waitings%d %5.05f",(parsHost.iStep)%NDev,1.e3*window.disbal[0]); for(int idev=1; idev<NDev; idev++) printf(", %5.05f", 1.e3*window.disbal[idev]); printf("\n"); for(int idev=0; idev<NDev; idev++) printf(" |timers(Step,node,subnode,device): %d %d %d %d | PMLbot PMLtop I X Do Dmi P Copy Exec:| %.02f %.02f %.02f %.02f %.02f %.02f %.02f %.02f %.02f\n", parsHost.iStep,window.node,window.subnode,idev, window.timerPMLbot, window.timerPMLtop, window.timerI, window.timerX, window.timerDo[idev], window.timerDm[idev], window.timerP, window.timerCopy, window.timerExec); #endif//TIMERS_ON #else//if def TEST_RATE yee_cells = NDT*NDT*Ntime*(unsigned long long)(Nv*((Na-2)/TEST_RATE))*torreNum; printf("Step %d: Time %9.09f ms |drop %3.03f%% |rate %9.09f %d %d %d %d (GYee cells/sec,Np,Na,Nv,Ntime) |isTFSF=%d \n", parsHost.iStep, calcTime, 100*dropTime/calcTime, 1.e-9*yee_cells/(calcTime*1.e-3), Np,Na,Nv,Ntime, (parsHost.iStep+1)*Ntime*dt<shotpoint.tStop ); #endif//TEST_RATE #ifdef MPI_ON double AllCalcTime; MPI_Reduce(&calcTime, &AllCalcTime, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); if(window.node==0 && 0) printf("===(%3d)===AllCalcTime %9.09f sec |rate %9.09f GYee_cells/sec\n", parsHost.iStep, AllCalcTime*1e-3, 1.e-9*yee_cells/(AllCalcTime*1.e-3) ); #endif fflush(stdout); parsHost.iStep++; copy2dev(parsHost, pars); return 0; }
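The Dtorre() routine above drives several GPUs by giving each device its own stream, launching into it, and synchronizing and destroying the streams at the end of the pass. Below is a minimal self-contained sketch of that per-device stream pattern; the 'work' kernel is hypothetical and the device count is capped only to size the local array.

// Per-device stream create / launch / synchronize / destroy, as used by Dtorre().
#include <cuda_runtime.h>
#include <cstdio>

__global__ void work(int dev) { if (threadIdx.x == 0) printf("device %d\n", dev); }

int main() {
    const int NDEV_MAX = 8;
    int ndev = 0;
    cudaGetDeviceCount(&ndev);
    if (ndev > NDEV_MAX) ndev = NDEV_MAX;
    cudaStream_t st[NDEV_MAX];
    for (int i = 0; i < ndev; ++i) {          // one stream per GPU, created on that GPU
        cudaSetDevice(i);
        cudaStreamCreate(&st[i]);
        work<<<1, 32, 0, st[i]>>>(i);
    }
    for (int i = 0; i < ndev; ++i) {          // synchronize and destroy on the owning GPU
        cudaSetDevice(i);
        cudaStreamSynchronize(st[i]);
        cudaStreamDestroy(st[i]);
    }
    return 0;
}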
aa0b327ca55bb2d816dd2f6d58e04390b0beb64e.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <iomanip> #include <math.h> #include <helper_cuda.h> #include <thrust/complex.h> #include <algorithm> //#include <thrust> using namespace thrust; #include "ragridder_plan.h" #include "conv_interp_invoker.h" #include "cuft.h" #include "cugridder.h" #include "precomp.h" #include "utils.h" ///conv improved WS, method 0 correctness cheak int main(int argc, char *argv[]) { // suppose there is just one channel // range of uvw [-lamda/2,lamda/2] rescale with factor resolution / fov compatible with l // l and m need to be converted into pixels /* Input: nrow, nchan, nxdirty, nydirty, fov, epsilon row - number of visibility nchan - number of channels nxdirty, nydirty - image size (height width) fov - field of view epsilon - tolerance */ int ier = 0; if (argc < 7) { fprintf(stderr, "Usage: W Stacking\n" "Arguments:\n" " method: One of\n" " 0: nupts driven,\n" " 2: sub-problem, or\n" " 4: block gather (each nf must be multiple of 8).\n" " w_term_method: \n" " 0: w-stacking\n" " 1: improved w-stacking\n" " nxdirty, nydirty : image size.\n" " nrow: The number of non-uniform points.\n" " fov: Field of view.\n" " nchan: number of chanels (default 1)\n" " epsilon: NUFFT tolerance (default 1e-6).\n" " kerevalmeth: Kernel evaluation method; one of\n" " 0: Exponential of square root (default), or\n" " 1: Horner evaluation.\n"); return 1; } int nxdirty, nydirty; PCS sigma = 2; // upsampling factor int nrow, nchan; PCS fov; double inp; int method; sscanf(argv[1], "%d", &method); int w_term_method; sscanf(argv[2], "%d", &w_term_method); sscanf(argv[3], "%d", &nxdirty); sscanf(argv[4], "%d", &nydirty); sscanf(argv[5], "%d", &nrow); sscanf(argv[6], "%lf", &inp); fov = inp; nchan = 1; if (argc > 7) { sscanf(argv[7], "%d", &nchan); } PCS epsilon = 1e-12; if (argc > 8) { sscanf(argv[8], "%lf", &inp); epsilon = (PCS)inp; // so can read 1e6 right! } int kerevalmeth = 0; if (argc > 9) { sscanf(argv[9], "%d", &kerevalmeth); } // degree per pixel (unit radius) // PCS deg_per_pixelx = fov / 180.0 * PI / (PCS)nxdirty; // PCS deg_per_pixely = fov / 180.0 * PI / (PCS)nydirty; // chanel setting PCS f0 = 1e9; PCS *freq = (PCS *)malloc(sizeof(PCS) * nchan); for (int i = 0; i < nchan; i++) { freq[i] = f0 + i / (double)nchan * fov; //! } //improved WS stacking 1, //gpu_method == 0, nupts driven //N1 = 5; N2 = 5; M = 25; //for correctness checking //int ier; PCS *u, *v, *w; CPX *vis; PCS *wgt=NULL; //currently no mask u = (PCS *)malloc(nrow * sizeof(PCS)); //Allocates page-locked memory on the host. v = (PCS *)malloc(nrow * sizeof(PCS)); w = (PCS *)malloc(nrow * sizeof(PCS)); vis = (CPX *)malloc(nrow * sizeof(CPX)); PCS *d_u, *d_v, *d_w; CUCPX *d_vis, *d_fk; checkCudaErrors(hipMalloc((void**)&d_u, nrow * sizeof(PCS))); checkCudaErrors(hipMalloc((void**)&d_v, nrow * sizeof(PCS))); checkCudaErrors(hipMalloc((void**)&d_w, nrow * sizeof(PCS))); checkCudaErrors(hipMalloc((void**)&d_vis, nrow * sizeof(CUCPX))); // generating data for (int i = 0; i < nrow; i++) { u[i] = randm11() * 0.5 * SPEEDOFLIGHT / f0; //xxxxx remove v[i] = randm11() * 0.5 * SPEEDOFLIGHT / f0; w[i] = randm11() * 0.5 * SPEEDOFLIGHT / f0; vis[i].real(randm11()); // nrow vis per channel, weight? 
vis[i].imag(randm11()); // wgt[i] = 1; } #ifdef DEBUG printf("origial input data...\n"); for(int i=0; i<nrow; i++){ printf("%.3lf ",w[i]); } printf("\n"); for(int i=0; i<nrow; i++){ printf("%.3lf ",vis[i].real()); } printf("\n"); #endif // ignore the tdirty // Timing begin ++++ //data transfer checkCudaErrors(hipMemcpy(d_u, u, nrow * sizeof(PCS), hipMemcpyHostToDevice)); //u checkCudaErrors(hipMemcpy(d_v, v, nrow * sizeof(PCS), hipMemcpyHostToDevice)); //v checkCudaErrors(hipMemcpy(d_w, w, nrow * sizeof(PCS), hipMemcpyHostToDevice)); //w checkCudaErrors(hipMemcpy(d_vis, vis, nrow * sizeof(CUCPX), hipMemcpyHostToDevice)); /* -----------Step1: Baseline setting-------------- skip negative v uvw, nrow = M, shift, mask, f_over_c (fixed due to single channel) */ int shift = 0; while ((int(1) << shift) < nchan) ++shift; // int mask = (int(1) << shift) - 1; // ??? PCS *f_over_c = (PCS*) malloc(sizeof(PCS)*nchan); for(int i=0; i<nchan; i++){ f_over_c[i] = freq[i]/SPEEDOFLIGHT; } /* ----------Step2: cugridder------------*/ // plan setting curafft_plan *plan; ragridder_plan *gridder_plan; plan = new curafft_plan(); gridder_plan = new ragridder_plan(); memset(plan, 0, sizeof(curafft_plan)); memset(gridder_plan, 0, sizeof(ragridder_plan)); visibility *pointer_v; pointer_v = (visibility *)malloc(sizeof(visibility)); pointer_v->u = u; pointer_v->v = v; pointer_v->w = w; pointer_v->vis = vis; pointer_v->frequency = freq; pointer_v->weight = wgt; pointer_v->pirange = 0; pointer_v->sign = -1; int direction = 1; //vis to image // device data allocation and transfer should be done in gridder setting ier = gridder_setting(nydirty,nxdirty,method,kerevalmeth,w_term_method,epsilon,direction,sigma,0,1,nrow,nchan,fov,pointer_v,d_u,d_v,d_w,d_vis ,plan,gridder_plan); //print the setting result free(pointer_v); if(ier == 1){ printf("errors in gridder setting\n"); return ier; } // fk(image) malloc and set checkCudaErrors(hipMalloc((void**)&d_fk,sizeof(CUCPX)*nydirty*nxdirty)); plan->fk = d_fk; gridder_plan->dirty_image = (CPX *)malloc(sizeof(CPX)*nxdirty*nydirty*nchan); // // how to use weight flag and frequency for(int i=0; i<nchan; i++){ // pre_setting // 1. u, v, w * f_over_c // 2. *pixelsize(*2pi) // 3. 
* rescale ratio // pre_setting(d_u, d_v, d_w, d_vis, plan, gridder_plan); // memory transfer (vis belong to this channel and weight) // checkCudaErrors(hipMemcpy(d_vis, vis, nrow * sizeof(CUCPX), hipMemcpyHostToDevice)); // // shift to corresponding range ier = gridder_execution(plan,gridder_plan); if(ier == 1){ printf("errors in gridder execution\n"); return ier; } checkCudaErrors(hipMemcpy(gridder_plan->dirty_image+i*nxdirty*nydirty, d_fk, sizeof(CUCPX)*nydirty*nxdirty, hipMemcpyDeviceToHost)); } printf("exection finished\n"); #ifdef PRINT printf("result printing...\n"); for(int i=0; i<nxdirty; i++){ for(int j=0; j<nydirty; j++){ printf("%.5lf ", gridder_plan->dirty_image[i*nydirty+j].real()); } printf("\n"); } #endif PCS pi_ratio = 1; if(!gridder_plan->kv.pirange)pi_ratio = 2 * PI; int print_sizex = nxdirty;int print_sizey = nydirty; if(nrow>=1e4){ print_sizex = 1; print_sizey = 10; } PCS *truth = (PCS*) malloc (sizeof(PCS)*nxdirty*nydirty); //printf("ground truth printing...\n"); for(int i=0; i<print_sizex; i++){ for(int j=0; j<print_sizey; j++){ CPX temp(0.0,0.0); PCS n_lm = sqrt(1.0-pow(gridder_plan->pixelsize_x*(i-nxdirty/2),2)-pow(gridder_plan->pixelsize_y*(j-nydirty/2),2)); for(int k=0; k<nrow; k++){ PCS phase = f0/SPEEDOFLIGHT*(u[k]*pi_ratio*gridder_plan->pixelsize_x*(i-nxdirty/2)+v[k]*pi_ratio*gridder_plan->pixelsize_y*(j-nydirty/2)-w[k]*pi_ratio*(n_lm-1)); temp += vis[k]*exp(phase*IMA); } truth[i*nydirty+j] = temp.real()/n_lm; //printf("%.5lf ",temp.real()/(n_lm)); } //printf("\n"); } printf("portion of result and ground truth printing...\n"); for(int i=0; i<10; i++){ printf("(%lf,%lf)",gridder_plan->dirty_image[i].real(),truth[i]); } printf("\n"); double max=0; double l2_max=0; double sum_fk = 0; for(int i=0; i<print_sizex*print_sizey; i++){ double temp = abs(truth[i] - gridder_plan->dirty_image[i].real()); if(temp>max) max = temp; l2_max += temp ; sum_fk += abs(gridder_plan->dirty_image[i].real()); } printf("maximal abs error %.3g, l2 error %.3g\n",max,l2_max/sum_fk); printf("---------------------------------------------------------------------------------------------------\n"); plan->dim=3; ier = gridder_destroy(plan, gridder_plan); if(ier == 1){ printf("errors in gridder destroy\n"); return ier; } return ier; }
aa0b327ca55bb2d816dd2f6d58e04390b0beb64e.cu
#include <iostream> #include <iomanip> #include <math.h> #include <helper_cuda.h> #include <thrust/complex.h> #include <algorithm> //#include <thrust> using namespace thrust; #include "ragridder_plan.h" #include "conv_interp_invoker.h" #include "cuft.h" #include "cugridder.h" #include "precomp.h" #include "utils.h" ///conv improved WS, method 0 correctness cheak int main(int argc, char *argv[]) { // suppose there is just one channel // range of uvw [-lamda/2,lamda/2], rescale with factor resolution / fov compatible with l // l and m need to be converted into pixels /* Input: nrow, nchan, nxdirty, nydirty, fov, epsilon row - number of visibility nchan - number of channels nxdirty, nydirty - image size (height width) fov - field of view epsilon - tolerance */ int ier = 0; if (argc < 7) { fprintf(stderr, "Usage: W Stacking\n" "Arguments:\n" " method: One of\n" " 0: nupts driven,\n" " 2: sub-problem, or\n" " 4: block gather (each nf must be multiple of 8).\n" " w_term_method: \n" " 0: w-stacking\n" " 1: improved w-stacking\n" " nxdirty, nydirty : image size.\n" " nrow: The number of non-uniform points.\n" " fov: Field of view.\n" " nchan: number of chanels (default 1)\n" " epsilon: NUFFT tolerance (default 1e-6).\n" " kerevalmeth: Kernel evaluation method; one of\n" " 0: Exponential of square root (default), or\n" " 1: Horner evaluation.\n"); return 1; } int nxdirty, nydirty; PCS sigma = 2; // upsampling factor int nrow, nchan; PCS fov; double inp; int method; sscanf(argv[1], "%d", &method); int w_term_method; sscanf(argv[2], "%d", &w_term_method); sscanf(argv[3], "%d", &nxdirty); sscanf(argv[4], "%d", &nydirty); sscanf(argv[5], "%d", &nrow); sscanf(argv[6], "%lf", &inp); fov = inp; nchan = 1; if (argc > 7) { sscanf(argv[7], "%d", &nchan); } PCS epsilon = 1e-12; if (argc > 8) { sscanf(argv[8], "%lf", &inp); epsilon = (PCS)inp; // so can read 1e6 right! } int kerevalmeth = 0; if (argc > 9) { sscanf(argv[9], "%d", &kerevalmeth); } // degree per pixel (unit radius) // PCS deg_per_pixelx = fov / 180.0 * PI / (PCS)nxdirty; // PCS deg_per_pixely = fov / 180.0 * PI / (PCS)nydirty; // chanel setting PCS f0 = 1e9; PCS *freq = (PCS *)malloc(sizeof(PCS) * nchan); for (int i = 0; i < nchan; i++) { freq[i] = f0 + i / (double)nchan * fov; //! } //improved WS stacking 1, //gpu_method == 0, nupts driven //N1 = 5; N2 = 5; M = 25; //for correctness checking //int ier; PCS *u, *v, *w; CPX *vis; PCS *wgt=NULL; //currently no mask u = (PCS *)malloc(nrow * sizeof(PCS)); //Allocates page-locked memory on the host. v = (PCS *)malloc(nrow * sizeof(PCS)); w = (PCS *)malloc(nrow * sizeof(PCS)); vis = (CPX *)malloc(nrow * sizeof(CPX)); PCS *d_u, *d_v, *d_w; CUCPX *d_vis, *d_fk; checkCudaErrors(cudaMalloc((void**)&d_u, nrow * sizeof(PCS))); checkCudaErrors(cudaMalloc((void**)&d_v, nrow * sizeof(PCS))); checkCudaErrors(cudaMalloc((void**)&d_w, nrow * sizeof(PCS))); checkCudaErrors(cudaMalloc((void**)&d_vis, nrow * sizeof(CUCPX))); // generating data for (int i = 0; i < nrow; i++) { u[i] = randm11() * 0.5 * SPEEDOFLIGHT / f0; //xxxxx remove v[i] = randm11() * 0.5 * SPEEDOFLIGHT / f0; w[i] = randm11() * 0.5 * SPEEDOFLIGHT / f0; vis[i].real(randm11()); // nrow vis per channel, weight? 
vis[i].imag(randm11()); // wgt[i] = 1; } #ifdef DEBUG printf("origial input data...\n"); for(int i=0; i<nrow; i++){ printf("%.3lf ",w[i]); } printf("\n"); for(int i=0; i<nrow; i++){ printf("%.3lf ",vis[i].real()); } printf("\n"); #endif // ignore the tdirty // Timing begin ++++ //data transfer checkCudaErrors(cudaMemcpy(d_u, u, nrow * sizeof(PCS), cudaMemcpyHostToDevice)); //u checkCudaErrors(cudaMemcpy(d_v, v, nrow * sizeof(PCS), cudaMemcpyHostToDevice)); //v checkCudaErrors(cudaMemcpy(d_w, w, nrow * sizeof(PCS), cudaMemcpyHostToDevice)); //w checkCudaErrors(cudaMemcpy(d_vis, vis, nrow * sizeof(CUCPX), cudaMemcpyHostToDevice)); /* -----------Step1: Baseline setting-------------- skip negative v uvw, nrow = M, shift, mask, f_over_c (fixed due to single channel) */ int shift = 0; while ((int(1) << shift) < nchan) ++shift; // int mask = (int(1) << shift) - 1; // ??? PCS *f_over_c = (PCS*) malloc(sizeof(PCS)*nchan); for(int i=0; i<nchan; i++){ f_over_c[i] = freq[i]/SPEEDOFLIGHT; } /* ----------Step2: cugridder------------*/ // plan setting curafft_plan *plan; ragridder_plan *gridder_plan; plan = new curafft_plan(); gridder_plan = new ragridder_plan(); memset(plan, 0, sizeof(curafft_plan)); memset(gridder_plan, 0, sizeof(ragridder_plan)); visibility *pointer_v; pointer_v = (visibility *)malloc(sizeof(visibility)); pointer_v->u = u; pointer_v->v = v; pointer_v->w = w; pointer_v->vis = vis; pointer_v->frequency = freq; pointer_v->weight = wgt; pointer_v->pirange = 0; pointer_v->sign = -1; int direction = 1; //vis to image // device data allocation and transfer should be done in gridder setting ier = gridder_setting(nydirty,nxdirty,method,kerevalmeth,w_term_method,epsilon,direction,sigma,0,1,nrow,nchan,fov,pointer_v,d_u,d_v,d_w,d_vis ,plan,gridder_plan); //print the setting result free(pointer_v); if(ier == 1){ printf("errors in gridder setting\n"); return ier; } // fk(image) malloc and set checkCudaErrors(cudaMalloc((void**)&d_fk,sizeof(CUCPX)*nydirty*nxdirty)); plan->fk = d_fk; gridder_plan->dirty_image = (CPX *)malloc(sizeof(CPX)*nxdirty*nydirty*nchan); // // how to use weight flag and frequency for(int i=0; i<nchan; i++){ // pre_setting // 1. u, v, w * f_over_c // 2. *pixelsize(*2pi) // 3. 
* rescale ratio // pre_setting(d_u, d_v, d_w, d_vis, plan, gridder_plan); // memory transfer (vis belong to this channel and weight) // checkCudaErrors(cudaMemcpy(d_vis, vis, nrow * sizeof(CUCPX), cudaMemcpyHostToDevice)); // // shift to corresponding range ier = gridder_execution(plan,gridder_plan); if(ier == 1){ printf("errors in gridder execution\n"); return ier; } checkCudaErrors(cudaMemcpy(gridder_plan->dirty_image+i*nxdirty*nydirty, d_fk, sizeof(CUCPX)*nydirty*nxdirty, cudaMemcpyDeviceToHost)); } printf("exection finished\n"); #ifdef PRINT printf("result printing...\n"); for(int i=0; i<nxdirty; i++){ for(int j=0; j<nydirty; j++){ printf("%.5lf ", gridder_plan->dirty_image[i*nydirty+j].real()); } printf("\n"); } #endif PCS pi_ratio = 1; if(!gridder_plan->kv.pirange)pi_ratio = 2 * PI; int print_sizex = nxdirty;int print_sizey = nydirty; if(nrow>=1e4){ print_sizex = 1; print_sizey = 10; } PCS *truth = (PCS*) malloc (sizeof(PCS)*nxdirty*nydirty); //printf("ground truth printing...\n"); for(int i=0; i<print_sizex; i++){ for(int j=0; j<print_sizey; j++){ CPX temp(0.0,0.0); PCS n_lm = sqrt(1.0-pow(gridder_plan->pixelsize_x*(i-nxdirty/2),2)-pow(gridder_plan->pixelsize_y*(j-nydirty/2),2)); for(int k=0; k<nrow; k++){ PCS phase = f0/SPEEDOFLIGHT*(u[k]*pi_ratio*gridder_plan->pixelsize_x*(i-nxdirty/2)+v[k]*pi_ratio*gridder_plan->pixelsize_y*(j-nydirty/2)-w[k]*pi_ratio*(n_lm-1)); temp += vis[k]*exp(phase*IMA); } truth[i*nydirty+j] = temp.real()/n_lm; //printf("%.5lf ",temp.real()/(n_lm)); } //printf("\n"); } printf("portion of result and ground truth printing...\n"); for(int i=0; i<10; i++){ printf("(%lf,%lf)",gridder_plan->dirty_image[i].real(),truth[i]); } printf("\n"); double max=0; double l2_max=0; double sum_fk = 0; for(int i=0; i<print_sizex*print_sizey; i++){ double temp = abs(truth[i] - gridder_plan->dirty_image[i].real()); if(temp>max) max = temp; l2_max += temp ; sum_fk += abs(gridder_plan->dirty_image[i].real()); } printf("maximal abs error %.3g, l2 error %.3g\n",max,l2_max/sum_fk); printf("---------------------------------------------------------------------------------------------------\n"); plan->dim=3; ier = gridder_destroy(plan, gridder_plan); if(ier == 1){ printf("errors in gridder destroy\n"); return ier; } return ier; }
068ca8bd2388eee778e331b2e5a4befd5518c314.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> //device functions __device__ int getGlobalIdx_1D_1D() { return blockIdx.x *blockDim.x + threadIdx.x; } __device__ int getGlobalIdx_1D_2D() { return blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; } __device__ int getGlobalIdx_1D_3D() { return blockIdx.x * blockDim.x * blockDim.y * blockDim.z + threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x; } __device__ int getGlobalIdx_2D_1D() { int blockId = blockIdx.y * gridDim.x + blockIdx.x; int threadId = blockId * blockDim.x + threadIdx.x; return threadId; } __device__ int getGlobalIdx_2D_2D() { int blockId = blockIdx.x + blockIdx.y * gridDim.x; int threadId = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; return threadId; } __device__ int getGlobalIdx_2D_3D() { int blockId = blockIdx.x + blockIdx.y * gridDim.x; int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x; return threadId; } __device__ int getGlobalIdx_3D_1D() { int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int threadId = blockId * blockDim.x + threadIdx.x; return threadId; } __device__ int getGlobalIdx_3D_2D() { int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int threadId = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; return threadId; } __device__ int getGlobalIdx_3D_3D() { int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x; return threadId; } //kernels __global__ void kernel_1D_1D() { printf("Local thread ID: %i Global thread ID: %i\n", threadIdx.x, getGlobalIdx_1D_1D()); } __global__ void kernel_1D_2D() { printf("Local thread IDs: (%i,%i) Global thread ID: %i\n", threadIdx.x, threadIdx.y, getGlobalIdx_1D_2D()); } __global__ void kernel_1D_3D() { printf("Local thread IDs: (%i,%i,%i) Global thread ID: %i\n", threadIdx.x, threadIdx.y, threadIdx.z, getGlobalIdx_1D_3D()); } __global__ void kernel_2D_1D() { printf("Local thread ID: %i Global thread ID: %i\n", threadIdx.x, getGlobalIdx_2D_1D()); } __global__ void kernel_2D_2D() { printf("Local thread IDs: (%i,%i) Global thread ID: %i\n", threadIdx.x, threadIdx.y, getGlobalIdx_2D_2D()); } __global__ void kernel_2D_3D() { printf("Local thread IDs: (%i,%i,%i) Global thread ID: %i\n", threadIdx.x, threadIdx.y, threadIdx.z, getGlobalIdx_2D_3D()); } __global__ void kernel_3D_1D() { printf("Local thread ID: %i Global thread ID: %i\n", threadIdx.x, getGlobalIdx_3D_1D()); } __global__ void kernel_3D_2D() { printf("Local thread IDs: (%i,%i) Global thread ID: %i\n", threadIdx.x, threadIdx.y, getGlobalIdx_3D_2D()); } __global__ void kernel_3D_3D() { printf("Local thread IDs: (%i,%i,%i) Global thread ID: %i\n", threadIdx.x, threadIdx.y, threadIdx.z, getGlobalIdx_3D_3D()); } int main() { printf("\nLaunching kernel as 1D grid of 1D blocks...\n"); hipLaunchKernelGGL(( kernel_1D_1D), dim3(dim3(2,1,1)), dim3(dim3(2,1,1)), 0, 0, ); hipDeviceReset(); printf("\nLaunching kernel as 1D grid of 2D blocks...\n"); hipLaunchKernelGGL(( kernel_1D_2D), dim3(dim3(2,1,1)), dim3(dim3(2,2,1)), 0, 0, ); hipDeviceReset(); printf("\nLaunching 
kernel as 1D grid of 3D blocks...\n"); hipLaunchKernelGGL(( kernel_1D_3D), dim3(dim3(2,1,1)), dim3(dim3(2,2,2)), 0, 0, ); hipDeviceReset(); printf("\nLaunching kernel as 2D grid of 1D blocks...\n"); hipLaunchKernelGGL(( kernel_2D_1D), dim3(dim3(2,2,1)), dim3(dim3(2,1,1)), 0, 0, ); hipDeviceReset(); printf("\nLaunching kernel as 2D grid of 2D blocks...\n"); hipLaunchKernelGGL(( kernel_2D_2D), dim3(dim3(2,2,1)), dim3(dim3(2,2,1)), 0, 0, ); hipDeviceReset(); printf("\nLaunching kernel as 2D grid of 3D blocks...\n"); hipLaunchKernelGGL(( kernel_2D_3D), dim3(dim3(2,2,1)), dim3(dim3(2,2,2)), 0, 0, ); hipDeviceReset(); printf("\nLaunching kernel as 3D grid of 1D blocks...\n"); hipLaunchKernelGGL(( kernel_3D_1D), dim3(dim3(2,2,2)), dim3(dim3(2,1,1)), 0, 0, ); hipDeviceReset(); printf("\nLaunching kernel as 3D grid of 2D blocks...\n"); hipLaunchKernelGGL(( kernel_3D_2D), dim3(dim3(2,2,2)), dim3(dim3(2,2,1)), 0, 0, ); hipDeviceReset(); printf("\nLaunching kernel as 3D grid of 3D blocks...\n"); hipLaunchKernelGGL(( kernel_3D_3D), dim3(dim3(2,2,2)), dim3(dim3(2,2,2)), 0, 0, ); hipDeviceReset(); return 0; }
068ca8bd2388eee778e331b2e5a4befd5518c314.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> //device functions __device__ int getGlobalIdx_1D_1D() { return blockIdx.x *blockDim.x + threadIdx.x; } __device__ int getGlobalIdx_1D_2D() { return blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; } __device__ int getGlobalIdx_1D_3D() { return blockIdx.x * blockDim.x * blockDim.y * blockDim.z + threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x; } __device__ int getGlobalIdx_2D_1D() { int blockId = blockIdx.y * gridDim.x + blockIdx.x; int threadId = blockId * blockDim.x + threadIdx.x; return threadId; } __device__ int getGlobalIdx_2D_2D() { int blockId = blockIdx.x + blockIdx.y * gridDim.x; int threadId = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; return threadId; } __device__ int getGlobalIdx_2D_3D() { int blockId = blockIdx.x + blockIdx.y * gridDim.x; int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x; return threadId; } __device__ int getGlobalIdx_3D_1D() { int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int threadId = blockId * blockDim.x + threadIdx.x; return threadId; } __device__ int getGlobalIdx_3D_2D() { int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int threadId = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; return threadId; } __device__ int getGlobalIdx_3D_3D() { int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x; return threadId; } //kernels __global__ void kernel_1D_1D() { printf("Local thread ID: %i Global thread ID: %i\n", threadIdx.x, getGlobalIdx_1D_1D()); } __global__ void kernel_1D_2D() { printf("Local thread IDs: (%i,%i) Global thread ID: %i\n", threadIdx.x, threadIdx.y, getGlobalIdx_1D_2D()); } __global__ void kernel_1D_3D() { printf("Local thread IDs: (%i,%i,%i) Global thread ID: %i\n", threadIdx.x, threadIdx.y, threadIdx.z, getGlobalIdx_1D_3D()); } __global__ void kernel_2D_1D() { printf("Local thread ID: %i Global thread ID: %i\n", threadIdx.x, getGlobalIdx_2D_1D()); } __global__ void kernel_2D_2D() { printf("Local thread IDs: (%i,%i) Global thread ID: %i\n", threadIdx.x, threadIdx.y, getGlobalIdx_2D_2D()); } __global__ void kernel_2D_3D() { printf("Local thread IDs: (%i,%i,%i) Global thread ID: %i\n", threadIdx.x, threadIdx.y, threadIdx.z, getGlobalIdx_2D_3D()); } __global__ void kernel_3D_1D() { printf("Local thread ID: %i Global thread ID: %i\n", threadIdx.x, getGlobalIdx_3D_1D()); } __global__ void kernel_3D_2D() { printf("Local thread IDs: (%i,%i) Global thread ID: %i\n", threadIdx.x, threadIdx.y, getGlobalIdx_3D_2D()); } __global__ void kernel_3D_3D() { printf("Local thread IDs: (%i,%i,%i) Global thread ID: %i\n", threadIdx.x, threadIdx.y, threadIdx.z, getGlobalIdx_3D_3D()); } int main() { printf("\nLaunching kernel as 1D grid of 1D blocks...\n"); kernel_1D_1D<<<dim3(2,1,1), dim3(2,1,1)>>>(); cudaDeviceReset(); printf("\nLaunching kernel as 1D grid of 2D blocks...\n"); kernel_1D_2D<<<dim3(2,1,1), dim3(2,2,1)>>>(); cudaDeviceReset(); printf("\nLaunching kernel as 1D grid of 3D blocks...\n"); kernel_1D_3D<<<dim3(2,1,1), dim3(2,2,2)>>>(); cudaDeviceReset(); printf("\nLaunching kernel as 2D 
grid of 1D blocks...\n"); kernel_2D_1D<<<dim3(2,2,1), dim3(2,1,1)>>>(); cudaDeviceReset(); printf("\nLaunching kernel as 2D grid of 2D blocks...\n"); kernel_2D_2D<<<dim3(2,2,1), dim3(2,2,1)>>>(); cudaDeviceReset(); printf("\nLaunching kernel as 2D grid of 3D blocks...\n"); kernel_2D_3D<<<dim3(2,2,1), dim3(2,2,2)>>>(); cudaDeviceReset(); printf("\nLaunching kernel as 3D grid of 1D blocks...\n"); kernel_3D_1D<<<dim3(2,2,2), dim3(2,1,1)>>>(); cudaDeviceReset(); printf("\nLaunching kernel as 3D grid of 2D blocks...\n"); kernel_3D_2D<<<dim3(2,2,2), dim3(2,2,1)>>>(); cudaDeviceReset(); printf("\nLaunching kernel as 3D grid of 3D blocks...\n"); kernel_3D_3D<<<dim3(2,2,2), dim3(2,2,2)>>>(); cudaDeviceReset(); return 0; }
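Editorial note on the pair above: the only non-mechanical change hipify makes to this file is the launch syntax, where the CUDA triple-chevron form becomes the hipLaunchKernelGGL macro. The sketch below is not part of either file; it is a minimal, self-contained CUDA example (the kernel name scale and its parameters are invented for illustration), with the equivalent HIP launch shown in comments.

#include <cstdio>
#include <cuda_runtime.h>

// Hypothetical kernel used only to demonstrate the launch-syntax mapping.
__global__ void scale(float *x, float a, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

int main()
{
    const int n = 1 << 20;
    float *d_x;
    cudaMalloc(&d_x, n * sizeof(float));    // HIP: hipMalloc(&d_x, n * sizeof(float));

    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);

    // CUDA launch syntax:
    scale<<<grid, block>>>(d_x, 2.0f, n);
    // HIP equivalent emitted by hipify (dynamic shared memory = 0, stream = 0):
    // hipLaunchKernelGGL(scale, grid, block, 0, 0, d_x, 2.0f, n);

    cudaDeviceSynchronize();                // HIP: hipDeviceSynchronize();
    cudaFree(d_x);                          // HIP: hipFree(d_x);
    printf("done\n");
    return 0;
}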
ea59a37ecb6c65153698f71a0c44c8d49fbb51e4.hip
// !!! This is a file automatically generated by hipify!!! #include "utils.cuh" __host__ void readMatrixFromFile(char* input_filename, // First row of file int* rows, int* columns, int* num_of_non_zero_entries, // Return variables int** row_ptr_array, int** row_ptr_array_init, int** col_ind_array, double** values_array) { FILE *fptr; fptr = fopen(input_filename, "r"); if (fptr == NULL) { printf("Error reading file"); return; } else { int index = 0; int row = 0, column = 0, tmp = 0; double non_zero_val = 0.0; // Read first row from matrix file fscanf(fptr, "%d %d %d", &row, &column, &tmp); *rows = row; *columns = column; *num_of_non_zero_entries = tmp; int* elem_ptr_arr = (int*)malloc(sizeof(int) * (*rows + 1)); *row_ptr_array = (int*)malloc(sizeof(int) * (*rows + 1)); *row_ptr_array_init = (int*)malloc(sizeof(int) * *num_of_non_zero_entries); *col_ind_array = (int*)malloc(sizeof(int) * *num_of_non_zero_entries); *values_array = (double*)malloc(sizeof(double) * *num_of_non_zero_entries); // read lines into 3 variables line by line while (index < *num_of_non_zero_entries) { fscanf(fptr, "%d", &row); fscanf(fptr, "%d", &column); fscanf(fptr, "%lf", &non_zero_val); // -1 to make indices start from 0 elem_ptr_arr[row - 1]++; // (*row_ptr_array)[index] = row - 1; (*row_ptr_array_init)[index] = row - 1; (*col_ind_array)[index] = column - 1; (*values_array)[index] = non_zero_val; index++; } // Prefix sum for (int k = 1; k < *rows; k++) { elem_ptr_arr[k] += elem_ptr_arr[k - 1]; (*row_ptr_array)[k] = elem_ptr_arr[k]; } // for (int k = *rows; k > 0; k--) // (*row_ptr_array)[k] = *num_of_non_zero_entries - elem_ptr_arr[k - 1]; (*row_ptr_array)[0] = 0; free(elem_ptr_arr); } } __host__ void printMatrix(int rows, int columns, int num_of_non_zero_entries, int* row_ptr_array, int* col_ind_array, double * values_array) { printf("%d\t%d\t%d\n", rows, columns, num_of_non_zero_entries); // For each row for (int i = 0; i < num_of_non_zero_entries; i++) { printf("%d\t%d\t%lf\n", row_ptr_array[i] + 1, col_ind_array[i] + 1, values_array[i]); } } __host__ void printVector(int rows, double* x_array) { for (int i = 0; i < rows; i++) { printf("%lf\n", x_array[i]); } printf("\n"); } __host__ void CUDAErrorCheck(const char* msg) { hipError_t err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "Cuda error: %s: %d: %s.\n", msg, (int)err, hipGetErrorName(err)); // exit(EXIT_FAILURE); } } __host__ void mmult_serial(// First row of file int rows, int columns, int num_of_non_zero_entries, int num_repetitions, // Return variables int* row_ptr_array, int* col_ind_array, double* values_array, double** x_array, double** x_array_old) { for ( int row = 0; row < rows; row++) { double tmp_product = 0; int row_start = row_ptr_array[row]; int row_end = row_ptr_array[row + 1]; // Iterate over the sparse row for (int j = row_start; j < row_end; j++) tmp_product += values_array[j] * (*x_array_old)[col_ind_array[j]]; (*x_array)[row] = tmp_product; } } /*__host__ void init_vector_to_1(// First row of file int rows, int columns, int num_of_non_zero_entries, int num_repetitions, // Return variables int* row_ptr_array, int* col_ind_array, double* values_array, double** x_array) { for ( int row = 0; row < rows; row++) { int row_start = row_ptr_array[row]; int row_end = row_ptr_array[row + 1]; // Iterate over the sparse row for (int j = row_start; j < row_end; j++) (*x_array)[col_ind_array[j]] = 1.0f; } }*/
ea59a37ecb6c65153698f71a0c44c8d49fbb51e4.cu
#include "utils.cuh" __host__ void readMatrixFromFile(char* input_filename, // First row of file int* rows, int* columns, int* num_of_non_zero_entries, // Return variables int** row_ptr_array, int** row_ptr_array_init, int** col_ind_array, double** values_array) { FILE *fptr; fptr = fopen(input_filename, "r"); if (fptr == NULL) { printf("Error reading file"); return; } else { int index = 0; int row = 0, column = 0, tmp = 0; double non_zero_val = 0.0; // Read first row from matrix file fscanf(fptr, "%d %d %d", &row, &column, &tmp); *rows = row; *columns = column; *num_of_non_zero_entries = tmp; int* elem_ptr_arr = (int*)malloc(sizeof(int) * (*rows + 1)); *row_ptr_array = (int*)malloc(sizeof(int) * (*rows + 1)); *row_ptr_array_init = (int*)malloc(sizeof(int) * *num_of_non_zero_entries); *col_ind_array = (int*)malloc(sizeof(int) * *num_of_non_zero_entries); *values_array = (double*)malloc(sizeof(double) * *num_of_non_zero_entries); // read lines into 3 variables line by line while (index < *num_of_non_zero_entries) { fscanf(fptr, "%d", &row); fscanf(fptr, "%d", &column); fscanf(fptr, "%lf", &non_zero_val); // -1 to make indices start from 0 elem_ptr_arr[row - 1]++; // (*row_ptr_array)[index] = row - 1; (*row_ptr_array_init)[index] = row - 1; (*col_ind_array)[index] = column - 1; (*values_array)[index] = non_zero_val; index++; } // Prefix sum for (int k = 1; k < *rows; k++) { elem_ptr_arr[k] += elem_ptr_arr[k - 1]; (*row_ptr_array)[k] = elem_ptr_arr[k]; } // for (int k = *rows; k > 0; k--) // (*row_ptr_array)[k] = *num_of_non_zero_entries - elem_ptr_arr[k - 1]; (*row_ptr_array)[0] = 0; free(elem_ptr_arr); } } __host__ void printMatrix(int rows, int columns, int num_of_non_zero_entries, int* row_ptr_array, int* col_ind_array, double * values_array) { printf("%d\t%d\t%d\n", rows, columns, num_of_non_zero_entries); // For each row for (int i = 0; i < num_of_non_zero_entries; i++) { printf("%d\t%d\t%lf\n", row_ptr_array[i] + 1, col_ind_array[i] + 1, values_array[i]); } } __host__ void printVector(int rows, double* x_array) { for (int i = 0; i < rows; i++) { printf("%lf\n", x_array[i]); } printf("\n"); } __host__ void CUDAErrorCheck(const char* msg) { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "Cuda error: %s: %d: %s.\n", msg, (int)err, cudaGetErrorName(err)); // exit(EXIT_FAILURE); } } __host__ void mmult_serial(// First row of file int rows, int columns, int num_of_non_zero_entries, int num_repetitions, // Return variables int* row_ptr_array, int* col_ind_array, double* values_array, double** x_array, double** x_array_old) { for ( int row = 0; row < rows; row++) { double tmp_product = 0; int row_start = row_ptr_array[row]; int row_end = row_ptr_array[row + 1]; // Iterate over the sparse row for (int j = row_start; j < row_end; j++) tmp_product += values_array[j] * (*x_array_old)[col_ind_array[j]]; (*x_array)[row] = tmp_product; } } /*__host__ void init_vector_to_1(// First row of file int rows, int columns, int num_of_non_zero_entries, int num_repetitions, // Return variables int* row_ptr_array, int* col_ind_array, double* values_array, double** x_array) { for ( int row = 0; row < rows; row++) { int row_start = row_ptr_array[row]; int row_end = row_ptr_array[row + 1]; // Iterate over the sparse row for (int j = row_start; j < row_end; j++) (*x_array)[col_ind_array[j]] = 1.0f; } }*/
0e8d5c8a06d77806e4a21bd816504fa581a6e750.hip
// !!! This is a file automatically generated by hipify!!!
#include <ATen/hip/HIPContext.h>

#include "open3d/ml/PyTorch/TorchHelper.h"
#include "open3d/ml/impl/misc/ReduceSubarraysSum.cuh"
#include "torch/script.h"

template <class T>
torch::Tensor ReduceSubarraysSumCUDA(torch::Tensor values,
                                     torch::Tensor row_splits) {
    auto device = values.device().type();
    auto device_idx = values.device().index();

    torch::Tensor sums = torch::empty(
            {row_splits.size(0) - 1},
            torch::dtype(ToTorchDtype<T>()).device(device, device_idx));

    auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
    auto cuda_device_props = at::cuda::getCurrentDeviceProperties();
    const int texture_alignment = cuda_device_props->textureAlignment;

    open3d::ml::impl::ReduceSubarraysSumCUDA(
            stream, values.data_ptr<T>(), values.size(0),
            row_splits.data_ptr<int64_t>(), row_splits.size(0) - 1,
            sums.data_ptr<T>());
    return sums;
}

#define INSTANTIATE(T)                                                  \
    template torch::Tensor ReduceSubarraysSumCUDA<T>(torch::Tensor,     \
                                                     torch::Tensor);

INSTANTIATE(int32_t)
INSTANTIATE(int64_t)
INSTANTIATE(float)
INSTANTIATE(double)
0e8d5c8a06d77806e4a21bd816504fa581a6e750.cu
#include <ATen/cuda/CUDAContext.h>

#include "open3d/ml/PyTorch/TorchHelper.h"
#include "open3d/ml/impl/misc/ReduceSubarraysSum.cuh"
#include "torch/script.h"

template <class T>
torch::Tensor ReduceSubarraysSumCUDA(torch::Tensor values,
                                     torch::Tensor row_splits) {
    auto device = values.device().type();
    auto device_idx = values.device().index();

    torch::Tensor sums = torch::empty(
            {row_splits.size(0) - 1},
            torch::dtype(ToTorchDtype<T>()).device(device, device_idx));

    auto stream = at::cuda::getCurrentCUDAStream();
    auto cuda_device_props = at::cuda::getCurrentDeviceProperties();
    const int texture_alignment = cuda_device_props->textureAlignment;

    open3d::ml::impl::ReduceSubarraysSumCUDA(
            stream, values.data_ptr<T>(), values.size(0),
            row_splits.data_ptr<int64_t>(), row_splits.size(0) - 1,
            sums.data_ptr<T>());
    return sums;
}

#define INSTANTIATE(T)                                                  \
    template torch::Tensor ReduceSubarraysSumCUDA<T>(torch::Tensor,     \
                                                     torch::Tensor);

INSTANTIATE(int32_t)
INSTANTIATE(int64_t)
INSTANTIATE(float)
INSTANTIATE(double)
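For orientation only: the wrapper above hands values and row_splits to open3d::ml::impl::ReduceSubarraysSumCUDA, whose implementation lives elsewhere and is not shown in this file. The standalone CUDA sketch below (all names hypothetical except the row_splits convention) illustrates what a segmented sum over row_splits boundaries computes, using one thread per subarray.

#include <cstdio>
#include <cuda_runtime.h>

// One thread per subarray: sums values[row_splits[i] .. row_splits[i+1]).
// Sketch of the row_splits convention only; not the real Open3D kernel.
template <class T>
__global__ void reduce_subarrays_sum(const T *values,
                                     const int64_t *row_splits,
                                     int64_t num_arrays,
                                     T *sums)
{
    int64_t i = blockIdx.x * (int64_t)blockDim.x + threadIdx.x;
    if (i >= num_arrays) return;
    T acc = T(0);
    for (int64_t j = row_splits[i]; j < row_splits[i + 1]; ++j) acc += values[j];
    sums[i] = acc;
}

int main()
{
    // values = {1,2,3,4,5,6}, row_splits = {0,2,6}  ->  sums = {3, 18}
    const float h_values[6] = {1, 2, 3, 4, 5, 6};
    const int64_t h_splits[3] = {0, 2, 6};
    float *d_values, *d_sums; int64_t *d_splits;
    cudaMalloc(&d_values, sizeof(h_values));
    cudaMalloc(&d_splits, sizeof(h_splits));
    cudaMalloc(&d_sums, 2 * sizeof(float));
    cudaMemcpy(d_values, h_values, sizeof(h_values), cudaMemcpyHostToDevice);
    cudaMemcpy(d_splits, h_splits, sizeof(h_splits), cudaMemcpyHostToDevice);
    reduce_subarrays_sum<float><<<1, 32>>>(d_values, d_splits, 2, d_sums);
    float h_sums[2];
    cudaMemcpy(h_sums, d_sums, sizeof(h_sums), cudaMemcpyDeviceToHost);
    printf("%g %g\n", h_sums[0], h_sums[1]);  // expected: 3 18
    cudaFree(d_values); cudaFree(d_splits); cudaFree(d_sums);
    return 0;
}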
2e8300c9dfcca8302bc74038e4f2685bec66e224.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <sys/time.h> #define P (1 << 4) __global__ void addmat_x(int m, int n, int* A, int *B) { int idx, ix; int iy = threadIdx.y + blockIdx.y*blockDim.y; if (iy < n) if (threadIdx.y % 2 == 0) { for(ix = 0; ix < P; ix++) { idx = iy*m + ix; B[idx] = A[idx]; } } else { for(ix = 0; ix < P; ix++) { idx = iy*m + ix; B[idx] = A[idx]; } } } __global__ void addmat_x_div(int m, int n, int* A, int *B) { int idx, ix; int iy = threadIdx.y + blockIdx.y*blockDim.y; if (iy < n) for(ix = 0; ix < P; ix++) { idx = iy*m + ix; B[idx] = A[idx]; } } double cpuSecond() { struct timeval tp; gettimeofday(&tp,NULL); return (double) tp.tv_sec + (double)tp.tv_usec*1e-6; } int main(int argc, char** argv) { int *A, *B; int *dev_A, *dev_B; size_t m, n, nbytes; double etime, start; m = 1 << 14; n = 1 << 14; nbytes = m*n*sizeof(int); printf("P = %d\n",P); A = (int*) malloc(nbytes); B = (int*) malloc(nbytes); memset(A,0,nbytes); hipMalloc((void**) &dev_A, nbytes); hipMalloc((void**) &dev_B, nbytes); hipMemcpy(dev_A, A, nbytes, hipMemcpyHostToDevice); #if 1 /* One thread per row */ dim3 block(1,32); dim3 grid(1,(n+block.y-1)/block.y); start = cpuSecond(); hipLaunchKernelGGL(( addmat_x), dim3(grid),dim3(block), 0, 0, m,n,dev_A, dev_B); #else /* One thread per column */ dim3 block(32,1); dim3 grid((m+block.x-1)/block.x,1); start = cpuSecond(); hipLaunchKernelGGL(( addmat_x_div), dim3(grid),dim3(block), 0, 0, m,n,dev_A, dev_B); #endif hipDeviceSynchronize(); etime = cpuSecond() - start; printf("GPU Kernel %10.3g (s)\n",etime); hipFree(dev_A); hipFree(dev_B); free(A); free(B); hipDeviceReset(); }
2e8300c9dfcca8302bc74038e4f2685bec66e224.cu
#include <stdio.h>
#include <sys/time.h>

#define P (1 << 4)

__global__ void addmat_x(int m, int n, int* A, int *B)
{
    int idx, ix;
    int iy = threadIdx.y + blockIdx.y*blockDim.y;
    if (iy < n)
        if (threadIdx.y % 2 == 0)
        {
            for(ix = 0; ix < P; ix++)
            {
                idx = iy*m + ix;
                B[idx] = A[idx];
            }
        }
        else
        {
            for(ix = 0; ix < P; ix++)
            {
                idx = iy*m + ix;
                B[idx] = A[idx];
            }
        }
}

__global__ void addmat_x_div(int m, int n, int* A, int *B)
{
    int idx, ix;
    int iy = threadIdx.y + blockIdx.y*blockDim.y;
    if (iy < n)
        for(ix = 0; ix < P; ix++)
        {
            idx = iy*m + ix;
            B[idx] = A[idx];
        }
}

double cpuSecond()
{
    struct timeval tp;
    gettimeofday(&tp,NULL);
    return (double) tp.tv_sec + (double)tp.tv_usec*1e-6;
}

int main(int argc, char** argv)
{
    int *A, *B;
    int *dev_A, *dev_B;
    size_t m, n, nbytes;
    double etime, start;

    m = 1 << 14;
    n = 1 << 14;
    nbytes = m*n*sizeof(int);

    printf("P = %d\n",P);

    A = (int*) malloc(nbytes);
    B = (int*) malloc(nbytes);
    memset(A,0,nbytes);

    cudaMalloc((void**) &dev_A, nbytes);
    cudaMalloc((void**) &dev_B, nbytes);
    cudaMemcpy(dev_A, A, nbytes, cudaMemcpyHostToDevice);

#if 1
    /* One thread per row */
    dim3 block(1,32);
    dim3 grid(1,(n+block.y-1)/block.y);
    start = cpuSecond();
    addmat_x<<<grid,block>>>(m,n,dev_A, dev_B);
#else
    /* One thread per column */
    dim3 block(32,1);
    dim3 grid((m+block.x-1)/block.x,1);
    start = cpuSecond();
    addmat_x_div<<<grid,block>>>(m,n,dev_A, dev_B);
#endif
    cudaDeviceSynchronize();
    etime = cpuSecond() - start;
    printf("GPU Kernel %10.3g (s)\n",etime);

    cudaFree(dev_A);
    cudaFree(dev_B);

    free(A);
    free(B);

    cudaDeviceReset();
}
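The two versions of this file differ only in mechanical runtime-API renames, and neither checks return codes. As a hedged aside (not part of either file), the sketch below runs the same allocate/copy/synchronize/free/reset sequence with errors checked; the CHECK macro is an assumption introduced here, and the 1:1 HIP spellings are noted in the leading comment.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Minimal sketch: error-checked version of the call sequence used above.
// Under HIP the calls are renamed 1:1 (cudaMalloc -> hipMalloc,
// cudaMemcpy -> hipMemcpy, cudaDeviceSynchronize -> hipDeviceSynchronize,
// cudaFree -> hipFree, cudaDeviceReset -> hipDeviceReset).
#define CHECK(call)                                                \
    do {                                                           \
        cudaError_t err_ = (call);                                 \
        if (err_ != cudaSuccess) {                                 \
            fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,     \
                    cudaGetErrorString(err_));                     \
            return 1;                                              \
        }                                                          \
    } while (0)

int main()
{
    const size_t nbytes = (size_t)1 << 20;
    int *dev_A = NULL;
    int *A = (int *)calloc(nbytes, 1);   // zero-initialized host buffer

    CHECK(cudaMalloc((void **)&dev_A, nbytes));
    CHECK(cudaMemcpy(dev_A, A, nbytes, cudaMemcpyHostToDevice));
    CHECK(cudaDeviceSynchronize());
    CHECK(cudaFree(dev_A));
    free(A);
    CHECK(cudaDeviceReset());
    return 0;
}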
819ab5d178953dbde06718782991792fcdc4b2fa.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * This is a simple test program to measure the memcopy bandwidth of the GPU. * It can measure device to device copy bandwidth, host to device copy bandwidth * for pageable and pinned memory, and device to host copy bandwidth for pageable * and pinned memory. * * Usage: * ./bandwidthTest [option]... */ // CUDA runtime #include <hip/hip_runtime.h> // includes #include <helper_functions.h> // helper for shared functions common to CUDA Samples #include <helper_cuda.h> // helper functions for CUDA error checking and initialization #include <hip/hip_runtime.h> #include <memory> #include <iostream> #include <cassert> static const char *sSDKsample = "CUDA Bandwidth Test"; // defines, project #define MEMCOPY_ITERATIONS 10000 #define DEFAULT_SIZE ( 32 * ( 1 << 20 ) ) //32 M #define DEFAULT_INCREMENT (1 << 22) //4 M #define CACHE_CLEAR_SIZE (1 << 24) //16 M //shmoo mode defines #define SHMOO_MEMSIZE_MAX (1 << 26) //64 M #define SHMOO_MEMSIZE_START (1 << 10) //1 KB #define SHMOO_INCREMENT_1KB (1 << 10) //1 KB #define SHMOO_INCREMENT_2KB (1 << 11) //2 KB #define SHMOO_INCREMENT_10KB (10 * (1 << 10)) //10KB #define SHMOO_INCREMENT_100KB (100 * (1 << 10)) //100 KB #define SHMOO_INCREMENT_1MB (1 << 20) //1 MB #define SHMOO_INCREMENT_2MB (1 << 21) //2 MB #define SHMOO_INCREMENT_4MB (1 << 22) //4 MB #define SHMOO_LIMIT_20KB (20 * (1 << 10)) //20 KB #define SHMOO_LIMIT_50KB (50 * (1 << 10)) //50 KB #define SHMOO_LIMIT_100KB (100 * (1 << 10)) //100 KB #define SHMOO_LIMIT_1MB (1 << 20) //1 MB #define SHMOO_LIMIT_16MB (1 << 24) //16 MB #define SHMOO_LIMIT_32MB (1 << 25) //32 MB //enums, project enum testMode { QUICK_MODE, RANGE_MODE, SHMOO_MODE }; enum memcpyKind { DEVICE_TO_HOST, HOST_TO_DEVICE, DEVICE_TO_DEVICE }; enum printMode { USER_READABLE, CSV }; enum memoryMode { PINNED, PAGEABLE }; const char *sMemoryCopyKind[] = { "Device to Host", "Host to Device", "Device to Device", NULL }; const char *sMemoryMode[] = { "PINNED", "PAGEABLE", NULL }; // if true, use CPU based timing for everything static bool bDontUseGPUTiming; int *pArgc = NULL; char **pArgv = NULL; //////////////////////////////////////////////////////////////////////////////// // declaration, forward int runTest(const int argc, const char **argv); void testBandwidth(unsigned int start, unsigned int end, unsigned int increment, testMode mode, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc); void testBandwidthQuick(unsigned int size, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc); void testBandwidthRange(unsigned int start, unsigned int end, unsigned int increment, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc); void testBandwidthShmoo(memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc); float testDeviceToHostTransfer(unsigned int memSize, memoryMode memMode, bool wc); float testHostToDeviceTransfer(unsigned int memSize, memoryMode memMode, bool wc); float testDeviceToDeviceTransfer(unsigned int memSize); 
void printResultsReadable(unsigned int *memSizes, double *bandwidths, unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc); void printResultsCSV(unsigned int *memSizes, double *bandwidths, unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc); void printHelp(void); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { pArgc = &argc; pArgv = argv; // set logfile name and start logs printf("[%s] - Starting...\n", sSDKsample); int iRetVal = runTest(argc, (const char **)argv); if (iRetVal < 0) { checkCudaErrors(hipSetDevice(0)); } // finish printf("%s\n", (iRetVal==0) ? "Result = PASS" : "Result = FAIL"); printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n"); exit((iRetVal==0) ? EXIT_SUCCESS : EXIT_FAILURE); } /////////////////////////////////////////////////////////////////////////////// //Parse args, run the appropriate tests /////////////////////////////////////////////////////////////////////////////// int runTest(const int argc, const char **argv) { int start = DEFAULT_SIZE; int end = DEFAULT_SIZE; int startDevice = 0; int endDevice = 0; int increment = DEFAULT_INCREMENT; testMode mode = QUICK_MODE; bool htod = false; bool dtoh = false; bool dtod = false; bool wc = false; char *modeStr; char *device = NULL; printMode printmode = USER_READABLE; char *memModeStr = NULL; memoryMode memMode = PINNED; //process command line args if (checkCmdLineFlag(argc, argv, "help")) { printHelp(); return 0; } if (checkCmdLineFlag(argc, argv, "csv")) { printmode = CSV; } if (getCmdLineArgumentString(argc, argv, "memory", &memModeStr)) { if (strcmp(memModeStr, "pageable") == 0) { memMode = PAGEABLE; } else if (strcmp(memModeStr, "pinned") == 0) { memMode = PINNED; } else { printf("Invalid memory mode - valid modes are pageable or pinned\n"); printf("See --help for more information\n"); return -1000; } } else { //default - pinned memory memMode = PINNED; } if (getCmdLineArgumentString(argc, argv, "device", &device)) { int deviceCount; hipError_t error_id = hipGetDeviceCount(&deviceCount); if (error_id != hipSuccess) { printf("hipGetDeviceCount returned %d\n-> %s\n", (int)error_id, hipGetErrorString(error_id)); exit(EXIT_FAILURE); } if (deviceCount == 0) { printf("!!!!!No devices found!!!!!\n"); return -2000; } if (strcmp(device, "all") == 0) { printf("\n!!!!!Cumulative Bandwidth to be computed from all the devices !!!!!!\n\n"); startDevice = 0; endDevice = deviceCount-1; } else { startDevice = endDevice = atoi(device); if (startDevice >= deviceCount || startDevice < 0) { printf("\n!!!!!Invalid GPU number %d given hence default gpu %d will be used !!!!!\n", startDevice,0); startDevice = endDevice = 0; } } } printf("Running on...\n\n"); for (int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++) { hipDeviceProp_t deviceProp; hipError_t error_id = hipGetDeviceProperties(&deviceProp, currentDevice); if (error_id == hipSuccess) { printf(" Device %d: %s\n", currentDevice, deviceProp.name); if (deviceProp.computeMode == hipComputeModeProhibited) { fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n"); checkCudaErrors(hipSetDevice(currentDevice)); exit(EXIT_FAILURE); } } else { printf("hipGetDeviceProperties returned %d\n-> %s\n", (int)error_id, 
hipGetErrorString(error_id)); checkCudaErrors(hipSetDevice(currentDevice)); exit(EXIT_FAILURE); } } if (getCmdLineArgumentString(argc, argv, "mode", &modeStr)) { //figure out the mode if (strcmp(modeStr, "quick") == 0) { printf(" Quick Mode\n\n"); mode = QUICK_MODE; } else if (strcmp(modeStr, "shmoo") == 0) { printf(" Shmoo Mode\n\n"); mode = SHMOO_MODE; } else if (strcmp(modeStr, "range") == 0) { printf(" Range Mode\n\n"); mode = RANGE_MODE; } else { printf("Invalid mode - valid modes are quick, range, or shmoo\n"); printf("See --help for more information\n"); return -3000; } } else { //default mode - quick printf(" Quick Mode\n\n"); mode = QUICK_MODE; } if (checkCmdLineFlag(argc, argv, "htod")) { htod = true; } if (checkCmdLineFlag(argc, argv, "dtoh")) { dtoh = true; } if (checkCmdLineFlag(argc, argv, "dtod")) { dtod = true; } #if CUDART_VERSION >= 2020 if (checkCmdLineFlag(argc, argv, "wc")) { wc = true; } #endif if (checkCmdLineFlag(argc, argv, "cputiming")) { bDontUseGPUTiming = true; } if (!htod && !dtoh && !dtod) { //default: All htod = true; dtoh = true; dtod = true; } if (RANGE_MODE == mode) { if (checkCmdLineFlag(argc, (const char **)argv, "start")) { start = getCmdLineArgumentInt(argc, argv, "start"); if (start <= 0) { printf("Illegal argument - start must be greater than zero\n"); return -4000; } } else { printf("Must specify a starting size in range mode\n"); printf("See --help for more information\n"); return -5000; } if (checkCmdLineFlag(argc, (const char **)argv, "end")) { end = getCmdLineArgumentInt(argc, argv, "end"); if (end <= 0) { printf("Illegal argument - end must be greater than zero\n"); return -6000; } if (start > end) { printf("Illegal argument - start is greater than end\n"); return -7000; } } else { printf("Must specify an end size in range mode.\n"); printf("See --help for more information\n"); return -8000; } if (checkCmdLineFlag(argc, argv, "increment")) { increment = getCmdLineArgumentInt(argc, argv, "increment"); if (increment <= 0) { printf("Illegal argument - increment must be greater than zero\n"); return -9000; } } else { printf("Must specify an increment in user mode\n"); printf("See --help for more information\n"); return -10000; } } if (htod) { testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment, mode, HOST_TO_DEVICE, printmode, memMode, startDevice, endDevice, wc); } if (dtoh) { testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment, mode, DEVICE_TO_HOST, printmode, memMode, startDevice, endDevice, wc); } if (dtod) { testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment, mode, DEVICE_TO_DEVICE, printmode, memMode, startDevice, endDevice, wc); } // Ensure that we reset all CUDA Devices in question for (int nDevice = startDevice; nDevice <= endDevice; nDevice++) { hipSetDevice(nDevice); } return 0; } /////////////////////////////////////////////////////////////////////////////// // Run a bandwidth test /////////////////////////////////////////////////////////////////////////////// void testBandwidth(unsigned int start, unsigned int end, unsigned int increment, testMode mode, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc) { switch (mode) { case QUICK_MODE: testBandwidthQuick(DEFAULT_SIZE, kind, printmode, memMode, startDevice, endDevice, wc); break; case RANGE_MODE: testBandwidthRange(start, end, increment, kind, printmode, memMode, startDevice, endDevice, wc); break; case SHMOO_MODE: testBandwidthShmoo(kind, printmode, memMode, 
startDevice, endDevice, wc); break; default: break; } } ////////////////////////////////////////////////////////////////////// // Run a quick mode bandwidth test ////////////////////////////////////////////////////////////////////// void testBandwidthQuick(unsigned int size, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc) { testBandwidthRange(size, size, DEFAULT_INCREMENT, kind, printmode, memMode, startDevice, endDevice, wc); } /////////////////////////////////////////////////////////////////////// // Run a range mode bandwidth test ////////////////////////////////////////////////////////////////////// void testBandwidthRange(unsigned int start, unsigned int end, unsigned int increment, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc) { //count the number of copies we're going to run unsigned int count = 1 + ((end - start) / increment); unsigned int *memSizes = (unsigned int *)malloc(count * sizeof(unsigned int)); double *bandwidths = (double *) malloc(count * sizeof(double)); // Before calculating the cumulative bandwidth, initialize bandwidths array to NULL for (unsigned int i = 0; i < count; i++) { bandwidths[i] = 0.0; } // Use the device asked by the user for (int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++) { hipSetDevice(currentDevice); //run each of the copies for (unsigned int i = 0; i < count; i++) { memSizes[i] = start + i * increment; switch (kind) { case DEVICE_TO_HOST: bandwidths[i] += testDeviceToHostTransfer(memSizes[i], memMode, wc); break; case HOST_TO_DEVICE: bandwidths[i] += testHostToDeviceTransfer(memSizes[i], memMode, wc); break; case DEVICE_TO_DEVICE: bandwidths[i] += testDeviceToDeviceTransfer(memSizes[i]); break; } } } // Complete the bandwidth computation on all the devices //print results if (printmode == CSV) { printResultsCSV(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc); } else { printResultsReadable(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc); } //clean up free(memSizes); free(bandwidths); } ////////////////////////////////////////////////////////////////////////////// // Intense shmoo mode - covers a large range of values with varying increments ////////////////////////////////////////////////////////////////////////////// void testBandwidthShmoo(memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc) { //count the number of copies to make unsigned int count = 1 + (SHMOO_LIMIT_20KB / SHMOO_INCREMENT_1KB) + ((SHMOO_LIMIT_50KB - SHMOO_LIMIT_20KB) / SHMOO_INCREMENT_2KB) + ((SHMOO_LIMIT_100KB - SHMOO_LIMIT_50KB) / SHMOO_INCREMENT_10KB) + ((SHMOO_LIMIT_1MB - SHMOO_LIMIT_100KB) / SHMOO_INCREMENT_100KB) + ((SHMOO_LIMIT_16MB - SHMOO_LIMIT_1MB) / SHMOO_INCREMENT_1MB) + ((SHMOO_LIMIT_32MB - SHMOO_LIMIT_16MB) / SHMOO_INCREMENT_2MB) + ((SHMOO_MEMSIZE_MAX - SHMOO_LIMIT_32MB) / SHMOO_INCREMENT_4MB); unsigned int *memSizes = (unsigned int *)malloc(count * sizeof(unsigned int)); double *bandwidths = (double *) malloc(count * sizeof(double)); // Before calculating the cumulative bandwidth, initialize bandwidths array to NULL for (unsigned int i = 0; i < count; i++) { bandwidths[i] = 0.0; } // Use the device asked by the user for (int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++) { hipSetDevice(currentDevice); //Run the shmoo int iteration = 0; unsigned int memSize = 0; while (memSize <= SHMOO_MEMSIZE_MAX) { if 
(memSize < SHMOO_LIMIT_20KB) { memSize += SHMOO_INCREMENT_1KB; } else if (memSize < SHMOO_LIMIT_50KB) { memSize += SHMOO_INCREMENT_2KB; } else if (memSize < SHMOO_LIMIT_100KB) { memSize += SHMOO_INCREMENT_10KB; } else if (memSize < SHMOO_LIMIT_1MB) { memSize += SHMOO_INCREMENT_100KB; } else if (memSize < SHMOO_LIMIT_16MB) { memSize += SHMOO_INCREMENT_1MB; } else if (memSize < SHMOO_LIMIT_32MB) { memSize += SHMOO_INCREMENT_2MB; } else { memSize += SHMOO_INCREMENT_4MB; } memSizes[iteration] = memSize; switch (kind) { case DEVICE_TO_HOST: bandwidths[iteration] += testDeviceToHostTransfer(memSizes[iteration], memMode, wc); break; case HOST_TO_DEVICE: bandwidths[iteration] += testHostToDeviceTransfer(memSizes[iteration], memMode, wc); break; case DEVICE_TO_DEVICE: bandwidths[iteration] += testDeviceToDeviceTransfer(memSizes[iteration]); break; } iteration++; printf("."); } } // Complete the bandwidth computation on all the devices //print results printf("\n"); if (CSV == printmode) { printResultsCSV(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc); } else { printResultsReadable(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc); } //clean up free(memSizes); free(bandwidths); } /////////////////////////////////////////////////////////////////////////////// // test the bandwidth of a device to host memcopy of a specific size /////////////////////////////////////////////////////////////////////////////// float testDeviceToHostTransfer(unsigned int memSize, memoryMode memMode, bool wc) { StopWatchInterface *timer = NULL; float elapsedTimeInMs = 0.0f; float bandwidthInMBs = 0.0f; unsigned char *h_idata = NULL; unsigned char *h_odata = NULL; hipEvent_t start, stop; sdkCreateTimer(&timer); checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); //allocate host memory if (PINNED == memMode) { //pinned memory mode - use special function to get OS-pinned memory #if CUDART_VERSION >= 2020 checkCudaErrors(hipHostMalloc((void **)&h_idata, memSize, (wc) ? hipHostMallocWriteCombined : 0)); checkCudaErrors(hipHostMalloc((void **)&h_odata, memSize, (wc) ? 
hipHostMallocWriteCombined : 0)); #else checkCudaErrors(hipHostMalloc((void **)&h_idata, memSize)); checkCudaErrors(hipHostMalloc((void **)&h_odata, memSize)); #endif } else { //pageable memory mode - use malloc h_idata = (unsigned char *)malloc(memSize); h_odata = (unsigned char *)malloc(memSize); if (h_idata == 0 || h_odata == 0) { fprintf(stderr, "Not enough memory avaialable on host to run test!\n"); exit(EXIT_FAILURE); } } //initialize the memory for (unsigned int i = 0; i < memSize/sizeof(unsigned char); i++) { h_idata[i] = (unsigned char)(i & 0xff); } // allocate device memory unsigned char *d_idata; checkCudaErrors(hipMalloc((void **) &d_idata, memSize)); //initialize the device memory checkCudaErrors(hipMemcpy(d_idata, h_idata, memSize, hipMemcpyHostToDevice)); //copy data from GPU to Host sdkStartTimer(&timer); checkCudaErrors(hipEventRecord(start, 0)); if (PINNED == memMode) { for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) { checkCudaErrors(hipMemcpyAsync(h_odata, d_idata, memSize, hipMemcpyDeviceToHost, 0)); } } else { for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) { checkCudaErrors(hipMemcpy(h_odata, d_idata, memSize, hipMemcpyDeviceToHost)); } } checkCudaErrors(hipEventRecord(stop, 0)); // make sure GPU has finished copying checkCudaErrors(hipDeviceSynchronize()); //get the total elapsed time in ms sdkStopTimer(&timer); checkCudaErrors(hipEventElapsedTime(&elapsedTimeInMs, start, stop)); if (PINNED != memMode || bDontUseGPUTiming) { elapsedTimeInMs = sdkGetTimerValue(&timer); } //calculate bandwidth in MB/s bandwidthInMBs = ((float)(1<<10) * memSize * (float)MEMCOPY_ITERATIONS) / (elapsedTimeInMs * (float)(1 << 20)); //clean up memory checkCudaErrors(hipEventDestroy(stop)); checkCudaErrors(hipEventDestroy(start)); sdkDeleteTimer(&timer); if (PINNED == memMode) { checkCudaErrors(hipHostFree(h_idata)); checkCudaErrors(hipHostFree(h_odata)); } else { free(h_idata); free(h_odata); } checkCudaErrors(hipFree(d_idata)); return bandwidthInMBs; } /////////////////////////////////////////////////////////////////////////////// //! test the bandwidth of a host to device memcopy of a specific size /////////////////////////////////////////////////////////////////////////////// float testHostToDeviceTransfer(unsigned int memSize, memoryMode memMode, bool wc) { StopWatchInterface *timer = NULL; float elapsedTimeInMs = 0.0f; float bandwidthInMBs = 0.0f; hipEvent_t start, stop; sdkCreateTimer(&timer); checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); //allocate host memory unsigned char *h_odata = NULL; if (PINNED == memMode) { #if CUDART_VERSION >= 2020 //pinned memory mode - use special function to get OS-pinned memory checkCudaErrors(hipHostMalloc((void **)&h_odata, memSize, (wc) ? 
hipHostMallocWriteCombined : 0)); #else //pinned memory mode - use special function to get OS-pinned memory checkCudaErrors(hipHostMalloc((void **)&h_odata, memSize)); #endif } else { //pageable memory mode - use malloc h_odata = (unsigned char *)malloc(memSize); if (h_odata == 0) { fprintf(stderr, "Not enough memory available on host to run test!\n"); exit(EXIT_FAILURE); } } unsigned char *h_cacheClear1 = (unsigned char *)malloc(CACHE_CLEAR_SIZE); unsigned char *h_cacheClear2 = (unsigned char *)malloc(CACHE_CLEAR_SIZE); if (h_cacheClear1 == 0 || h_cacheClear1 == 0) { fprintf(stderr, "Not enough memory available on host to run test!\n"); exit(EXIT_FAILURE); } //initialize the memory for (unsigned int i = 0; i < memSize/sizeof(unsigned char); i++) { h_odata[i] = (unsigned char)(i & 0xff); } for (unsigned int i = 0; i < CACHE_CLEAR_SIZE / sizeof(unsigned char); i++) { h_cacheClear1[i] = (unsigned char)(i & 0xff); h_cacheClear2[i] = (unsigned char)(0xff - (i & 0xff)); } //allocate device memory unsigned char *d_idata; checkCudaErrors(hipMalloc((void **) &d_idata, memSize)); sdkStartTimer(&timer); checkCudaErrors(hipEventRecord(start, 0)); //copy host memory to device memory if (PINNED == memMode) { for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) { checkCudaErrors(hipMemcpyAsync(d_idata, h_odata, memSize, hipMemcpyHostToDevice, 0)); } } else { for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) { checkCudaErrors(hipMemcpy(d_idata, h_odata, memSize, hipMemcpyHostToDevice)); } } checkCudaErrors(hipEventRecord(stop, 0)); checkCudaErrors(hipDeviceSynchronize()); //total elapsed time in ms sdkStopTimer(&timer); checkCudaErrors(hipEventElapsedTime(&elapsedTimeInMs, start, stop)); if (PINNED != memMode || bDontUseGPUTiming) { elapsedTimeInMs = sdkGetTimerValue(&timer); } sdkResetTimer(&timer); //calculate bandwidth in MB/s bandwidthInMBs = ((float)(1<<10) * memSize * (float)MEMCOPY_ITERATIONS) / (elapsedTimeInMs * (float)(1 << 20)); //clean up memory checkCudaErrors(hipEventDestroy(stop)); checkCudaErrors(hipEventDestroy(start)); sdkDeleteTimer(&timer); if (PINNED == memMode) { checkCudaErrors(hipHostFree(h_odata)); } else { free(h_odata); } free(h_cacheClear1); free(h_cacheClear2); checkCudaErrors(hipFree(d_idata)); return bandwidthInMBs; } /////////////////////////////////////////////////////////////////////////////// //! 
test the bandwidth of a device to device memcopy of a specific size /////////////////////////////////////////////////////////////////////////////// float testDeviceToDeviceTransfer(unsigned int memSize) { StopWatchInterface *timer = NULL; float elapsedTimeInMs = 0.0f; float bandwidthInMBs = 0.0f; hipEvent_t start, stop; sdkCreateTimer(&timer); checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); //allocate host memory unsigned char *h_idata = (unsigned char *)malloc(memSize); if (h_idata == 0) { fprintf(stderr, "Not enough memory avaialable on host to run test!\n"); exit(EXIT_FAILURE); } //initialize the host memory for (unsigned int i = 0; i < memSize/sizeof(unsigned char); i++) { h_idata[i] = (unsigned char)(i & 0xff); } //allocate device memory unsigned char *d_idata; checkCudaErrors(hipMalloc((void **) &d_idata, memSize)); unsigned char *d_odata; checkCudaErrors(hipMalloc((void **) &d_odata, memSize)); //initialize memory checkCudaErrors(hipMemcpy(d_idata, h_idata, memSize, hipMemcpyHostToDevice)); //run the memcopy sdkStartTimer(&timer); checkCudaErrors(hipEventRecord(start, 0)); for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) { checkCudaErrors(hipMemcpy(d_odata, d_idata, memSize, hipMemcpyDeviceToDevice)); } checkCudaErrors(hipEventRecord(stop, 0)); //Since device to device memory copies are non-blocking, //hipDeviceSynchronize() is required in order to get //proper timing. checkCudaErrors(hipDeviceSynchronize()); //get the total elapsed time in ms sdkStopTimer(&timer); checkCudaErrors(hipEventElapsedTime(&elapsedTimeInMs, start, stop)); if (bDontUseGPUTiming) { elapsedTimeInMs = sdkGetTimerValue(&timer); } //calculate bandwidth in MB/s bandwidthInMBs = 2.0f * ((float)(1<<10) * memSize * (float)MEMCOPY_ITERATIONS) / (elapsedTimeInMs * (float)(1 << 20)); //clean up memory sdkDeleteTimer(&timer); free(h_idata); checkCudaErrors(hipEventDestroy(stop)); checkCudaErrors(hipEventDestroy(start)); checkCudaErrors(hipFree(d_idata)); checkCudaErrors(hipFree(d_odata)); return bandwidthInMBs; } ///////////////////////////////////////////////////////// //print results in an easily read format //////////////////////////////////////////////////////// void printResultsReadable(unsigned int *memSizes, double *bandwidths, unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc) { printf(" %s Bandwidth, %i Device(s)\n", sMemoryCopyKind[kind], iNumDevs); printf(" %s Memory Transfers\n", sMemoryMode[memMode]); if (wc) { printf(" Write-Combined Memory Writes are Enabled"); } printf(" Transfer Size (Bytes)\tBandwidth(MB/s)\n"); unsigned int i; for (i = 0; i < (count - 1); i++) { printf(" %u\t\t\t%s%.1f\n", memSizes[i], (memSizes[i] < 10000)? "\t" : "", bandwidths[i]); } printf(" %u\t\t\t%s%.1f\n\n", memSizes[i], (memSizes[i] < 10000)? 
"\t" : "", bandwidths[i]); } /////////////////////////////////////////////////////////////////////////// //print results in a database format /////////////////////////////////////////////////////////////////////////// void printResultsCSV(unsigned int *memSizes, double *bandwidths, unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc) { std::string sConfig; // log config information if (kind == DEVICE_TO_DEVICE) { sConfig += "D2D"; } else { if (kind == DEVICE_TO_HOST) { sConfig += "D2H"; } else if (kind == HOST_TO_DEVICE) { sConfig += "H2D"; } if (memMode == PAGEABLE) { sConfig += "-Paged"; } else if (memMode == PINNED) { sConfig += "-Pinned"; if (wc) { sConfig += "-WriteCombined"; } } } unsigned int i; double dSeconds = 0.0; for (i = 0; i < count; i++) { dSeconds = (double)memSizes[i] / (bandwidths[i] * (double)(1<<20)); printf("bandwidthTest-%s, Bandwidth = %.1f MB/s, Time = %.5f s, Size = %u bytes, NumDevsUsed = %d\n", sConfig.c_str(), bandwidths[i], dSeconds, memSizes[i], iNumDevs); } } /////////////////////////////////////////////////////////////////////////// //Print help screen /////////////////////////////////////////////////////////////////////////// void printHelp(void) { printf("Usage: bandwidthTest [OPTION]...\n"); printf("Test the bandwidth for device to host, host to device, and device to device transfers\n"); printf("\n"); printf("Example: measure the bandwidth of device to host pinned memory copies in the range 1024 Bytes to 102400 Bytes in 1024 Byte increments\n"); printf("./bandwidthTest --memory=pinned --mode=range --start=1024 --end=102400 --increment=1024 --dtoh\n"); printf("\n"); printf("Options:\n"); printf("--help\tDisplay this help menu\n"); printf("--csv\tPrint results as a CSV\n"); printf("--device=[deviceno]\tSpecify the device device to be used\n"); printf(" all - compute cumulative bandwidth on all the devices\n"); printf(" 0,1,2,...,n - Specify any particular device to be used\n"); printf("--memory=[MEMMODE]\tSpecify which memory mode to use\n"); printf(" pageable - pageable memory\n"); printf(" pinned - non-pageable system memory\n"); printf("--mode=[MODE]\tSpecify the mode to use\n"); printf(" quick - performs a quick measurement\n"); printf(" range - measures a user-specified range of values\n"); printf(" shmoo - performs an intense shmoo of a large range of values\n"); printf("--htod\tMeasure host to device transfers\n"); printf("--dtoh\tMeasure device to host transfers\n"); printf("--dtod\tMeasure device to device transfers\n"); #if CUDART_VERSION >= 2020 printf("--wc\tAllocate pinned memory as write-combined\n"); #endif printf("--cputiming\tForce CPU-based timing always\n"); printf("Range mode options\n"); printf("--start=[SIZE]\tStarting transfer size in bytes\n"); printf("--end=[SIZE]\tEnding transfer size in bytes\n"); printf("--increment=[SIZE]\tIncrement size in bytes\n"); }
819ab5d178953dbde06718782991792fcdc4b2fa.cu
/* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * This is a simple test program to measure the memcopy bandwidth of the GPU. * It can measure device to device copy bandwidth, host to device copy bandwidth * for pageable and pinned memory, and device to host copy bandwidth for pageable * and pinned memory. * * Usage: * ./bandwidthTest [option]... */ // CUDA runtime #include <cuda_runtime.h> // includes #include <helper_functions.h> // helper for shared functions common to CUDA Samples #include <helper_cuda.h> // helper functions for CUDA error checking and initialization #include <cuda.h> #include <memory> #include <iostream> #include <cassert> static const char *sSDKsample = "CUDA Bandwidth Test"; // defines, project #define MEMCOPY_ITERATIONS 10000 #define DEFAULT_SIZE ( 32 * ( 1 << 20 ) ) //32 M #define DEFAULT_INCREMENT (1 << 22) //4 M #define CACHE_CLEAR_SIZE (1 << 24) //16 M //shmoo mode defines #define SHMOO_MEMSIZE_MAX (1 << 26) //64 M #define SHMOO_MEMSIZE_START (1 << 10) //1 KB #define SHMOO_INCREMENT_1KB (1 << 10) //1 KB #define SHMOO_INCREMENT_2KB (1 << 11) //2 KB #define SHMOO_INCREMENT_10KB (10 * (1 << 10)) //10KB #define SHMOO_INCREMENT_100KB (100 * (1 << 10)) //100 KB #define SHMOO_INCREMENT_1MB (1 << 20) //1 MB #define SHMOO_INCREMENT_2MB (1 << 21) //2 MB #define SHMOO_INCREMENT_4MB (1 << 22) //4 MB #define SHMOO_LIMIT_20KB (20 * (1 << 10)) //20 KB #define SHMOO_LIMIT_50KB (50 * (1 << 10)) //50 KB #define SHMOO_LIMIT_100KB (100 * (1 << 10)) //100 KB #define SHMOO_LIMIT_1MB (1 << 20) //1 MB #define SHMOO_LIMIT_16MB (1 << 24) //16 MB #define SHMOO_LIMIT_32MB (1 << 25) //32 MB //enums, project enum testMode { QUICK_MODE, RANGE_MODE, SHMOO_MODE }; enum memcpyKind { DEVICE_TO_HOST, HOST_TO_DEVICE, DEVICE_TO_DEVICE }; enum printMode { USER_READABLE, CSV }; enum memoryMode { PINNED, PAGEABLE }; const char *sMemoryCopyKind[] = { "Device to Host", "Host to Device", "Device to Device", NULL }; const char *sMemoryMode[] = { "PINNED", "PAGEABLE", NULL }; // if true, use CPU based timing for everything static bool bDontUseGPUTiming; int *pArgc = NULL; char **pArgv = NULL; //////////////////////////////////////////////////////////////////////////////// // declaration, forward int runTest(const int argc, const char **argv); void testBandwidth(unsigned int start, unsigned int end, unsigned int increment, testMode mode, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc); void testBandwidthQuick(unsigned int size, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc); void testBandwidthRange(unsigned int start, unsigned int end, unsigned int increment, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc); void testBandwidthShmoo(memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc); float testDeviceToHostTransfer(unsigned int memSize, memoryMode memMode, bool wc); float testHostToDeviceTransfer(unsigned int memSize, memoryMode memMode, bool wc); float testDeviceToDeviceTransfer(unsigned int memSize); void printResultsReadable(unsigned int *memSizes, double *bandwidths, 
unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc); void printResultsCSV(unsigned int *memSizes, double *bandwidths, unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc); void printHelp(void); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { pArgc = &argc; pArgv = argv; // set logfile name and start logs printf("[%s] - Starting...\n", sSDKsample); int iRetVal = runTest(argc, (const char **)argv); if (iRetVal < 0) { checkCudaErrors(cudaSetDevice(0)); } // finish printf("%s\n", (iRetVal==0) ? "Result = PASS" : "Result = FAIL"); printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n"); exit((iRetVal==0) ? EXIT_SUCCESS : EXIT_FAILURE); } /////////////////////////////////////////////////////////////////////////////// //Parse args, run the appropriate tests /////////////////////////////////////////////////////////////////////////////// int runTest(const int argc, const char **argv) { int start = DEFAULT_SIZE; int end = DEFAULT_SIZE; int startDevice = 0; int endDevice = 0; int increment = DEFAULT_INCREMENT; testMode mode = QUICK_MODE; bool htod = false; bool dtoh = false; bool dtod = false; bool wc = false; char *modeStr; char *device = NULL; printMode printmode = USER_READABLE; char *memModeStr = NULL; memoryMode memMode = PINNED; //process command line args if (checkCmdLineFlag(argc, argv, "help")) { printHelp(); return 0; } if (checkCmdLineFlag(argc, argv, "csv")) { printmode = CSV; } if (getCmdLineArgumentString(argc, argv, "memory", &memModeStr)) { if (strcmp(memModeStr, "pageable") == 0) { memMode = PAGEABLE; } else if (strcmp(memModeStr, "pinned") == 0) { memMode = PINNED; } else { printf("Invalid memory mode - valid modes are pageable or pinned\n"); printf("See --help for more information\n"); return -1000; } } else { //default - pinned memory memMode = PINNED; } if (getCmdLineArgumentString(argc, argv, "device", &device)) { int deviceCount; cudaError_t error_id = cudaGetDeviceCount(&deviceCount); if (error_id != cudaSuccess) { printf("cudaGetDeviceCount returned %d\n-> %s\n", (int)error_id, cudaGetErrorString(error_id)); exit(EXIT_FAILURE); } if (deviceCount == 0) { printf("!!!!!No devices found!!!!!\n"); return -2000; } if (strcmp(device, "all") == 0) { printf("\n!!!!!Cumulative Bandwidth to be computed from all the devices !!!!!!\n\n"); startDevice = 0; endDevice = deviceCount-1; } else { startDevice = endDevice = atoi(device); if (startDevice >= deviceCount || startDevice < 0) { printf("\n!!!!!Invalid GPU number %d given hence default gpu %d will be used !!!!!\n", startDevice,0); startDevice = endDevice = 0; } } } printf("Running on...\n\n"); for (int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++) { cudaDeviceProp deviceProp; cudaError_t error_id = cudaGetDeviceProperties(&deviceProp, currentDevice); if (error_id == cudaSuccess) { printf(" Device %d: %s\n", currentDevice, deviceProp.name); if (deviceProp.computeMode == cudaComputeModeProhibited) { fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n"); checkCudaErrors(cudaSetDevice(currentDevice)); exit(EXIT_FAILURE); } } else { printf("cudaGetDeviceProperties returned %d\n-> %s\n", (int)error_id, cudaGetErrorString(error_id)); 
checkCudaErrors(cudaSetDevice(currentDevice)); exit(EXIT_FAILURE); } } if (getCmdLineArgumentString(argc, argv, "mode", &modeStr)) { //figure out the mode if (strcmp(modeStr, "quick") == 0) { printf(" Quick Mode\n\n"); mode = QUICK_MODE; } else if (strcmp(modeStr, "shmoo") == 0) { printf(" Shmoo Mode\n\n"); mode = SHMOO_MODE; } else if (strcmp(modeStr, "range") == 0) { printf(" Range Mode\n\n"); mode = RANGE_MODE; } else { printf("Invalid mode - valid modes are quick, range, or shmoo\n"); printf("See --help for more information\n"); return -3000; } } else { //default mode - quick printf(" Quick Mode\n\n"); mode = QUICK_MODE; } if (checkCmdLineFlag(argc, argv, "htod")) { htod = true; } if (checkCmdLineFlag(argc, argv, "dtoh")) { dtoh = true; } if (checkCmdLineFlag(argc, argv, "dtod")) { dtod = true; } #if CUDART_VERSION >= 2020 if (checkCmdLineFlag(argc, argv, "wc")) { wc = true; } #endif if (checkCmdLineFlag(argc, argv, "cputiming")) { bDontUseGPUTiming = true; } if (!htod && !dtoh && !dtod) { //default: All htod = true; dtoh = true; dtod = true; } if (RANGE_MODE == mode) { if (checkCmdLineFlag(argc, (const char **)argv, "start")) { start = getCmdLineArgumentInt(argc, argv, "start"); if (start <= 0) { printf("Illegal argument - start must be greater than zero\n"); return -4000; } } else { printf("Must specify a starting size in range mode\n"); printf("See --help for more information\n"); return -5000; } if (checkCmdLineFlag(argc, (const char **)argv, "end")) { end = getCmdLineArgumentInt(argc, argv, "end"); if (end <= 0) { printf("Illegal argument - end must be greater than zero\n"); return -6000; } if (start > end) { printf("Illegal argument - start is greater than end\n"); return -7000; } } else { printf("Must specify an end size in range mode.\n"); printf("See --help for more information\n"); return -8000; } if (checkCmdLineFlag(argc, argv, "increment")) { increment = getCmdLineArgumentInt(argc, argv, "increment"); if (increment <= 0) { printf("Illegal argument - increment must be greater than zero\n"); return -9000; } } else { printf("Must specify an increment in user mode\n"); printf("See --help for more information\n"); return -10000; } } if (htod) { testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment, mode, HOST_TO_DEVICE, printmode, memMode, startDevice, endDevice, wc); } if (dtoh) { testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment, mode, DEVICE_TO_HOST, printmode, memMode, startDevice, endDevice, wc); } if (dtod) { testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment, mode, DEVICE_TO_DEVICE, printmode, memMode, startDevice, endDevice, wc); } // Ensure that we reset all CUDA Devices in question for (int nDevice = startDevice; nDevice <= endDevice; nDevice++) { cudaSetDevice(nDevice); } return 0; } /////////////////////////////////////////////////////////////////////////////// // Run a bandwidth test /////////////////////////////////////////////////////////////////////////////// void testBandwidth(unsigned int start, unsigned int end, unsigned int increment, testMode mode, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc) { switch (mode) { case QUICK_MODE: testBandwidthQuick(DEFAULT_SIZE, kind, printmode, memMode, startDevice, endDevice, wc); break; case RANGE_MODE: testBandwidthRange(start, end, increment, kind, printmode, memMode, startDevice, endDevice, wc); break; case SHMOO_MODE: testBandwidthShmoo(kind, printmode, memMode, startDevice, endDevice, 
wc); break; default: break; } } ////////////////////////////////////////////////////////////////////// // Run a quick mode bandwidth test ////////////////////////////////////////////////////////////////////// void testBandwidthQuick(unsigned int size, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc) { testBandwidthRange(size, size, DEFAULT_INCREMENT, kind, printmode, memMode, startDevice, endDevice, wc); } /////////////////////////////////////////////////////////////////////// // Run a range mode bandwidth test ////////////////////////////////////////////////////////////////////// void testBandwidthRange(unsigned int start, unsigned int end, unsigned int increment, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc) { //count the number of copies we're going to run unsigned int count = 1 + ((end - start) / increment); unsigned int *memSizes = (unsigned int *)malloc(count * sizeof(unsigned int)); double *bandwidths = (double *) malloc(count * sizeof(double)); // Before calculating the cumulative bandwidth, initialize bandwidths array to NULL for (unsigned int i = 0; i < count; i++) { bandwidths[i] = 0.0; } // Use the device asked by the user for (int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++) { cudaSetDevice(currentDevice); //run each of the copies for (unsigned int i = 0; i < count; i++) { memSizes[i] = start + i * increment; switch (kind) { case DEVICE_TO_HOST: bandwidths[i] += testDeviceToHostTransfer(memSizes[i], memMode, wc); break; case HOST_TO_DEVICE: bandwidths[i] += testHostToDeviceTransfer(memSizes[i], memMode, wc); break; case DEVICE_TO_DEVICE: bandwidths[i] += testDeviceToDeviceTransfer(memSizes[i]); break; } } } // Complete the bandwidth computation on all the devices //print results if (printmode == CSV) { printResultsCSV(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc); } else { printResultsReadable(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc); } //clean up free(memSizes); free(bandwidths); } ////////////////////////////////////////////////////////////////////////////// // Intense shmoo mode - covers a large range of values with varying increments ////////////////////////////////////////////////////////////////////////////// void testBandwidthShmoo(memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc) { //count the number of copies to make unsigned int count = 1 + (SHMOO_LIMIT_20KB / SHMOO_INCREMENT_1KB) + ((SHMOO_LIMIT_50KB - SHMOO_LIMIT_20KB) / SHMOO_INCREMENT_2KB) + ((SHMOO_LIMIT_100KB - SHMOO_LIMIT_50KB) / SHMOO_INCREMENT_10KB) + ((SHMOO_LIMIT_1MB - SHMOO_LIMIT_100KB) / SHMOO_INCREMENT_100KB) + ((SHMOO_LIMIT_16MB - SHMOO_LIMIT_1MB) / SHMOO_INCREMENT_1MB) + ((SHMOO_LIMIT_32MB - SHMOO_LIMIT_16MB) / SHMOO_INCREMENT_2MB) + ((SHMOO_MEMSIZE_MAX - SHMOO_LIMIT_32MB) / SHMOO_INCREMENT_4MB); unsigned int *memSizes = (unsigned int *)malloc(count * sizeof(unsigned int)); double *bandwidths = (double *) malloc(count * sizeof(double)); // Before calculating the cumulative bandwidth, initialize bandwidths array to NULL for (unsigned int i = 0; i < count; i++) { bandwidths[i] = 0.0; } // Use the device asked by the user for (int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++) { cudaSetDevice(currentDevice); //Run the shmoo int iteration = 0; unsigned int memSize = 0; while (memSize <= SHMOO_MEMSIZE_MAX) { if (memSize < 
SHMOO_LIMIT_20KB) { memSize += SHMOO_INCREMENT_1KB; } else if (memSize < SHMOO_LIMIT_50KB) { memSize += SHMOO_INCREMENT_2KB; } else if (memSize < SHMOO_LIMIT_100KB) { memSize += SHMOO_INCREMENT_10KB; } else if (memSize < SHMOO_LIMIT_1MB) { memSize += SHMOO_INCREMENT_100KB; } else if (memSize < SHMOO_LIMIT_16MB) { memSize += SHMOO_INCREMENT_1MB; } else if (memSize < SHMOO_LIMIT_32MB) { memSize += SHMOO_INCREMENT_2MB; } else { memSize += SHMOO_INCREMENT_4MB; } memSizes[iteration] = memSize; switch (kind) { case DEVICE_TO_HOST: bandwidths[iteration] += testDeviceToHostTransfer(memSizes[iteration], memMode, wc); break; case HOST_TO_DEVICE: bandwidths[iteration] += testHostToDeviceTransfer(memSizes[iteration], memMode, wc); break; case DEVICE_TO_DEVICE: bandwidths[iteration] += testDeviceToDeviceTransfer(memSizes[iteration]); break; } iteration++; printf("."); } } // Complete the bandwidth computation on all the devices //print results printf("\n"); if (CSV == printmode) { printResultsCSV(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc); } else { printResultsReadable(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc); } //clean up free(memSizes); free(bandwidths); } /////////////////////////////////////////////////////////////////////////////// // test the bandwidth of a device to host memcopy of a specific size /////////////////////////////////////////////////////////////////////////////// float testDeviceToHostTransfer(unsigned int memSize, memoryMode memMode, bool wc) { StopWatchInterface *timer = NULL; float elapsedTimeInMs = 0.0f; float bandwidthInMBs = 0.0f; unsigned char *h_idata = NULL; unsigned char *h_odata = NULL; cudaEvent_t start, stop; sdkCreateTimer(&timer); checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); //allocate host memory if (PINNED == memMode) { //pinned memory mode - use special function to get OS-pinned memory #if CUDART_VERSION >= 2020 checkCudaErrors(cudaHostAlloc((void **)&h_idata, memSize, (wc) ? cudaHostAllocWriteCombined : 0)); checkCudaErrors(cudaHostAlloc((void **)&h_odata, memSize, (wc) ? 
cudaHostAllocWriteCombined : 0)); #else checkCudaErrors(cudaMallocHost((void **)&h_idata, memSize)); checkCudaErrors(cudaMallocHost((void **)&h_odata, memSize)); #endif } else { //pageable memory mode - use malloc h_idata = (unsigned char *)malloc(memSize); h_odata = (unsigned char *)malloc(memSize); if (h_idata == 0 || h_odata == 0) { fprintf(stderr, "Not enough memory available on host to run test!\n"); exit(EXIT_FAILURE); } } //initialize the memory for (unsigned int i = 0; i < memSize/sizeof(unsigned char); i++) { h_idata[i] = (unsigned char)(i & 0xff); } // allocate device memory unsigned char *d_idata; checkCudaErrors(cudaMalloc((void **) &d_idata, memSize)); //initialize the device memory checkCudaErrors(cudaMemcpy(d_idata, h_idata, memSize, cudaMemcpyHostToDevice)); //copy data from GPU to Host sdkStartTimer(&timer); checkCudaErrors(cudaEventRecord(start, 0)); if (PINNED == memMode) { for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) { checkCudaErrors(cudaMemcpyAsync(h_odata, d_idata, memSize, cudaMemcpyDeviceToHost, 0)); } } else { for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) { checkCudaErrors(cudaMemcpy(h_odata, d_idata, memSize, cudaMemcpyDeviceToHost)); } } checkCudaErrors(cudaEventRecord(stop, 0)); // make sure GPU has finished copying checkCudaErrors(cudaDeviceSynchronize()); //get the total elapsed time in ms sdkStopTimer(&timer); checkCudaErrors(cudaEventElapsedTime(&elapsedTimeInMs, start, stop)); if (PINNED != memMode || bDontUseGPUTiming) { elapsedTimeInMs = sdkGetTimerValue(&timer); } //calculate bandwidth in MB/s bandwidthInMBs = ((float)(1<<10) * memSize * (float)MEMCOPY_ITERATIONS) / (elapsedTimeInMs * (float)(1 << 20)); //clean up memory checkCudaErrors(cudaEventDestroy(stop)); checkCudaErrors(cudaEventDestroy(start)); sdkDeleteTimer(&timer); if (PINNED == memMode) { checkCudaErrors(cudaFreeHost(h_idata)); checkCudaErrors(cudaFreeHost(h_odata)); } else { free(h_idata); free(h_odata); } checkCudaErrors(cudaFree(d_idata)); return bandwidthInMBs; } /////////////////////////////////////////////////////////////////////////////// //! test the bandwidth of a host to device memcopy of a specific size /////////////////////////////////////////////////////////////////////////////// float testHostToDeviceTransfer(unsigned int memSize, memoryMode memMode, bool wc) { StopWatchInterface *timer = NULL; float elapsedTimeInMs = 0.0f; float bandwidthInMBs = 0.0f; cudaEvent_t start, stop; sdkCreateTimer(&timer); checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); //allocate host memory unsigned char *h_odata = NULL; if (PINNED == memMode) { #if CUDART_VERSION >= 2020 //pinned memory mode - use special function to get OS-pinned memory checkCudaErrors(cudaHostAlloc((void **)&h_odata, memSize, (wc) ? 
cudaHostAllocWriteCombined : 0)); #else //pinned memory mode - use special function to get OS-pinned memory checkCudaErrors(cudaMallocHost((void **)&h_odata, memSize)); #endif } else { //pageable memory mode - use malloc h_odata = (unsigned char *)malloc(memSize); if (h_odata == 0) { fprintf(stderr, "Not enough memory available on host to run test!\n"); exit(EXIT_FAILURE); } } unsigned char *h_cacheClear1 = (unsigned char *)malloc(CACHE_CLEAR_SIZE); unsigned char *h_cacheClear2 = (unsigned char *)malloc(CACHE_CLEAR_SIZE); if (h_cacheClear1 == 0 || h_cacheClear2 == 0) { fprintf(stderr, "Not enough memory available on host to run test!\n"); exit(EXIT_FAILURE); } //initialize the memory for (unsigned int i = 0; i < memSize/sizeof(unsigned char); i++) { h_odata[i] = (unsigned char)(i & 0xff); } for (unsigned int i = 0; i < CACHE_CLEAR_SIZE / sizeof(unsigned char); i++) { h_cacheClear1[i] = (unsigned char)(i & 0xff); h_cacheClear2[i] = (unsigned char)(0xff - (i & 0xff)); } //allocate device memory unsigned char *d_idata; checkCudaErrors(cudaMalloc((void **) &d_idata, memSize)); sdkStartTimer(&timer); checkCudaErrors(cudaEventRecord(start, 0)); //copy host memory to device memory if (PINNED == memMode) { for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) { checkCudaErrors(cudaMemcpyAsync(d_idata, h_odata, memSize, cudaMemcpyHostToDevice, 0)); } } else { for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) { checkCudaErrors(cudaMemcpy(d_idata, h_odata, memSize, cudaMemcpyHostToDevice)); } } checkCudaErrors(cudaEventRecord(stop, 0)); checkCudaErrors(cudaDeviceSynchronize()); //total elapsed time in ms sdkStopTimer(&timer); checkCudaErrors(cudaEventElapsedTime(&elapsedTimeInMs, start, stop)); if (PINNED != memMode || bDontUseGPUTiming) { elapsedTimeInMs = sdkGetTimerValue(&timer); } sdkResetTimer(&timer); //calculate bandwidth in MB/s bandwidthInMBs = ((float)(1<<10) * memSize * (float)MEMCOPY_ITERATIONS) / (elapsedTimeInMs * (float)(1 << 20)); //clean up memory checkCudaErrors(cudaEventDestroy(stop)); checkCudaErrors(cudaEventDestroy(start)); sdkDeleteTimer(&timer); if (PINNED == memMode) { checkCudaErrors(cudaFreeHost(h_odata)); } else { free(h_odata); } free(h_cacheClear1); free(h_cacheClear2); checkCudaErrors(cudaFree(d_idata)); return bandwidthInMBs; } /////////////////////////////////////////////////////////////////////////////// //! 
test the bandwidth of a device to device memcopy of a specific size /////////////////////////////////////////////////////////////////////////////// float testDeviceToDeviceTransfer(unsigned int memSize) { StopWatchInterface *timer = NULL; float elapsedTimeInMs = 0.0f; float bandwidthInMBs = 0.0f; cudaEvent_t start, stop; sdkCreateTimer(&timer); checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); //allocate host memory unsigned char *h_idata = (unsigned char *)malloc(memSize); if (h_idata == 0) { fprintf(stderr, "Not enough memory available on host to run test!\n"); exit(EXIT_FAILURE); } //initialize the host memory for (unsigned int i = 0; i < memSize/sizeof(unsigned char); i++) { h_idata[i] = (unsigned char)(i & 0xff); } //allocate device memory unsigned char *d_idata; checkCudaErrors(cudaMalloc((void **) &d_idata, memSize)); unsigned char *d_odata; checkCudaErrors(cudaMalloc((void **) &d_odata, memSize)); //initialize memory checkCudaErrors(cudaMemcpy(d_idata, h_idata, memSize, cudaMemcpyHostToDevice)); //run the memcopy sdkStartTimer(&timer); checkCudaErrors(cudaEventRecord(start, 0)); for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) { checkCudaErrors(cudaMemcpy(d_odata, d_idata, memSize, cudaMemcpyDeviceToDevice)); } checkCudaErrors(cudaEventRecord(stop, 0)); //Since device to device memory copies are non-blocking, //cudaDeviceSynchronize() is required in order to get //proper timing. checkCudaErrors(cudaDeviceSynchronize()); //get the total elapsed time in ms sdkStopTimer(&timer); checkCudaErrors(cudaEventElapsedTime(&elapsedTimeInMs, start, stop)); if (bDontUseGPUTiming) { elapsedTimeInMs = sdkGetTimerValue(&timer); } //calculate bandwidth in MB/s bandwidthInMBs = 2.0f * ((float)(1<<10) * memSize * (float)MEMCOPY_ITERATIONS) / (elapsedTimeInMs * (float)(1 << 20)); //clean up memory sdkDeleteTimer(&timer); free(h_idata); checkCudaErrors(cudaEventDestroy(stop)); checkCudaErrors(cudaEventDestroy(start)); checkCudaErrors(cudaFree(d_idata)); checkCudaErrors(cudaFree(d_odata)); return bandwidthInMBs; } ///////////////////////////////////////////////////////// //print results in an easily read format //////////////////////////////////////////////////////// void printResultsReadable(unsigned int *memSizes, double *bandwidths, unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc) { printf(" %s Bandwidth, %i Device(s)\n", sMemoryCopyKind[kind], iNumDevs); printf(" %s Memory Transfers\n", sMemoryMode[memMode]); if (wc) { printf(" Write-Combined Memory Writes are Enabled"); } printf(" Transfer Size (Bytes)\tBandwidth(MB/s)\n"); unsigned int i; for (i = 0; i < (count - 1); i++) { printf(" %u\t\t\t%s%.1f\n", memSizes[i], (memSizes[i] < 10000)? "\t" : "", bandwidths[i]); } printf(" %u\t\t\t%s%.1f\n\n", memSizes[i], (memSizes[i] < 10000)? 
"\t" : "", bandwidths[i]); } /////////////////////////////////////////////////////////////////////////// //print results in a database format /////////////////////////////////////////////////////////////////////////// void printResultsCSV(unsigned int *memSizes, double *bandwidths, unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc) { std::string sConfig; // log config information if (kind == DEVICE_TO_DEVICE) { sConfig += "D2D"; } else { if (kind == DEVICE_TO_HOST) { sConfig += "D2H"; } else if (kind == HOST_TO_DEVICE) { sConfig += "H2D"; } if (memMode == PAGEABLE) { sConfig += "-Paged"; } else if (memMode == PINNED) { sConfig += "-Pinned"; if (wc) { sConfig += "-WriteCombined"; } } } unsigned int i; double dSeconds = 0.0; for (i = 0; i < count; i++) { dSeconds = (double)memSizes[i] / (bandwidths[i] * (double)(1<<20)); printf("bandwidthTest-%s, Bandwidth = %.1f MB/s, Time = %.5f s, Size = %u bytes, NumDevsUsed = %d\n", sConfig.c_str(), bandwidths[i], dSeconds, memSizes[i], iNumDevs); } } /////////////////////////////////////////////////////////////////////////// //Print help screen /////////////////////////////////////////////////////////////////////////// void printHelp(void) { printf("Usage: bandwidthTest [OPTION]...\n"); printf("Test the bandwidth for device to host, host to device, and device to device transfers\n"); printf("\n"); printf("Example: measure the bandwidth of device to host pinned memory copies in the range 1024 Bytes to 102400 Bytes in 1024 Byte increments\n"); printf("./bandwidthTest --memory=pinned --mode=range --start=1024 --end=102400 --increment=1024 --dtoh\n"); printf("\n"); printf("Options:\n"); printf("--help\tDisplay this help menu\n"); printf("--csv\tPrint results as a CSV\n"); printf("--device=[deviceno]\tSpecify the device device to be used\n"); printf(" all - compute cumulative bandwidth on all the devices\n"); printf(" 0,1,2,...,n - Specify any particular device to be used\n"); printf("--memory=[MEMMODE]\tSpecify which memory mode to use\n"); printf(" pageable - pageable memory\n"); printf(" pinned - non-pageable system memory\n"); printf("--mode=[MODE]\tSpecify the mode to use\n"); printf(" quick - performs a quick measurement\n"); printf(" range - measures a user-specified range of values\n"); printf(" shmoo - performs an intense shmoo of a large range of values\n"); printf("--htod\tMeasure host to device transfers\n"); printf("--dtoh\tMeasure device to host transfers\n"); printf("--dtod\tMeasure device to device transfers\n"); #if CUDART_VERSION >= 2020 printf("--wc\tAllocate pinned memory as write-combined\n"); #endif printf("--cputiming\tForce CPU-based timing always\n"); printf("Range mode options\n"); printf("--start=[SIZE]\tStarting transfer size in bytes\n"); printf("--end=[SIZE]\tEnding transfer size in bytes\n"); printf("--increment=[SIZE]\tIncrement size in bytes\n"); }
aba345df4d0f471f7d7ed2e8d07690f2024a8779.hip
// !!! This is a file automatically generated by hipify!!! /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 @precisions normal z -> z */ #include "common_magma.h" #define magmablas_zgemv_tesla magmablas_zgemv extern "C" void magmablas_zgemv_tesla(char trans, magma_int_t m, magma_int_t n, magmaDoubleComplex alpha, const magmaDoubleComplex *A, magma_int_t lda, const magmaDoubleComplex *x, magma_int_t incx, magmaDoubleComplex beta, magmaDoubleComplex *y, magma_int_t incy) { hipblasZgemv(trans, m, n, alpha, A, lda, x, incx, beta, y, incy); }
aba345df4d0f471f7d7ed2e8d07690f2024a8779.cu
/* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 @precisions normal z -> z */ #include "common_magma.h" #define magmablas_zgemv_tesla magmablas_zgemv extern "C" void magmablas_zgemv_tesla(char trans, magma_int_t m, magma_int_t n, magmaDoubleComplex alpha, const magmaDoubleComplex *A, magma_int_t lda, const magmaDoubleComplex *x, magma_int_t incx, magmaDoubleComplex beta, magmaDoubleComplex *y, magma_int_t incy) { cublasZgemv(trans, m, n, alpha, A, lda, x, incx, beta, y, incy); }
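// A hedged usage sketch for the wrapper above: magmablas_zgemv_tesla forwards to
// cublasZgemv and computes y := alpha*op(A)*x + beta*y. The helper name below is
// hypothetical; dA (m x n, leading dimension ldda), dx (length n) and dy (length m)
// are assumed to be device arrays allocated elsewhere, and MAGMA_Z_ONE / MAGMA_Z_ZERO
// are assumed to be the usual MAGMA constants for 1+0i and 0+0i.
static void example_zgemv_notrans(magma_int_t m, magma_int_t n,
                                  const magmaDoubleComplex *dA, magma_int_t ldda,
                                  const magmaDoubleComplex *dx,
                                  magmaDoubleComplex *dy)
{
    magmaDoubleComplex alpha = MAGMA_Z_ONE;   // scale applied to A*x
    magmaDoubleComplex beta  = MAGMA_Z_ZERO;  // previous contents of dy are discarded
    // 'N' = no transpose: dy = alpha * dA * dx + beta * dy, with unit strides
    magmablas_zgemv_tesla('N', m, n, alpha, dA, ldda, dx, 1, beta, dy, 1);
}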
f28fb6809b4882ea949c28914115a1f6df9796b1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2018-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <rmm/thrust_rmm_allocator.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/iterator/transform_output_iterator.h> #include <thrust/scan.h> #include <thrust/tuple.h> #include <cudf/detail/utilities/release_assert.cuh> #include <cudf/utilities/bit.hpp> #include <io/utilities/block_utils.cuh> #include <io/utilities/column_buffer.hpp> #include <io/parquet/parquet_gpu.hpp> #define LOG2_NTHREADS (5 + 2) #define NTHREADS (1 << LOG2_NTHREADS) #define NZ_BFRSZ (NTHREADS * 2) inline __device__ uint32_t rotl32(uint32_t x, uint32_t r) { return __funnelshift_l(x, x, r); // (x << r) | (x >> (32 - r)); } inline __device__ int rolling_index(int index) { return index & (NZ_BFRSZ - 1); } namespace cudf { namespace io { namespace parquet { namespace gpu { struct page_state_s { const uint8_t *data_start; const uint8_t *data_end; const uint8_t *dict_base; // ptr to dictionary page data int32_t dict_size; // size of dictionary data int32_t first_row; // First row in page to output int32_t num_rows; // Rows in page to decode (including rows to be skipped) int32_t first_output_value; // First value in page to output int32_t num_input_values; // total # of input/level values in the page int32_t dtype_len; // Output data type length int32_t dtype_len_in; // Can be larger than dtype_len if truncating 32-bit into 8-bit int32_t dict_bits; // # of bits to store dictionary indices uint32_t dict_run; int32_t dict_val; uint32_t initial_rle_run[NUM_LEVEL_TYPES]; // [def,rep] int32_t initial_rle_value[NUM_LEVEL_TYPES]; // [def,rep] int32_t error; PageInfo page; ColumnChunkDesc col; // (leaf) value decoding int32_t nz_count; // number of valid entries in nz_idx (write position in circular buffer) int32_t dict_pos; // write position of dictionary indices int32_t out_pos; // read position of final output int32_t ts_scale; // timestamp scale: <0: divide by -ts_scale, >0: multiply by ts_scale uint32_t nz_idx[NZ_BFRSZ]; // circular buffer of non-null value positions uint32_t dict_idx[NZ_BFRSZ]; // Dictionary index, boolean, or string offset values uint32_t str_len[NZ_BFRSZ]; // String length for plain encoding of strings // repetition/definition level decoding int32_t input_value_count; // how many values of the input we've processed int32_t input_row_count; // how many rows of the input we've processed int32_t input_leaf_count; // how many leaf values of the input we've processed uint32_t rep[NZ_BFRSZ]; // circular buffer of repetition level values uint32_t def[NZ_BFRSZ]; // circular buffer of definition level values const uint8_t *lvl_start[NUM_LEVEL_TYPES]; // [def,rep] int32_t lvl_count[NUM_LEVEL_TYPES]; // how many of each of the streams we've decoded int32_t row_index_lower_bound; // lower bound of row indices we should process }; /** * @brief Computes a 32-bit hash when given a byte stream and range. 
* * MurmurHash3_32 implementation from * https://github.com/aappleby/smhasher/blob/master/src/MurmurHash3.cpp * * MurmurHash3 was written by Austin Appleby, and is placed in the public * domain. The author hereby disclaims copyright to this source code. * * @param[in] key The input data to hash * @param[in] len The length of the input data * @param[in] seed An initialization value * * @return The hash value */ __device__ uint32_t device_str2hash32(const char *key, size_t len, uint32_t seed = 33) { const uint8_t *p = reinterpret_cast<const uint8_t *>(key); uint32_t h1 = seed, k1; const uint32_t c1 = 0xcc9e2d51; const uint32_t c2 = 0x1b873593; int l = len; // body while (l >= 4) { k1 = p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24); k1 *= c1; k1 = rotl32(k1, 15); k1 *= c2; h1 ^= k1; h1 = rotl32(h1, 13); h1 = h1 * 5 + 0xe6546b64; p += 4; l -= 4; } // tail k1 = 0; switch (l) { case 3: k1 ^= p[2] << 16; case 2: k1 ^= p[1] << 8; case 1: k1 ^= p[0]; k1 *= c1; k1 = rotl32(k1, 15); k1 *= c2; h1 ^= k1; } // finalization h1 ^= len; h1 ^= h1 >> 16; h1 *= 0x85ebca6b; h1 ^= h1 >> 13; h1 *= 0xc2b2ae35; h1 ^= h1 >> 16; return h1; } /** * @brief Read a 32-bit varint integer * * @param[in,out] cur The current data position, updated after the read * @param[in] end The end data position * * @return The 32-bit value read */ inline __device__ uint32_t get_vlq32(const uint8_t *&cur, const uint8_t *end) { uint32_t v = *cur++; if (v >= 0x80 && cur < end) { v = (v & 0x7f) | ((*cur++) << 7); if (v >= (0x80 << 7) && cur < end) { v = (v & ((0x7f << 7) | 0x7f)) | ((*cur++) << 14); if (v >= (0x80 << 14) && cur < end) { v = (v & ((0x7f << 14) | (0x7f << 7) | 0x7f)) | ((*cur++) << 21); if (v >= (0x80 << 21) && cur < end) { v = (v & ((0x7f << 21) | (0x7f << 14) | (0x7f << 7) | 0x7f)) | ((*cur++) << 28); } } } } return v; } /** * @brief Parse the beginning of the level section (definition or repetition), * initializes the initial RLE run & value, and returns the section length * * @param[in,out] s The page state * @param[in] cur The current data position * @param[in] end The end of the data * @param[in] level_bits The bits required * * @return The length of the section */ __device__ uint32_t InitLevelSection(page_state_s *s, const uint8_t *cur, const uint8_t *end, level_type lvl) { int32_t len; int level_bits = s->col.level_bits[lvl]; int encoding = lvl == level_type::DEFINITION ? s->page.definition_level_encoding : s->page.repetition_level_encoding; if (level_bits == 0) { len = 0; s->initial_rle_run[lvl] = s->page.num_input_values * 2; // repeated value s->initial_rle_value[lvl] = 0; s->lvl_start[lvl] = cur; } else if (encoding == RLE) { if (cur + 4 < end) { uint32_t run; len = 4 + (cur[0]) + (cur[1] << 8) + (cur[2] << 16) + (cur[3] << 24); cur += 4; run = get_vlq32(cur, end); s->initial_rle_run[lvl] = run; if (!(run & 1)) { int v = (cur < end) ? cur[0] : 0; cur++; if (level_bits > 8) { v |= ((cur < end) ? 
cur[0] : 0) << 8; cur++; } s->initial_rle_value[lvl] = v; } s->lvl_start[lvl] = cur; if (cur > end) { s->error = 2; } } else { len = 0; s->error = 2; } } else if (encoding == BIT_PACKED) { len = (s->page.num_input_values * level_bits + 7) >> 3; s->initial_rle_run[lvl] = ((s->page.num_input_values + 7) >> 3) * 2 + 1; // literal run s->initial_rle_value[lvl] = 0; s->lvl_start[lvl] = cur; } else { s->error = 3; len = 0; } return (uint32_t)len; } /** * @brief Decode values out of a definition or repetition stream * * @param[in,out] s Page state input/output * @param[in] t target_count Target count of stream values on output * @param[in] t Warp0 thread ID (0..31) * @param[in] lvl The level type we are decoding - DEFINITION or REPETITION */ __device__ void gpuDecodeStream( uint32_t *output, page_state_s *s, int32_t target_count, int t, level_type lvl) { const uint8_t *cur_def = s->lvl_start[lvl]; const uint8_t *end = s->data_start; uint32_t level_run = s->initial_rle_run[lvl]; int32_t level_val = s->initial_rle_value[lvl]; int level_bits = s->col.level_bits[lvl]; int32_t num_input_values = s->num_input_values; int32_t value_count = s->lvl_count[lvl]; int32_t batch_coded_count = 0; while (value_count < target_count && value_count < num_input_values) { int batch_len; if (level_run <= 1) { // Get a new run symbol from the byte stream int sym_len = 0; if (!t) { const uint8_t *cur = cur_def; if (cur < end) { level_run = get_vlq32(cur, end); } if (!(level_run & 1)) { if (cur < end) level_val = cur[0]; cur++; if (level_bits > 8) { if (cur < end) level_val |= cur[0] << 8; cur++; } } if (cur > end || level_run <= 1) { s->error = 0x10; } sym_len = (int32_t)(cur - cur_def); __threadfence_block(); } sym_len = SHFL0(sym_len); level_val = SHFL0(level_val); level_run = SHFL0(level_run); cur_def += sym_len; } if (s->error) { break; } batch_len = min(num_input_values - value_count, 32); if (level_run & 1) { // Literal run int batch_len8; batch_len = min(batch_len, (level_run >> 1) * 8); batch_len8 = (batch_len + 7) >> 3; if (t < batch_len) { int bitpos = t * level_bits; const uint8_t *cur = cur_def + (bitpos >> 3); bitpos &= 7; if (cur < end) level_val = cur[0]; cur++; if (level_bits > 8 - bitpos && cur < end) { level_val |= cur[0] << 8; cur++; if (level_bits > 16 - bitpos && cur < end) level_val |= cur[0] << 16; } level_val = (level_val >> bitpos) & ((1 << level_bits) - 1); } level_run -= batch_len8 * 2; cur_def += batch_len8 * level_bits; } else { // Repeated value batch_len = min(batch_len, level_run >> 1); level_run -= batch_len * 2; } if (t < batch_len) { int idx = value_count + t; output[idx & (NZ_BFRSZ - 1)] = level_val; } batch_coded_count += batch_len; value_count += batch_len; } // update the stream info if (!t) { s->lvl_start[lvl] = cur_def; s->initial_rle_run[lvl] = level_run; s->initial_rle_value[lvl] = level_val; s->lvl_count[lvl] = value_count; } } /** * @brief Performs RLE decoding of dictionary indexes * * @param[in,out] s Page state input/output * @param[in] target_pos Target index position in dict_idx buffer (may exceed this value by up to * 31) * @param[in] t Warp1 thread ID (0..31) * * @return The new output position */ __device__ int gpuDecodeDictionaryIndices(volatile page_state_s *s, int target_pos, int t) { const uint8_t *end = s->data_end; int dict_bits = s->dict_bits; int pos = s->dict_pos; while (pos < target_pos) { int is_literal, batch_len; if (!t) { uint32_t run = s->dict_run; const uint8_t *cur = s->data_start; if (run <= 1) { run = (cur < end) ? 
get_vlq32(cur, end) : 0; if (!(run & 1)) { // Repeated value int bytecnt = (dict_bits + 7) >> 3; if (cur + bytecnt <= end) { int32_t run_val = cur[0]; if (bytecnt > 1) { run_val |= cur[1] << 8; if (bytecnt > 2) { run_val |= cur[2] << 16; if (bytecnt > 3) { run_val |= cur[3] << 24; } } } s->dict_val = run_val & ((1 << dict_bits) - 1); } cur += bytecnt; } } if (run & 1) { // Literal batch: must output a multiple of 8, except for the last batch int batch_len_div8; batch_len = max(min(32, (int)(run >> 1) * 8), 1); batch_len_div8 = (batch_len + 7) >> 3; run -= batch_len_div8 * 2; cur += batch_len_div8 * dict_bits; } else { batch_len = max(min(32, (int)(run >> 1)), 1); run -= batch_len * 2; } s->dict_run = run; s->data_start = cur; is_literal = run & 1; __threadfence_block(); } SYNCWARP(); is_literal = SHFL0(is_literal); batch_len = SHFL0(batch_len); if (t < batch_len) { int dict_idx = s->dict_val; if (is_literal) { int32_t ofs = (t - ((batch_len + 7) & ~7)) * dict_bits; const uint8_t *p = s->data_start + (ofs >> 3); ofs &= 7; if (p < end) { uint32_t c = 8 - ofs; dict_idx = (*p++) >> ofs; if (c < dict_bits && p < end) { dict_idx |= (*p++) << c; c += 8; if (c < dict_bits && p < end) { dict_idx |= (*p++) << c; c += 8; if (c < dict_bits && p < end) { dict_idx |= (*p++) << c; } } } dict_idx &= (1 << dict_bits) - 1; } } s->dict_idx[(pos + t) & (NZ_BFRSZ - 1)] = dict_idx; } pos += batch_len; } return pos; } /** * @brief Performs RLE decoding of dictionary indexes, for when dict_size=1 * * @param[in,out] s Page state input/output * @param[in] target_pos Target write position * @param[in] t Thread ID * * @return The new output position */ __device__ int gpuDecodeRleBooleans(volatile page_state_s *s, int target_pos, int t) { const uint8_t *end = s->data_end; int pos = s->dict_pos; while (pos < target_pos) { int is_literal, batch_len; if (!t) { uint32_t run = s->dict_run; const uint8_t *cur = s->data_start; if (run <= 1) { run = (cur < end) ? get_vlq32(cur, end) : 0; if (!(run & 1)) { // Repeated value s->dict_val = (cur < end) ? cur[0] & 1 : 0; cur++; } } if (run & 1) { // Literal batch: must output a multiple of 8, except for the last batch int batch_len_div8; batch_len = max(min(32, (int)(run >> 1) * 8), 1); if (batch_len >= 8) { batch_len &= ~7; } batch_len_div8 = (batch_len + 7) >> 3; run -= batch_len_div8 * 2; cur += batch_len_div8; } else { batch_len = max(min(32, (int)(run >> 1)), 1); run -= batch_len * 2; } s->dict_run = run; s->data_start = cur; is_literal = run & 1; __threadfence_block(); } SYNCWARP(); is_literal = SHFL0(is_literal); batch_len = SHFL0(batch_len); if (t < batch_len) { int dict_idx; if (is_literal) { int32_t ofs = t - ((batch_len + 7) & ~7); const uint8_t *p = s->data_start + (ofs >> 3); dict_idx = (p < end) ? 
(p[0] >> (ofs & 7u)) & 1 : 0; } else { dict_idx = s->dict_val; } s->dict_idx[(pos + t) & (NZ_BFRSZ - 1)] = dict_idx; } pos += batch_len; } return pos; } /** * @brief Parses the length and position of strings * * @param[in,out] s Page state input/output * @param[in] target_pos Target output position * @param[in] t Thread ID * * @return The new output position */ __device__ void gpuInitStringDescriptors(volatile page_state_s *s, int target_pos, int t) { int pos = s->dict_pos; // This step is purely serial if (!t) { const uint8_t *cur = s->data_start; int dict_size = s->dict_size; int k = s->dict_val; while (pos < target_pos) { int len; if (k + 4 <= dict_size) { len = (cur[k]) | (cur[k + 1] << 8) | (cur[k + 2] << 16) | (cur[k + 3] << 24); k += 4; if (k + len > dict_size) { len = 0; } } else { len = 0; } s->dict_idx[pos & (NZ_BFRSZ - 1)] = k; s->str_len[pos & (NZ_BFRSZ - 1)] = len; k += len; pos++; } s->dict_val = k; __threadfence_block(); } } /** * @brief Output a string descriptor * * @param[in,out] s Page state input/output * @param[in] src_pos Source position * @param[in] dstv Pointer to row output data (string descriptor or 32-bit hash) */ inline __device__ void gpuOutputString(volatile page_state_s *s, int src_pos, void *dstv) { const char *ptr = NULL; size_t len = 0; if (s->dict_base) { // String dictionary uint32_t dict_pos = (s->dict_bits > 0) ? s->dict_idx[src_pos & (NZ_BFRSZ - 1)] * sizeof(nvstrdesc_s) : 0; if (dict_pos < (uint32_t)s->dict_size) { const nvstrdesc_s *src = reinterpret_cast<const nvstrdesc_s *>(s->dict_base + dict_pos); ptr = src->ptr; len = src->count; } } else { // Plain encoding uint32_t dict_pos = s->dict_idx[src_pos & (NZ_BFRSZ - 1)]; if (dict_pos <= (uint32_t)s->dict_size) { ptr = reinterpret_cast<const char *>(s->data_start + dict_pos); len = s->str_len[src_pos & (NZ_BFRSZ - 1)]; } } if (s->dtype_len == 4) { // Output hash *static_cast<uint32_t *>(dstv) = device_str2hash32(ptr, len); } else { // Output string descriptor nvstrdesc_s *dst = static_cast<nvstrdesc_s *>(dstv); dst->ptr = ptr; dst->count = len; } } /** * @brief Output a boolean * * @param[in,out] s Page state input/output * @param[in] src_pos Source position * @param[in] dst Pointer to row output data */ inline __device__ void gpuOutputBoolean(volatile page_state_s *s, int src_pos, uint8_t *dst) { *dst = s->dict_idx[src_pos & (NZ_BFRSZ - 1)]; } /** * @brief Store a 32-bit data element * * @param[out] dst ptr to output * @param[in] src8 raw input bytes * @param[in] dict_pos byte position in dictionary * @param[in] dict_size size of dictionary */ inline __device__ void gpuStoreOutput(uint32_t *dst, const uint8_t *src8, uint32_t dict_pos, uint32_t dict_size) { uint32_t bytebuf; unsigned int ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // bytes -> bits if (dict_pos < dict_size) { bytebuf = *reinterpret_cast<const uint32_t *>(src8 + dict_pos); if (ofs) { uint32_t bytebufnext = *reinterpret_cast<const uint32_t *>(src8 + dict_pos + 4); bytebuf = __funnelshift_r(bytebuf, bytebufnext, ofs); } } else { bytebuf = 0; } *dst = bytebuf; } /** * @brief Store a 64-bit data element * * @param[out] dst ptr to output * @param[in] src8 raw input bytes * @param[in] dict_pos byte position in dictionary * @param[in] dict_size size of dictionary */ inline __device__ void gpuStoreOutput(uint2 *dst, const uint8_t *src8, uint32_t dict_pos, uint32_t dict_size) { uint2 v; unsigned int ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // 
bytes -> bits if (dict_pos < dict_size) { v.x = *reinterpret_cast<const uint32_t *>(src8 + dict_pos + 0); v.y = *reinterpret_cast<const uint32_t *>(src8 + dict_pos + 4); if (ofs) { uint32_t next = *reinterpret_cast<const uint32_t *>(src8 + dict_pos + 8); v.x = __funnelshift_r(v.x, v.y, ofs); v.y = __funnelshift_r(v.y, next, ofs); } } else { v.x = v.y = 0; } *dst = v; } /** * @brief Convert an INT96 Spark timestamp to 64-bit timestamp * * @param[in,out] s Page state input/output * @param[in] src_pos Source position * @param[in] dst Pointer to row output data */ inline __device__ void gpuOutputInt96Timestamp(volatile page_state_s *s, int src_pos, int64_t *dst) { const uint8_t *src8; uint32_t dict_pos, dict_size = s->dict_size, ofs; int64_t ts; if (s->dict_base) { // Dictionary dict_pos = (s->dict_bits > 0) ? s->dict_idx[src_pos & (NZ_BFRSZ - 1)] : 0; src8 = s->dict_base; } else { // Plain dict_pos = src_pos; src8 = s->data_start; } dict_pos *= (uint32_t)s->dtype_len_in; ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // bytes -> bits if (dict_pos + 4 < dict_size) { uint3 v; int64_t nanos, secs, days; v.x = *reinterpret_cast<const uint32_t *>(src8 + dict_pos + 0); v.y = *reinterpret_cast<const uint32_t *>(src8 + dict_pos + 4); v.z = *reinterpret_cast<const uint32_t *>(src8 + dict_pos + 8); if (ofs) { uint32_t next = *reinterpret_cast<const uint32_t *>(src8 + dict_pos + 12); v.x = __funnelshift_r(v.x, v.y, ofs); v.y = __funnelshift_r(v.y, v.z, ofs); v.z = __funnelshift_r(v.z, next, ofs); } nanos = v.y; nanos <<= 32; nanos |= v.x; // Convert from Julian day at noon to UTC seconds days = static_cast<int32_t>(v.z); secs = (days - 2440588) * (24 * 60 * 60); // TBD: Should be noon instead of midnight, but this matches pyarrow if (s->col.ts_clock_rate) ts = (secs * s->col.ts_clock_rate) + nanos / (1000000000 / s->col.ts_clock_rate); // Output to desired clock rate else ts = (secs * 1000000000) + nanos; } else { ts = 0; } *dst = ts; } /** * @brief Output a 64-bit timestamp * * @param[in,out] s Page state input/output * @param[in] src_pos Source position * @param[in] dst Pointer to row output data */ inline __device__ void gpuOutputInt64Timestamp(volatile page_state_s *s, int src_pos, int64_t *dst) { const uint8_t *src8; uint32_t dict_pos, dict_size = s->dict_size, ofs; int64_t ts; if (s->dict_base) { // Dictionary dict_pos = (s->dict_bits > 0) ? 
s->dict_idx[src_pos & (NZ_BFRSZ - 1)] : 0; src8 = s->dict_base; } else { // Plain dict_pos = src_pos; src8 = s->data_start; } dict_pos *= (uint32_t)s->dtype_len_in; ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // bytes -> bits if (dict_pos + 4 < dict_size) { uint2 v; int64_t val; int32_t ts_scale; v.x = *reinterpret_cast<const uint32_t *>(src8 + dict_pos + 0); v.y = *reinterpret_cast<const uint32_t *>(src8 + dict_pos + 4); if (ofs) { uint32_t next = *reinterpret_cast<const uint32_t *>(src8 + dict_pos + 8); v.x = __funnelshift_r(v.x, v.y, ofs); v.y = __funnelshift_r(v.y, next, ofs); } val = v.y; val <<= 32; val |= v.x; // Output to desired clock rate ts_scale = s->ts_scale; if (ts_scale < 0) { // round towards negative infinity int sign = (val < 0); ts = ((val + sign) / -ts_scale) + sign; } else { ts = val * ts_scale; } } else { ts = 0; } *dst = ts; } /** * @brief Powers of 10 */ static const __device__ __constant__ double kPow10[40] = { 1.0, 1.e1, 1.e2, 1.e3, 1.e4, 1.e5, 1.e6, 1.e7, 1.e8, 1.e9, 1.e10, 1.e11, 1.e12, 1.e13, 1.e14, 1.e15, 1.e16, 1.e17, 1.e18, 1.e19, 1.e20, 1.e21, 1.e22, 1.e23, 1.e24, 1.e25, 1.e26, 1.e27, 1.e28, 1.e29, 1.e30, 1.e31, 1.e32, 1.e33, 1.e34, 1.e35, 1.e36, 1.e37, 1.e38, 1.e39, }; /** * @brief Output a decimal type ([INT32..INT128] + scale) as a 64-bit float * * @param[in,out] s Page state input/output * @param[in] src_pos Source position * @param[in] dst Pointer to row output data * @param[in] dtype Stored data type */ inline __device__ void gpuOutputDecimal(volatile page_state_s *s, int src_pos, double *dst, int dtype) { const uint8_t *dict; uint32_t dict_pos, dict_size = s->dict_size, dtype_len_in; int64_t i128_hi, i128_lo; int32_t scale; double d; if (s->dict_base) { // Dictionary dict_pos = (s->dict_bits > 0) ? s->dict_idx[src_pos & (NZ_BFRSZ - 1)] : 0; dict = s->dict_base; } else { // Plain dict_pos = src_pos; dict = s->data_start; } dtype_len_in = s->dtype_len_in; dict_pos *= dtype_len_in; // FIXME: Not very efficient (currently reading 1 byte at a time) -> need a variable-length // unaligned load utility function (both little-endian and big-endian versions) if (dtype == INT32) { int32_t lo32 = 0; for (unsigned int i = 0; i < dtype_len_in; i++) { uint32_t v = (dict_pos + i < dict_size) ? dict[dict_pos + i] : 0; lo32 |= v << (i * 8); } i128_lo = lo32; i128_hi = lo32 >> 31; } else if (dtype == INT64) { int64_t lo64 = 0; for (unsigned int i = 0; i < dtype_len_in; i++) { uint64_t v = (dict_pos + i < dict_size) ? dict[dict_pos + i] : 0; lo64 |= v << (i * 8); } i128_lo = lo64; i128_hi = lo64 >> 63; } else // if (dtype == FIXED_LENGTH_BYTE_ARRAY) { i128_lo = 0; for (unsigned int i = dtype_len_in - min(dtype_len_in, 8); i < dtype_len_in; i++) { uint32_t v = (dict_pos + i < dict_size) ? dict[dict_pos + i] : 0; i128_lo = (i128_lo << 8) | v; } if (dtype_len_in > 8) { i128_hi = 0; for (unsigned int i = dtype_len_in - min(dtype_len_in, 16); i < dtype_len_in - 8; i++) { uint32_t v = (dict_pos + i < dict_size) ? dict[dict_pos + i] : 0; i128_hi = (i128_hi << 8) | v; } if (dtype_len_in < 16) { i128_hi <<= 64 - (dtype_len_in - 8) * 8; i128_hi >>= 64 - (dtype_len_in - 8) * 8; } } else { if (dtype_len_in < 8) { i128_lo <<= 64 - dtype_len_in * 8; i128_lo >>= 64 - dtype_len_in * 8; } i128_hi = i128_lo >> 63; } } scale = s->col.decimal_scale; d = Int128ToDouble_rn(i128_lo, i128_hi); *dst = (scale < 0) ? 
(d * kPow10[min(-scale, 39)]) : (d / kPow10[min(scale, 39)]); } /** * @brief Output a small fixed-length value * * @param[in,out] s Page state input/output * @param[in] src_pos Source position * @param[in] dst Pointer to row output data */ template <typename T> inline __device__ void gpuOutputFast(volatile page_state_s *s, int src_pos, T *dst) { const uint8_t *dict; uint32_t dict_pos, dict_size = s->dict_size; if (s->dict_base) { // Dictionary dict_pos = (s->dict_bits > 0) ? s->dict_idx[src_pos & (NZ_BFRSZ - 1)] : 0; dict = s->dict_base; } else { // Plain dict_pos = src_pos; dict = s->data_start; } dict_pos *= (uint32_t)s->dtype_len_in; gpuStoreOutput(dst, dict, dict_pos, dict_size); } /** * @brief Output a N-byte value * * @param[in,out] s Page state input/output * @param[in] src_pos Source position * @param[in] dst8 Pointer to row output data * @param[in] len Length of element */ static __device__ void gpuOutputGeneric(volatile page_state_s *s, int src_pos, uint8_t *dst8, int len) { const uint8_t *dict; uint32_t dict_pos, dict_size = s->dict_size; if (s->dict_base) { // Dictionary dict_pos = (s->dict_bits > 0) ? s->dict_idx[src_pos & (NZ_BFRSZ - 1)] : 0; dict = s->dict_base; } else { // Plain dict_pos = src_pos; dict = s->data_start; } dict_pos *= (uint32_t)s->dtype_len_in; if (len & 3) { // Generic slow path for (unsigned int i = 0; i < len; i++) { dst8[i] = (dict_pos + i < dict_size) ? dict[dict_pos + i] : 0; } } else { // Copy 4 bytes at a time const uint8_t *src8 = dict; unsigned int ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // bytes -> bits for (unsigned int i = 0; i < len; i += 4) { uint32_t bytebuf; if (dict_pos < dict_size) { bytebuf = *reinterpret_cast<const uint32_t *>(src8 + dict_pos); if (ofs) { uint32_t bytebufnext = *reinterpret_cast<const uint32_t *>(src8 + dict_pos + 4); bytebuf = __funnelshift_r(bytebuf, bytebufnext, ofs); } } else { bytebuf = 0; } dict_pos += 4; *reinterpret_cast<uint32_t *>(dst8 + i) = bytebuf; } } } /** * @brief Sets up block-local page state information from the global pages. 
* * @param[in, out] s The local page state to be filled in * @param[in] p The global page to be copied from * @param[in] chunks The global list of chunks * @param[in] num_rows Maximum number of rows to read * @param[in] min_row crop all rows below min_row * @param[in] num_chunk Number of column chunks */ static __device__ bool setupLocalPageInfo(page_state_s *const s, PageInfo *p, ColumnChunkDesc const *chunks, size_t min_row, size_t num_rows, int32_t num_chunks) { int t = threadIdx.x; int chunk_idx; // Fetch page info // NOTE: Assumes that sizeof(PageInfo) <= 256 (and is padded to 4 bytes) if (t < sizeof(PageInfo) / sizeof(uint32_t)) { reinterpret_cast<uint32_t *>(&s->page)[t] = reinterpret_cast<const uint32_t *>(p)[t]; } __syncthreads(); if (s->page.flags & PAGEINFO_FLAGS_DICTIONARY) { return false; } // Fetch column chunk info chunk_idx = s->page.chunk_idx; if ((uint32_t)chunk_idx < (uint32_t)num_chunks) { // NOTE: Assumes that sizeof(ColumnChunkDesc) <= 256 (and is padded to 4 bytes) if (t < sizeof(ColumnChunkDesc) / sizeof(uint32_t)) { reinterpret_cast<uint32_t *>(&s->col)[t] = reinterpret_cast<const uint32_t *>(&chunks[chunk_idx])[t]; } } // zero nested value and valid counts int d = 0; while (d < s->page.num_nesting_levels) { if (d + t < s->page.num_nesting_levels) { s->page.nesting[d + t].valid_count = 0; s->page.nesting[d + t].value_count = 0; } d += blockDim.x; } __syncthreads(); if (!t) { s->error = 0; // our starting row (absolute index) is // col.start_row == absolute row index // page.chunk-row == relative row index within the chunk size_t page_start_row = s->col.start_row + s->page.chunk_row; // IMPORTANT : nested schemas can have 0 rows in a page but still have // values. The case is: // - On page N-1, the last row starts, with 2/6 values encoded // - On page N, the remaining 4/6 values are encoded, but there are no new rows. // if (s->page.num_input_values > 0 && s->page.num_rows > 0) { if (s->page.num_input_values > 0) { uint8_t *cur = s->page.page_data; uint8_t *end = cur + s->page.uncompressed_page_size; uint32_t dtype_len_out = s->col.data_type >> 3; s->ts_scale = 0; // Validate data type switch (s->col.data_type & 7) { case BOOLEAN: s->dtype_len = 1; // Boolean are stored as 1 byte on the output break; case INT32: case FLOAT: s->dtype_len = 4; break; case INT64: if (s->col.ts_clock_rate) { int32_t units = 0; if (s->col.converted_type == TIME_MICROS || s->col.converted_type == TIMESTAMP_MICROS) units = 1000000; else if (s->col.converted_type == TIME_MILLIS || s->col.converted_type == TIMESTAMP_MILLIS) units = 1000; if (units && units != s->col.ts_clock_rate) s->ts_scale = (s->col.ts_clock_rate < units) ? 
-(units / s->col.ts_clock_rate) : (s->col.ts_clock_rate / units); } // Fall through to DOUBLE case DOUBLE: s->dtype_len = 8; break; case INT96: s->dtype_len = 12; break; case BYTE_ARRAY: s->dtype_len = sizeof(nvstrdesc_s); break; default: // FIXED_LEN_BYTE_ARRAY: s->dtype_len = dtype_len_out; s->error |= (s->dtype_len <= 0); break; } // Special check for downconversions s->dtype_len_in = s->dtype_len; if (s->col.converted_type == DECIMAL) { s->dtype_len = 8; // Convert DECIMAL to 64-bit float } else if ((s->col.data_type & 7) == INT32) { if (dtype_len_out == 1) s->dtype_len = 1; // INT8 output if (dtype_len_out == 2) s->dtype_len = 2; // INT16 output } else if ((s->col.data_type & 7) == BYTE_ARRAY && dtype_len_out == 4) { s->dtype_len = 4; // HASH32 output } else if ((s->col.data_type & 7) == INT96) { s->dtype_len = 8; // Convert to 64-bit timestamp } // first row within the page to start reading if (page_start_row >= min_row) { s->first_row = 0; } else { s->first_row = (int32_t)min(min_row - page_start_row, (size_t)s->page.num_rows); } // # of rows within the page to read s->num_rows = s->page.num_rows; if ((page_start_row + s->first_row) + s->num_rows > min_row + num_rows) { s->num_rows = (int32_t)max((int64_t)(min_row + num_rows - (page_start_row + s->first_row)), INT64_C(0)); } // during the decoding step we need to offset the global output buffers // for each level of nesting so that we write to the section this page // is responsible for. // - for flat schemas, we can do this directly by using row counts // - for nested schemas, these offsets are computed during the preprocess step if (s->col.column_data_base != nullptr) { int max_depth = s->col.max_nesting_depth; for (int idx = 0; idx < max_depth; idx++) { PageNestingInfo *pni = &s->page.nesting[idx]; size_t output_offset; // schemas without lists if (s->col.max_level[level_type::REPETITION] == 0) { output_offset = page_start_row >= min_row ? page_start_row - min_row : 0; } // for schemas with lists, we've already got the exactly value precomputed else { output_offset = pni->page_start_value; } pni->data_out = static_cast<uint8_t *>(s->col.column_data_base[idx]); if (pni->data_out != nullptr) { // anything below max depth with a valid data pointer must be a list, so the // element size is the size of the offset type. uint32_t len = idx < max_depth - 1 ? sizeof(cudf::size_type) : s->dtype_len; pni->data_out += (output_offset * len); } pni->valid_map = s->col.valid_map_base[idx]; if (pni->valid_map != nullptr) { pni->valid_map += output_offset >> 5; pni->valid_map_offset = (int32_t)(output_offset & 0x1f); } } } s->first_output_value = 0; // Find the compressed size of repetition levels cur += InitLevelSection(s, cur, end, level_type::REPETITION); // Find the compressed size of definition levels cur += InitLevelSection(s, cur, end, level_type::DEFINITION); s->dict_bits = 0; s->dict_base = 0; s->dict_size = 0; switch (s->page.encoding) { case PLAIN_DICTIONARY: case RLE_DICTIONARY: // RLE-packed dictionary indices, first byte indicates index length in bits if (((s->col.data_type & 7) == BYTE_ARRAY) && (s->col.str_dict_index)) { // String dictionary: use index s->dict_base = reinterpret_cast<const uint8_t *>(s->col.str_dict_index); s->dict_size = s->col.page_info[0].num_input_values * sizeof(nvstrdesc_s); } else { s->dict_base = s->col.page_info[0].page_data; // dictionary is always stored in the first page s->dict_size = s->col.page_info[0].uncompressed_page_size; } s->dict_run = 0; s->dict_val = 0; s->dict_bits = (cur < end) ? 
*cur++ : 0; if (s->dict_bits > 32 || !s->dict_base) { s->error = (10 << 8) | s->dict_bits; } break; case PLAIN: s->dict_size = static_cast<int32_t>(end - cur); s->dict_val = 0; if ((s->col.data_type & 7) == BOOLEAN) { s->dict_run = s->dict_size * 2 + 1; } break; case RLE: s->dict_run = 0; break; default: s->error = 1; // Unsupported encoding break; } if (cur > end) { s->error = 1; } s->data_start = cur; s->data_end = end; } else { s->error = 1; } s->lvl_count[level_type::REPETITION] = 0; s->lvl_count[level_type::DEFINITION] = 0; s->nz_count = 0; s->num_input_values = s->page.num_input_values; s->dict_pos = 0; s->out_pos = 0; // handle row bounds (skip_rows, min_rows) s->input_row_count = s->first_row; // return the lower bound to compare (page-relative) thread row index against. Explanation: // In the case of nested schemas, rows can span page boundaries. That is to say, // we can encounter the first value for row X on page M, but the last value for page M // might not be the last value for row X. page M+1 (or further) may contain the last value. // // This means that the first values we encounter for a given page (M+1) may not belong to the // row indicated by chunk_row, but to the row before it that spanned page boundaries. If that // previous row is within the overall row bounds, include the values by allowing relative row // index -1 int max_row = (min_row + num_rows) - 1; if (min_row < page_start_row && max_row >= page_start_row - 1) { s->row_index_lower_bound = -1; } else { s->row_index_lower_bound = s->first_row; } // if we're in the decoding step, jump directly to the first // value we care about if (s->col.column_data_base != nullptr) { // for flat hierarchies, we haven't computed skipped_values yet, but we can do so trivially // now if (s->col.max_level[level_type::REPETITION] == 0) { s->page.skipped_values = s->first_row; s->page.skipped_leaf_values = s->first_row; } s->input_value_count = s->page.skipped_values; } else { s->input_value_count = 0; s->input_leaf_count = 0; s->page.skipped_values = -1; s->page.skipped_leaf_values = -1; } __threadfence_block(); } __syncthreads(); return true; } /** * @brief Store a validity mask containing value_count bits into the output validity buffer of the * page. * * @param[in,out] pni The page/nesting information to store the mask in. The validity map offset is * also updated * @param[in] valid_mask The validity mask to be stored * @param[in] value_count # of bits in the validity mask */ static __device__ void store_validity(PageNestingInfo *pni, uint32_t valid_mask, int32_t value_count) { int word_offset = pni->valid_map_offset / 32; int bit_offset = pni->valid_map_offset % 32; // if we fit entirely in the output word if (bit_offset + value_count <= 32) { uint32_t relevant_mask = static_cast<uint32_t>((static_cast<uint64_t>(1) << value_count) - 1); if (relevant_mask == ~0) { pni->valid_map[word_offset] = valid_mask; } else { atomicAnd(pni->valid_map + word_offset, ~(relevant_mask << bit_offset)); atomicOr(pni->valid_map + word_offset, (valid_mask & relevant_mask) << bit_offset); } } // we're going to spill over into the next word. // note : writing both values here is the lazy/slow way. we could be writing just // the first word and rolling the remaining bits over into the next call. // however, some basic performance tests shows almost no difference between these two // methods. More detailed performance testing might be worthwhile here. else { uint32_t bits_left = 32 - bit_offset; // first word. 
strip bits_left bits off the beginning and store that uint32_t relevant_mask = ((1 << bits_left) - 1); uint32_t mask_word0 = valid_mask & relevant_mask; atomicAnd(pni->valid_map + word_offset, ~(relevant_mask << bit_offset)); atomicOr(pni->valid_map + word_offset, mask_word0 << bit_offset); // second word. strip the remainder of the bits off the end and store that relevant_mask = ((1 << (value_count - bits_left)) - 1); uint32_t mask_word1 = valid_mask & (relevant_mask << bits_left); atomicAnd(pni->valid_map + word_offset + 1, ~(relevant_mask)); atomicOr(pni->valid_map + word_offset + 1, mask_word1 >> bits_left); } pni->valid_map_offset += value_count; } /** * @brief Compute the nesting bounds within the hierarchy to add values to, and the definition level * D to which we should considered them null or not. * * @param[out] start_depth The start nesting depth * @param[out] end_depth The end nesting depth (inclusive) * @param[out] d The definition level up to which added values are not-null. if t is out of bounds, * d will be -1 * @param[in] s Local page information * @param[in] input_value_count The current count of input level values we have processed * @param[in] target_input_value_count The desired # of input level values we want to process * @param[in] t Thread index */ inline __device__ void get_nesting_bounds(int &start_depth, int &end_depth, int &d, page_state_s *s, int input_value_count, int32_t target_input_value_count, int t) { start_depth = -1; end_depth = -1; d = -1; if (input_value_count + t < target_input_value_count) { int index = rolling_index(input_value_count + t); d = s->def[index]; // if we have repetition (there are list columns involved) we have to // bound what nesting levels we apply values to if (s->col.max_level[level_type::REPETITION] > 0) { int r = s->rep[index]; start_depth = s->page.nesting[r].start_depth; end_depth = s->page.nesting[d].end_depth; } // for columns without repetition (even ones involving structs) we always // traverse the entire hierarchy. else { start_depth = 0; end_depth = s->col.max_nesting_depth - 1; } } } /** * @brief Process a batch of incoming repetition/definition level values and generate * validity, nested column offsets (where appropriate) and decoding indices. 
* * @param[in] target_input_value_count The # of repetition/definition levels to process up to * @param[in] s Local page information * @param[in] t Thread index */ static __device__ void gpuUpdateValidityOffsetsAndRowIndices(int32_t target_input_value_count, page_state_s *s, int t) { // max nesting depth of the column int max_depth = s->col.max_nesting_depth; // how many (input) values we've processed in the page so far int input_value_count = s->input_value_count; // how many rows we've processed in the page so far int input_row_count = s->input_row_count; // process until we've reached the target while (input_value_count < target_input_value_count) { // determine the nesting bounds for this thread (the range of nesting depths we // will generate new value indices and validity bits for) int start_depth, end_depth, d; get_nesting_bounds( start_depth, end_depth, d, s, input_value_count, target_input_value_count, t); // 4 interesting things to track: // thread_value_count : # of output values from the view of this thread // warp_value_count : # of output values for the whole warp // // thread_valid_count : # of valid values from the view of this thread // warp_valid_count : # of valid values for the whole warp uint32_t thread_value_count, warp_value_count; uint32_t thread_valid_count, warp_valid_count; // track (page-relative) row index for the thread so we can compare against input bounds // keep track of overall # of rows we've read. int is_new_row = start_depth == 0 ? 1 : 0; uint32_t warp_row_count_mask = BALLOT(is_new_row); int32_t thread_row_index = input_row_count + ((__popc(warp_row_count_mask & ((1 << t) - 1)) + is_new_row) - 1); input_row_count += __popc(warp_row_count_mask); // is this thread within row bounds? int in_row_bounds = thread_row_index >= s->row_index_lower_bound && thread_row_index < (s->first_row + s->num_rows) ? 1 : 0; // compute warp and thread value counts uint32_t warp_count_mask = BALLOT((0 >= start_depth && 0 <= end_depth) && in_row_bounds ? 1 : 0); warp_value_count = __popc(warp_count_mask); // Note : ((1 << t) - 1) implies "for all threads before me" thread_value_count = __popc(warp_count_mask & ((1 << t) - 1)); // walk from 0 to max_depth uint32_t next_thread_value_count, next_warp_value_count; for (int s_idx = 0; s_idx < max_depth; s_idx++) { PageNestingInfo *pni = &s->page.nesting[s_idx]; // if we are within the range of nesting levels we should be adding value indices for int in_nesting_bounds = ((s_idx >= start_depth && s_idx <= end_depth) && in_row_bounds) ? 1 : 0; // everything up to the max_def_level is a non-null value uint32_t is_valid = 0; if (d >= pni->max_def_level && in_nesting_bounds) { is_valid = 1; } // compute warp and thread valid counts uint32_t warp_valid_mask; // for flat schemas, a simple ballot_sync gives us the correct count and bit positions because // every value in the input matches to a value in the output if (max_depth == 0) { warp_valid_mask = BALLOT(is_valid); } // for nested schemas, it's more complicated. This warp will visit 32 incoming values, // however not all of them will necessarily represent a value at this nesting level. so the // validity bit for thread t might actually represent output value t-6. the correct position // for thread t's bit is cur_value_count. for cuda 11 we could use __reduce_or_sync(), but // until then we have to do a warp reduce. 
else { warp_valid_mask = WarpReduceOr32(is_valid << thread_value_count); } thread_valid_count = __popc(warp_valid_mask & ((1 << thread_value_count) - 1)); warp_valid_count = __popc(warp_valid_mask); // if this is the value column emit an index for value decoding if (is_valid && s_idx == max_depth - 1) { int idx = pni->valid_count + thread_valid_count; int ofs = pni->value_count + thread_value_count; s->nz_idx[rolling_index(idx)] = ofs; } // compute warp and thread value counts for the -next- nesting level. we need to // do this for nested schemas so that we can emit an offset for the -current- nesting // level. more concretely : the offset for the current nesting level == current length of the // next nesting level if (s_idx < max_depth - 1) { uint32_t next_warp_count_mask = BALLOT((s_idx + 1 >= start_depth && s_idx + 1 <= end_depth && in_row_bounds) ? 1 : 0); next_warp_value_count = __popc(next_warp_count_mask); next_thread_value_count = __popc(next_warp_count_mask & ((1 << t) - 1)); // if we're -not- at a leaf column and we're within nesting/row bounds // and we have a valid data_out pointer, it implies this is a list column, so // emit an offset. if (in_nesting_bounds && pni->data_out != nullptr) { int idx = pni->value_count + thread_value_count; cudf::size_type ofs = s->page.nesting[s_idx + 1].value_count + next_thread_value_count + s->page.nesting[s_idx + 1].page_start_value; (reinterpret_cast<cudf::size_type *>(pni->data_out))[idx] = ofs; } } // increment count of valid values, count of total values, and validity mask if (!t) { if (pni->valid_map != nullptr && in_row_bounds) { store_validity(pni, warp_valid_mask, warp_value_count); } pni->valid_count += warp_valid_count; pni->value_count += warp_value_count; } // propagate value counts for the next level warp_value_count = next_warp_value_count; thread_value_count = next_thread_value_count; } input_value_count += min(32, (target_input_value_count - input_value_count)); SYNCWARP(); } // update if (!t) { // update valid value count for decoding and total # of values we've processed s->nz_count = s->page.nesting[max_depth - 1].valid_count; s->input_value_count = input_value_count; s->input_row_count = input_row_count; } } /** * @brief Process repetition and definition levels up to the target count of leaf values. * * In order to decode actual leaf values from the input stream, we need to generate the * list of non-null value positions (page_state_s::nz_idx). We do this by processing * the repetition and definition level streams. This process also generates validity information, * and offset column values in the case of nested schemas. Because of the way the streams * are encoded, this function may generate slightly more than target_leaf_count. * * Only runs on 1 warp. 
* * @param[in] s The local page state * @param[in] target_leaf_count Target count of non-null leaf values to generate indices for * @param[in] t Thread index */ __device__ void gpuDecodeLevels(page_state_s *s, int32_t target_leaf_count, int t) { bool has_repetition = s->col.max_level[level_type::REPETITION] > 0; constexpr int batch_size = 32; int cur_leaf_count = target_leaf_count; while (!s->error && s->nz_count < target_leaf_count && s->input_value_count < s->num_input_values) { if (has_repetition) { gpuDecodeStream(s->rep, s, cur_leaf_count, t, level_type::REPETITION); } gpuDecodeStream(s->def, s, cur_leaf_count, t, level_type::DEFINITION); SYNCWARP(); // because the rep and def streams are encoded seperately, we cannot request an exact // # of values to be decoded at once. we can only process the lowest # of decoded rep/def // levels we get. int actual_leaf_count = has_repetition ? min(s->lvl_count[level_type::REPETITION], s->lvl_count[level_type::DEFINITION]) : s->lvl_count[level_type::DEFINITION]; // process what we got back gpuUpdateValidityOffsetsAndRowIndices(actual_leaf_count, s, t); cur_leaf_count = actual_leaf_count + batch_size; SYNCWARP(); } } /** * @brief Process a batch of incoming repetition/definition level values to generate * per-nesting level output column size for this page. * * Each page represents one piece of the overall output column. The total output (cudf) * column sizes are the sum of the values in each individual page. * * @param[in] s The local page info * @param[in] target_input_value_count The # of repetition/definition levels to process up to * @param[in] t Thread index * @param[in] bounds_set Whether or not s->row_index_lower_bound, s->first_row and s->num_rows * have been computed for this page (they will only be set in the second/trim pass). */ static __device__ void gpuUpdatePageSizes(page_state_s *s, int32_t target_input_value_count, int t, bool bounds_set) { // max nesting depth of the column int max_depth = s->col.max_nesting_depth; // bool has_repetition = s->col.max_level[level_type::REPETITION] > 0 ? true : false; // how many input level values we've processed in the page so far int input_value_count = s->input_value_count; // how many leaf values we've processed in the page so far int input_leaf_count = s->input_leaf_count; // how many rows we've processed in the page so far int input_row_count = s->input_row_count; while (input_value_count < target_input_value_count) { int start_depth, end_depth, d; get_nesting_bounds( start_depth, end_depth, d, s, input_value_count, target_input_value_count, t); // count rows and leaf values int is_new_row = start_depth == 0 ? 1 : 0; uint32_t warp_row_count_mask = BALLOT(is_new_row); int is_new_leaf = (d >= s->page.nesting[max_depth - 1].max_def_level) ? 1 : 0; uint32_t warp_leaf_count_mask = BALLOT(is_new_leaf); // is this thread within row bounds? on the first pass we don't know the bounds, so we will be // computing the full size of the column. on the second pass, we will know our actual row // bounds, so the computation will cap sizes properly. int in_row_bounds = 1; if (bounds_set) { // absolute row index int32_t thread_row_index = input_row_count + ((__popc(warp_row_count_mask & ((1 << t) - 1)) + is_new_row) - 1); in_row_bounds = thread_row_index >= s->row_index_lower_bound && thread_row_index < (s->first_row + s->num_rows) ? 
          1 : 0;
      uint32_t row_bounds_mask  = BALLOT(in_row_bounds);
      int first_thread_in_range = __ffs(row_bounds_mask) - 1;

      // if we've found the beginning of the first row, mark down the position
      // in the def/repetition buffer (skipped_values) and the data buffer (skipped_leaf_values)
      if (!t && first_thread_in_range >= 0 && s->page.skipped_values < 0) {
        // how many values we've skipped in the rep/def levels
        s->page.skipped_values = input_value_count + first_thread_in_range;
        // how many values we've skipped in the actual data stream
        s->page.skipped_leaf_values =
          input_leaf_count + __popc(warp_leaf_count_mask & ((1 << first_thread_in_range) - 1));
      }
    }

    // increment counts across all nesting depths
    for (int s_idx = 0; s_idx < max_depth; s_idx++) {
      // if we are within the range of nesting levels we should be adding value indices for
      int in_nesting_bounds = (s_idx >= start_depth && s_idx <= end_depth && in_row_bounds) ? 1 : 0;
      uint32_t count_mask   = BALLOT(in_nesting_bounds);
      if (!t) { s->page.nesting[s_idx].size += __popc(count_mask); }
    }

    input_value_count += min(32, (target_input_value_count - input_value_count));
    input_row_count += __popc(warp_row_count_mask);
    input_leaf_count += __popc(warp_leaf_count_mask);
  }

  // update final page value count
  if (!t) {
    s->input_value_count = target_input_value_count;
    s->input_leaf_count  = input_leaf_count;
    s->input_row_count   = input_row_count;
  }
}

/**
 * @brief Kernel for computing per-page column size information for all nesting levels.
 *
 * This function will write out the size field for each level of nesting.
 *
 * @param[in,out] pages List of pages
 * @param[in] chunks List of column chunks
 * @param[in] min_row Row index to start reading at
 * @param[in] num_rows Maximum number of rows to read
 * @param[in] num_chunks Number of column chunks
 * @param[in] trim_pass Whether or not this is the trim pass. We first have to compute
 * the full size information of every page before we come through in a second (trim) pass
 * to determine what subset of rows in this page we should be reading.
 */
// blockDim {NTHREADS,1,1}
extern "C" __global__ void __launch_bounds__(NTHREADS)
  gpuComputePageSizes(PageInfo *pages,
                      ColumnChunkDesc const *chunks,
                      size_t min_row,
                      size_t num_rows,
                      int32_t num_chunks,
                      bool trim_pass)
{
  __shared__ __align__(16) page_state_s state_g;

  page_state_s *const s = &state_g;
  int page_idx          = blockIdx.x;
  int t                 = threadIdx.x;
  PageInfo *pp          = &pages[page_idx];

  if (!setupLocalPageInfo(
        s, pp, chunks, trim_pass ? min_row : 0, trim_pass ? num_rows : INT_MAX, num_chunks)) {
    return;
  }

  // zero sizes
  int d = 0;
  while (d < s->page.num_nesting_levels) {
    if (d + t < s->page.num_nesting_levels) { s->page.nesting[d + t].size = 0; }
    d += blockDim.x;
  }
  if (!t) {
    s->page.skipped_values      = -1;
    s->page.skipped_leaf_values = -1;
    s->input_row_count          = 0;
    s->input_value_count        = 0;

    // if this isn't the trim pass, make sure we visit absolutely everything
    if (!trim_pass) {
      s->first_row             = 0;
      s->num_rows              = INT_MAX;
      s->row_index_lower_bound = -1;
    }
  }
  __syncthreads();

  bool has_repetition = s->col.max_level[level_type::REPETITION] > 0;

  // optimization : it might be useful to have a version of gpuDecodeStream that could go
  // wider than 1 warp. Currently it only uses 1 warp so that it can overlap work
  // with the value decoding step when in the actual value decoding kernel. However, during
  // this preprocess step we have no such limits - we could go as wide as NTHREADS
  if (t < 32) {
    constexpr int batch_size = 32;
    int target_input_count   = batch_size;
    while (!s->error && s->input_value_count < s->num_input_values) {
      // decode repetition and definition levels. these will attempt to decode at
      // least up to the target, but may decode a few more.
      if (has_repetition) {
        gpuDecodeStream(s->rep, s, target_input_count, t, level_type::REPETITION);
      }
      gpuDecodeStream(s->def, s, target_input_count, t, level_type::DEFINITION);
      SYNCWARP();

      // we may have decoded different amounts from each stream, so only process what both
      // streams have decoded so far
      int actual_input_count = has_repetition ? min(s->lvl_count[level_type::REPETITION],
                                                    s->lvl_count[level_type::DEFINITION])
                                              : s->lvl_count[level_type::DEFINITION];

      // process what we got back
      gpuUpdatePageSizes(s, actual_input_count, t, trim_pass);
      target_input_count = actual_input_count + batch_size;
      SYNCWARP();
    }
  }
  // update # rows in the actual page
  if (!t) {
    pp->num_rows            = s->page.nesting[0].size;
    pp->skipped_values      = s->page.skipped_values;
    pp->skipped_leaf_values = s->page.skipped_leaf_values;
  }
}

/**
 * @brief Kernel for decoding the column data stored in the pages
 *
 * This function will write the page data and the page data's validity to the
 * output specified in the page's column chunk. If necessary, additional
 * conversion will be performed to translate from the Parquet datatype to
 * desired output datatype (ex. 32-bit to 16-bit, string to hash).
 *
 * @param[in] pages List of pages
 * @param[in,out] chunks List of column chunks
 * @param[in] min_row Row index to start reading at
 * @param[in] num_rows Maximum number of rows to read
 * @param[in] num_chunks Number of column chunks
 */
// blockDim {NTHREADS,1,1}
extern "C" __global__ void __launch_bounds__(NTHREADS)
  gpuDecodePageData(PageInfo *pages,
                    ColumnChunkDesc const *chunks,
                    size_t min_row,
                    size_t num_rows,
                    int32_t num_chunks)
{
  __shared__ __align__(16) page_state_s state_g;

  page_state_s *const s = &state_g;
  int page_idx          = blockIdx.x;
  int t                 = threadIdx.x;
  int out_thread0;

  if (!setupLocalPageInfo(s, &pages[page_idx], chunks, min_row, num_rows, num_chunks)) { return; }

  if (s->dict_base) {
    out_thread0 = (s->dict_bits > 0) ? 64 : 32;
  } else {
    out_thread0 =
      ((s->col.data_type & 7) == BOOLEAN || (s->col.data_type & 7) == BYTE_ARRAY) ? 64 : 32;
  }

  uint32_t skipped_leaf_values = s->page.skipped_leaf_values;

  while (!s->error && (s->input_value_count < s->num_input_values || s->out_pos < s->nz_count)) {
    int target_pos;
    int out_pos = s->out_pos;

    if (t < out_thread0) {
      target_pos =
        min(out_pos + 2 * (NTHREADS - out_thread0), s->nz_count + (NTHREADS - out_thread0));
    } else {
      target_pos = min(s->nz_count, out_pos + NTHREADS - out_thread0);
      if (out_thread0 > 32) { target_pos = min(target_pos, s->dict_pos); }
    }
    __syncthreads();
    if (t < 32) {
      // decode repetition and definition levels.
// - update validity vectors // - updates offsets (for nested columns) // - produces non-NULL value indices in s->nz_idx for subsequent decoding gpuDecodeLevels(s, target_pos, t); } else if (t < out_thread0) { uint32_t src_target_pos = target_pos + skipped_leaf_values; // WARP1: Decode dictionary indices, booleans or string positions if (s->dict_base) { src_target_pos = gpuDecodeDictionaryIndices(s, src_target_pos, t & 0x1f); } else if ((s->col.data_type & 7) == BOOLEAN) { src_target_pos = gpuDecodeRleBooleans(s, src_target_pos, t & 0x1f); } else if ((s->col.data_type & 7) == BYTE_ARRAY) { gpuInitStringDescriptors(s, src_target_pos, t & 0x1f); } if (t == 32) { *(volatile int32_t *)&s->dict_pos = src_target_pos; } } else { // WARP1..WARP3: Decode values int dtype = s->col.data_type & 7; out_pos += t - out_thread0; uint32_t src_pos = out_pos + skipped_leaf_values; int output_value_idx = s->nz_idx[rolling_index(out_pos)]; if (out_pos < target_pos && output_value_idx >= 0 && output_value_idx < s->num_input_values) { // nesting level that is storing actual leaf values int leaf_level_index = s->col.max_nesting_depth - 1; uint32_t dtype_len = s->dtype_len; void *dst = s->page.nesting[leaf_level_index].data_out + static_cast<size_t>(output_value_idx) * dtype_len; if (dtype == BYTE_ARRAY) gpuOutputString(s, src_pos, dst); else if (dtype == BOOLEAN) gpuOutputBoolean(s, src_pos, static_cast<uint8_t *>(dst)); else if (s->col.converted_type == DECIMAL) gpuOutputDecimal(s, src_pos, static_cast<double *>(dst), dtype); else if (dtype == INT96) gpuOutputInt96Timestamp(s, src_pos, static_cast<int64_t *>(dst)); else if (dtype_len == 8) { if (s->ts_scale) gpuOutputInt64Timestamp(s, src_pos, static_cast<int64_t *>(dst)); else gpuOutputFast(s, src_pos, static_cast<uint2 *>(dst)); } else if (dtype_len == 4) gpuOutputFast(s, src_pos, static_cast<uint32_t *>(dst)); else gpuOutputGeneric(s, src_pos, static_cast<uint8_t *>(dst), dtype_len); } if (t == out_thread0) { *(volatile int32_t *)&s->out_pos = target_pos; } } __syncthreads(); } } struct chunk_row_output_iter { PageInfo *p; using value_type = size_type; using difference_type = size_type; using pointer = size_type *; using reference = size_type &; using iterator_category = thrust::output_device_iterator_tag; chunk_row_output_iter operator+ __host__ __device__(int i) { return chunk_row_output_iter{p + i}; } void operator++ __host__ __device__() { p++; } reference operator[] __device__(int i) { return p[i].chunk_row; } reference operator*__device__() { return p->chunk_row; } void operator= __device__(value_type v) { p->chunk_row = v; } }; struct start_offset_output_iterator { PageInfo *pages; int *page_indices; int cur_index; int src_col_schema; int nesting_depth; int empty = 0; using value_type = size_type; using difference_type = size_type; using pointer = size_type *; using reference = size_type &; using iterator_category = thrust::output_device_iterator_tag; start_offset_output_iterator operator+ __host__ __device__(int i) { return start_offset_output_iterator{ pages, page_indices, cur_index + i, src_col_schema, nesting_depth}; } void operator++ __host__ __device__() { cur_index++; } reference operator[] __device__(int i) { return dereference(cur_index + i); } reference operator*__device__() { return dereference(cur_index); } private: reference __device__ dereference(int index) { PageInfo const &p = pages[page_indices[index]]; if (p.src_col_schema != src_col_schema || p.flags & PAGEINFO_FLAGS_DICTIONARY) { return empty; } return 
p.nesting[nesting_depth].page_start_value; } }; /** * @copydoc cudf::io::parquet::gpu::PreprocessColumnData */ hipError_t PreprocessColumnData(hostdevice_vector<PageInfo> &pages, hostdevice_vector<ColumnChunkDesc> const &chunks, std::vector<input_column_info> &input_columns, std::vector<cudf::io::detail::column_buffer> &output_columns, size_t num_rows, size_t min_row, hipStream_t stream, rmm::mr::device_memory_resource *mr) { dim3 dim_block(NTHREADS, 1); dim3 dim_grid(pages.size(), 1); // 1 threadblock per page // computes: // PageNestingInfo::size for each level of nesting, for each page. // The output from this does not take row bounds (num_rows, min_row) into account hipLaunchKernelGGL(( gpuComputePageSizes), dim3(dim_grid), dim3(dim_block), 0, stream, pages.device_ptr(), chunks.device_ptr(), min_row, num_rows, chunks.size(), false); CUDA_TRY(hipStreamSynchronize(stream)); // computes: // PageInfo::chunk_row for all pages auto key_input = thrust::make_transform_iterator( pages.device_ptr(), [] __device__(PageInfo const &page) { return page.chunk_idx; }); auto page_input = thrust::make_transform_iterator( pages.device_ptr(), [] __device__(PageInfo const &page) { return page.num_rows; }); thrust::exclusive_scan_by_key(rmm::exec_policy(stream)->on(stream), key_input, key_input + pages.size(), page_input, chunk_row_output_iter{pages.device_ptr()}); // computes: // PageNestingInfo::size for each level of nesting, for each page, taking row bounds into account. // PageInfo::skipped_values, which tells us where to start decoding in the input hipLaunchKernelGGL(( gpuComputePageSizes), dim3(dim_grid), dim3(dim_block), 0, stream, pages.device_ptr(), chunks.device_ptr(), min_row, num_rows, chunks.size(), true); // retrieve pages back (PageInfo::num_rows has been set. if we don't bring it // back, this value will get overwritten later on). pages.device_to_host(stream, true); // ordering of pages is by input column schema, repeated across row groups. so // if we had 3 columns, each with 2 pages, and 1 row group, our schema values might look like // // 1, 1, 2, 2, 3, 3 // // However, if we had more than one row group, the pattern would be // // 1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3 // ^ row group 0 | // ^ row group 1 // // To use exclusive_scan_by_key, the ordering we actually want is // // 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3 // // We also need to preserve key-relative page ordering, so we need to use a stable sort. rmm::device_uvector<int> page_keys(pages.size(), stream); rmm::device_uvector<int> page_index(pages.size(), stream); { thrust::transform(rmm::exec_policy(stream)->on(stream), pages.device_ptr(), pages.device_ptr() + pages.size(), page_keys.begin(), [] __device__(PageInfo const &page) { return page.src_col_schema; }); thrust::sequence(rmm::exec_policy(stream)->on(stream), page_index.begin(), page_index.end()); thrust::stable_sort_by_key(rmm::exec_policy(stream)->on(stream), page_keys.begin(), page_keys.end(), page_index.begin(), thrust::less<int>()); } // compute output column sizes by examining the pages of the -input- columns for (size_t idx = 0; idx < input_columns.size(); idx++) { auto const &input_col = input_columns[idx]; auto src_col_schema = input_col.schema_idx; size_t max_depth = input_col.nesting_depth(); auto *cols = &output_columns; for (size_t l_idx = 0; l_idx < input_col.nesting_depth(); l_idx++) { auto &out_buf = (*cols)[input_col.nesting[l_idx]]; cols = &out_buf.children; // size iterator. 
indexes pages by sorted order auto size_input = thrust::make_transform_iterator( page_index.begin(), [src_col_schema, l_idx, pages = pages.device_ptr()] __device__(int index) { auto const &page = pages[index]; if (page.src_col_schema != src_col_schema || page.flags & PAGEINFO_FLAGS_DICTIONARY) { return 0; } return page.nesting[l_idx].size; }); // compute column size. // for struct columns, higher levels of the output columns are shared between input // columns. so don't compute any given level more than once. if (out_buf.size == 0) { int size = thrust::reduce( rmm::exec_policy(stream)->on(stream), size_input, size_input + pages.size()); // if this is a list column add 1 for non-leaf levels for the terminating offset if (out_buf.type.id() == type_id::LIST && l_idx < max_depth) { size++; } // allocate out_buf.create(size, stream, mr); } // compute per-page start offset thrust::exclusive_scan_by_key(rmm::exec_policy(stream)->on(stream), page_keys.begin(), page_keys.end(), size_input, start_offset_output_iterator{pages.device_ptr(), page_index.begin(), 0, static_cast<int>(src_col_schema), static_cast<int>(l_idx)}); } } return hipSuccess; } /** * @copydoc cudf::io::parquet::gpu::DecodePageData */ hipError_t __host__ DecodePageData(hostdevice_vector<PageInfo> &pages, hostdevice_vector<ColumnChunkDesc> const &chunks, size_t num_rows, size_t min_row, hipStream_t stream) { dim3 dim_block(NTHREADS, 1); dim3 dim_grid(pages.size(), 1); // 1 threadblock per page hipLaunchKernelGGL(( gpuDecodePageData), dim3(dim_grid), dim3(dim_block), 0, stream, pages.device_ptr(), chunks.device_ptr(), min_row, num_rows, chunks.size()); return hipSuccess; } } // namespace gpu } // namespace parquet } // namespace io } // namespace cudf
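/*
 * Illustrative sketch (not part of the original sources): the store_validity() helper above
 * merges a warp's 32-bit validity ballot into the output null mask at an arbitrary bit offset,
 * spilling into a second word when the write does not fit. A minimal host-only version of that
 * bit math, assuming a hypothetical plain uint32_t array in place of the device-side
 * atomicAnd/atomicOr updates:
 */
#include <cstdint>
#include <cstdio>

// Merge the low `count` bits of `mask` into `words` starting at absolute bit `offset`.
// Mirrors the two cases in store_validity: the write fits in one 32-bit word, or it
// straddles a word boundary and the remainder lands in the next word.
static void store_validity_host(uint32_t *words, int offset, uint32_t mask, int count)
{
  int word          = offset / 32;
  int bit           = offset % 32;
  uint32_t relevant = static_cast<uint32_t>((static_cast<uint64_t>(1) << count) - 1);
  if (bit + count <= 32) {
    // fits entirely: clear the target bits, then OR the new bits in
    words[word] = (words[word] & ~(relevant << bit)) | ((mask & relevant) << bit);
  } else {
    int bits_left = 32 - bit;  // bits that still fit in the current word
    uint32_t lo   = mask & ((1u << bits_left) - 1);
    words[word]   = (words[word] & ~(((1u << bits_left) - 1) << bit)) | (lo << bit);
    uint32_t hi_count = count - bits_left;
    uint32_t hi       = (mask >> bits_left) & ((1u << hi_count) - 1);
    words[word + 1]   = (words[word + 1] & ~((1u << hi_count) - 1)) | hi;
  }
}

int main()
{
  uint32_t words[2] = {0, 0};
  store_validity_host(words, 30, 0xF, 4);          // 4 valid bits straddling the word boundary
  std::printf("%08x %08x\n", words[0], words[1]);  // expect: c0000000 00000003
  return 0;
}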
f28fb6809b4882ea949c28914115a1f6df9796b1.cu
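/*
 * The file below is the CUDA original corresponding to the hipified source above. The two
 * versions differ mainly in runtime API spelling (hipStream_t/hipSuccess above vs. the CUDA
 * equivalents below) and in kernel-launch syntax. As a minimal, self-contained sketch of that
 * launch mapping (using a hypothetical scale() kernel, not anything taken from these files):
 */
#include <cuda_runtime.h>

__global__ void scale(float *x, int n, float a)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) { x[i] *= a; }
}

void launch_scale(float *d_x, int n, float a, cudaStream_t stream)
{
  dim3 block(256, 1);
  dim3 grid((n + block.x - 1) / block.x, 1);
  // CUDA form: triple-chevron launch, as used in the original file
  scale<<<grid, block, 0, stream>>>(d_x, n, a);
  // hipify rewrites such launches into the macro form seen in the hipified file above:
  //   hipLaunchKernelGGL((scale), grid, block, 0, stream, d_x, n, a);
}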
/* * Copyright (c) 2018-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <rmm/thrust_rmm_allocator.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/iterator/transform_output_iterator.h> #include <thrust/scan.h> #include <thrust/tuple.h> #include <cudf/detail/utilities/release_assert.cuh> #include <cudf/utilities/bit.hpp> #include <io/utilities/block_utils.cuh> #include <io/utilities/column_buffer.hpp> #include <io/parquet/parquet_gpu.hpp> #define LOG2_NTHREADS (5 + 2) #define NTHREADS (1 << LOG2_NTHREADS) #define NZ_BFRSZ (NTHREADS * 2) inline __device__ uint32_t rotl32(uint32_t x, uint32_t r) { return __funnelshift_l(x, x, r); // (x << r) | (x >> (32 - r)); } inline __device__ int rolling_index(int index) { return index & (NZ_BFRSZ - 1); } namespace cudf { namespace io { namespace parquet { namespace gpu { struct page_state_s { const uint8_t *data_start; const uint8_t *data_end; const uint8_t *dict_base; // ptr to dictionary page data int32_t dict_size; // size of dictionary data int32_t first_row; // First row in page to output int32_t num_rows; // Rows in page to decode (including rows to be skipped) int32_t first_output_value; // First value in page to output int32_t num_input_values; // total # of input/level values in the page int32_t dtype_len; // Output data type length int32_t dtype_len_in; // Can be larger than dtype_len if truncating 32-bit into 8-bit int32_t dict_bits; // # of bits to store dictionary indices uint32_t dict_run; int32_t dict_val; uint32_t initial_rle_run[NUM_LEVEL_TYPES]; // [def,rep] int32_t initial_rle_value[NUM_LEVEL_TYPES]; // [def,rep] int32_t error; PageInfo page; ColumnChunkDesc col; // (leaf) value decoding int32_t nz_count; // number of valid entries in nz_idx (write position in circular buffer) int32_t dict_pos; // write position of dictionary indices int32_t out_pos; // read position of final output int32_t ts_scale; // timestamp scale: <0: divide by -ts_scale, >0: multiply by ts_scale uint32_t nz_idx[NZ_BFRSZ]; // circular buffer of non-null value positions uint32_t dict_idx[NZ_BFRSZ]; // Dictionary index, boolean, or string offset values uint32_t str_len[NZ_BFRSZ]; // String length for plain encoding of strings // repetition/definition level decoding int32_t input_value_count; // how many values of the input we've processed int32_t input_row_count; // how many rows of the input we've processed int32_t input_leaf_count; // how many leaf values of the input we've processed uint32_t rep[NZ_BFRSZ]; // circular buffer of repetition level values uint32_t def[NZ_BFRSZ]; // circular buffer of definition level values const uint8_t *lvl_start[NUM_LEVEL_TYPES]; // [def,rep] int32_t lvl_count[NUM_LEVEL_TYPES]; // how many of each of the streams we've decoded int32_t row_index_lower_bound; // lower bound of row indices we should process }; /** * @brief Computes a 32-bit hash when given a byte stream and range. 
* * MurmurHash3_32 implementation from * https://github.com/aappleby/smhasher/blob/master/src/MurmurHash3.cpp * * MurmurHash3 was written by Austin Appleby, and is placed in the public * domain. The author hereby disclaims copyright to this source code. * * @param[in] key The input data to hash * @param[in] len The length of the input data * @param[in] seed An initialization value * * @return The hash value */ __device__ uint32_t device_str2hash32(const char *key, size_t len, uint32_t seed = 33) { const uint8_t *p = reinterpret_cast<const uint8_t *>(key); uint32_t h1 = seed, k1; const uint32_t c1 = 0xcc9e2d51; const uint32_t c2 = 0x1b873593; int l = len; // body while (l >= 4) { k1 = p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24); k1 *= c1; k1 = rotl32(k1, 15); k1 *= c2; h1 ^= k1; h1 = rotl32(h1, 13); h1 = h1 * 5 + 0xe6546b64; p += 4; l -= 4; } // tail k1 = 0; switch (l) { case 3: k1 ^= p[2] << 16; case 2: k1 ^= p[1] << 8; case 1: k1 ^= p[0]; k1 *= c1; k1 = rotl32(k1, 15); k1 *= c2; h1 ^= k1; } // finalization h1 ^= len; h1 ^= h1 >> 16; h1 *= 0x85ebca6b; h1 ^= h1 >> 13; h1 *= 0xc2b2ae35; h1 ^= h1 >> 16; return h1; } /** * @brief Read a 32-bit varint integer * * @param[in,out] cur The current data position, updated after the read * @param[in] end The end data position * * @return The 32-bit value read */ inline __device__ uint32_t get_vlq32(const uint8_t *&cur, const uint8_t *end) { uint32_t v = *cur++; if (v >= 0x80 && cur < end) { v = (v & 0x7f) | ((*cur++) << 7); if (v >= (0x80 << 7) && cur < end) { v = (v & ((0x7f << 7) | 0x7f)) | ((*cur++) << 14); if (v >= (0x80 << 14) && cur < end) { v = (v & ((0x7f << 14) | (0x7f << 7) | 0x7f)) | ((*cur++) << 21); if (v >= (0x80 << 21) && cur < end) { v = (v & ((0x7f << 21) | (0x7f << 14) | (0x7f << 7) | 0x7f)) | ((*cur++) << 28); } } } } return v; } /** * @brief Parse the beginning of the level section (definition or repetition), * initializes the initial RLE run & value, and returns the section length * * @param[in,out] s The page state * @param[in] cur The current data position * @param[in] end The end of the data * @param[in] level_bits The bits required * * @return The length of the section */ __device__ uint32_t InitLevelSection(page_state_s *s, const uint8_t *cur, const uint8_t *end, level_type lvl) { int32_t len; int level_bits = s->col.level_bits[lvl]; int encoding = lvl == level_type::DEFINITION ? s->page.definition_level_encoding : s->page.repetition_level_encoding; if (level_bits == 0) { len = 0; s->initial_rle_run[lvl] = s->page.num_input_values * 2; // repeated value s->initial_rle_value[lvl] = 0; s->lvl_start[lvl] = cur; } else if (encoding == RLE) { if (cur + 4 < end) { uint32_t run; len = 4 + (cur[0]) + (cur[1] << 8) + (cur[2] << 16) + (cur[3] << 24); cur += 4; run = get_vlq32(cur, end); s->initial_rle_run[lvl] = run; if (!(run & 1)) { int v = (cur < end) ? cur[0] : 0; cur++; if (level_bits > 8) { v |= ((cur < end) ? 
cur[0] : 0) << 8; cur++; } s->initial_rle_value[lvl] = v; } s->lvl_start[lvl] = cur; if (cur > end) { s->error = 2; } } else { len = 0; s->error = 2; } } else if (encoding == BIT_PACKED) { len = (s->page.num_input_values * level_bits + 7) >> 3; s->initial_rle_run[lvl] = ((s->page.num_input_values + 7) >> 3) * 2 + 1; // literal run s->initial_rle_value[lvl] = 0; s->lvl_start[lvl] = cur; } else { s->error = 3; len = 0; } return (uint32_t)len; } /** * @brief Decode values out of a definition or repetition stream * * @param[in,out] s Page state input/output * @param[in] t target_count Target count of stream values on output * @param[in] t Warp0 thread ID (0..31) * @param[in] lvl The level type we are decoding - DEFINITION or REPETITION */ __device__ void gpuDecodeStream( uint32_t *output, page_state_s *s, int32_t target_count, int t, level_type lvl) { const uint8_t *cur_def = s->lvl_start[lvl]; const uint8_t *end = s->data_start; uint32_t level_run = s->initial_rle_run[lvl]; int32_t level_val = s->initial_rle_value[lvl]; int level_bits = s->col.level_bits[lvl]; int32_t num_input_values = s->num_input_values; int32_t value_count = s->lvl_count[lvl]; int32_t batch_coded_count = 0; while (value_count < target_count && value_count < num_input_values) { int batch_len; if (level_run <= 1) { // Get a new run symbol from the byte stream int sym_len = 0; if (!t) { const uint8_t *cur = cur_def; if (cur < end) { level_run = get_vlq32(cur, end); } if (!(level_run & 1)) { if (cur < end) level_val = cur[0]; cur++; if (level_bits > 8) { if (cur < end) level_val |= cur[0] << 8; cur++; } } if (cur > end || level_run <= 1) { s->error = 0x10; } sym_len = (int32_t)(cur - cur_def); __threadfence_block(); } sym_len = SHFL0(sym_len); level_val = SHFL0(level_val); level_run = SHFL0(level_run); cur_def += sym_len; } if (s->error) { break; } batch_len = min(num_input_values - value_count, 32); if (level_run & 1) { // Literal run int batch_len8; batch_len = min(batch_len, (level_run >> 1) * 8); batch_len8 = (batch_len + 7) >> 3; if (t < batch_len) { int bitpos = t * level_bits; const uint8_t *cur = cur_def + (bitpos >> 3); bitpos &= 7; if (cur < end) level_val = cur[0]; cur++; if (level_bits > 8 - bitpos && cur < end) { level_val |= cur[0] << 8; cur++; if (level_bits > 16 - bitpos && cur < end) level_val |= cur[0] << 16; } level_val = (level_val >> bitpos) & ((1 << level_bits) - 1); } level_run -= batch_len8 * 2; cur_def += batch_len8 * level_bits; } else { // Repeated value batch_len = min(batch_len, level_run >> 1); level_run -= batch_len * 2; } if (t < batch_len) { int idx = value_count + t; output[idx & (NZ_BFRSZ - 1)] = level_val; } batch_coded_count += batch_len; value_count += batch_len; } // update the stream info if (!t) { s->lvl_start[lvl] = cur_def; s->initial_rle_run[lvl] = level_run; s->initial_rle_value[lvl] = level_val; s->lvl_count[lvl] = value_count; } } /** * @brief Performs RLE decoding of dictionary indexes * * @param[in,out] s Page state input/output * @param[in] target_pos Target index position in dict_idx buffer (may exceed this value by up to * 31) * @param[in] t Warp1 thread ID (0..31) * * @return The new output position */ __device__ int gpuDecodeDictionaryIndices(volatile page_state_s *s, int target_pos, int t) { const uint8_t *end = s->data_end; int dict_bits = s->dict_bits; int pos = s->dict_pos; while (pos < target_pos) { int is_literal, batch_len; if (!t) { uint32_t run = s->dict_run; const uint8_t *cur = s->data_start; if (run <= 1) { run = (cur < end) ? 
get_vlq32(cur, end) : 0; if (!(run & 1)) { // Repeated value int bytecnt = (dict_bits + 7) >> 3; if (cur + bytecnt <= end) { int32_t run_val = cur[0]; if (bytecnt > 1) { run_val |= cur[1] << 8; if (bytecnt > 2) { run_val |= cur[2] << 16; if (bytecnt > 3) { run_val |= cur[3] << 24; } } } s->dict_val = run_val & ((1 << dict_bits) - 1); } cur += bytecnt; } } if (run & 1) { // Literal batch: must output a multiple of 8, except for the last batch int batch_len_div8; batch_len = max(min(32, (int)(run >> 1) * 8), 1); batch_len_div8 = (batch_len + 7) >> 3; run -= batch_len_div8 * 2; cur += batch_len_div8 * dict_bits; } else { batch_len = max(min(32, (int)(run >> 1)), 1); run -= batch_len * 2; } s->dict_run = run; s->data_start = cur; is_literal = run & 1; __threadfence_block(); } SYNCWARP(); is_literal = SHFL0(is_literal); batch_len = SHFL0(batch_len); if (t < batch_len) { int dict_idx = s->dict_val; if (is_literal) { int32_t ofs = (t - ((batch_len + 7) & ~7)) * dict_bits; const uint8_t *p = s->data_start + (ofs >> 3); ofs &= 7; if (p < end) { uint32_t c = 8 - ofs; dict_idx = (*p++) >> ofs; if (c < dict_bits && p < end) { dict_idx |= (*p++) << c; c += 8; if (c < dict_bits && p < end) { dict_idx |= (*p++) << c; c += 8; if (c < dict_bits && p < end) { dict_idx |= (*p++) << c; } } } dict_idx &= (1 << dict_bits) - 1; } } s->dict_idx[(pos + t) & (NZ_BFRSZ - 1)] = dict_idx; } pos += batch_len; } return pos; } /** * @brief Performs RLE decoding of dictionary indexes, for when dict_size=1 * * @param[in,out] s Page state input/output * @param[in] target_pos Target write position * @param[in] t Thread ID * * @return The new output position */ __device__ int gpuDecodeRleBooleans(volatile page_state_s *s, int target_pos, int t) { const uint8_t *end = s->data_end; int pos = s->dict_pos; while (pos < target_pos) { int is_literal, batch_len; if (!t) { uint32_t run = s->dict_run; const uint8_t *cur = s->data_start; if (run <= 1) { run = (cur < end) ? get_vlq32(cur, end) : 0; if (!(run & 1)) { // Repeated value s->dict_val = (cur < end) ? cur[0] & 1 : 0; cur++; } } if (run & 1) { // Literal batch: must output a multiple of 8, except for the last batch int batch_len_div8; batch_len = max(min(32, (int)(run >> 1) * 8), 1); if (batch_len >= 8) { batch_len &= ~7; } batch_len_div8 = (batch_len + 7) >> 3; run -= batch_len_div8 * 2; cur += batch_len_div8; } else { batch_len = max(min(32, (int)(run >> 1)), 1); run -= batch_len * 2; } s->dict_run = run; s->data_start = cur; is_literal = run & 1; __threadfence_block(); } SYNCWARP(); is_literal = SHFL0(is_literal); batch_len = SHFL0(batch_len); if (t < batch_len) { int dict_idx; if (is_literal) { int32_t ofs = t - ((batch_len + 7) & ~7); const uint8_t *p = s->data_start + (ofs >> 3); dict_idx = (p < end) ? 
(p[0] >> (ofs & 7u)) & 1 : 0; } else { dict_idx = s->dict_val; } s->dict_idx[(pos + t) & (NZ_BFRSZ - 1)] = dict_idx; } pos += batch_len; } return pos; } /** * @brief Parses the length and position of strings * * @param[in,out] s Page state input/output * @param[in] target_pos Target output position * @param[in] t Thread ID * * @return The new output position */ __device__ void gpuInitStringDescriptors(volatile page_state_s *s, int target_pos, int t) { int pos = s->dict_pos; // This step is purely serial if (!t) { const uint8_t *cur = s->data_start; int dict_size = s->dict_size; int k = s->dict_val; while (pos < target_pos) { int len; if (k + 4 <= dict_size) { len = (cur[k]) | (cur[k + 1] << 8) | (cur[k + 2] << 16) | (cur[k + 3] << 24); k += 4; if (k + len > dict_size) { len = 0; } } else { len = 0; } s->dict_idx[pos & (NZ_BFRSZ - 1)] = k; s->str_len[pos & (NZ_BFRSZ - 1)] = len; k += len; pos++; } s->dict_val = k; __threadfence_block(); } } /** * @brief Output a string descriptor * * @param[in,out] s Page state input/output * @param[in] src_pos Source position * @param[in] dstv Pointer to row output data (string descriptor or 32-bit hash) */ inline __device__ void gpuOutputString(volatile page_state_s *s, int src_pos, void *dstv) { const char *ptr = NULL; size_t len = 0; if (s->dict_base) { // String dictionary uint32_t dict_pos = (s->dict_bits > 0) ? s->dict_idx[src_pos & (NZ_BFRSZ - 1)] * sizeof(nvstrdesc_s) : 0; if (dict_pos < (uint32_t)s->dict_size) { const nvstrdesc_s *src = reinterpret_cast<const nvstrdesc_s *>(s->dict_base + dict_pos); ptr = src->ptr; len = src->count; } } else { // Plain encoding uint32_t dict_pos = s->dict_idx[src_pos & (NZ_BFRSZ - 1)]; if (dict_pos <= (uint32_t)s->dict_size) { ptr = reinterpret_cast<const char *>(s->data_start + dict_pos); len = s->str_len[src_pos & (NZ_BFRSZ - 1)]; } } if (s->dtype_len == 4) { // Output hash *static_cast<uint32_t *>(dstv) = device_str2hash32(ptr, len); } else { // Output string descriptor nvstrdesc_s *dst = static_cast<nvstrdesc_s *>(dstv); dst->ptr = ptr; dst->count = len; } } /** * @brief Output a boolean * * @param[in,out] s Page state input/output * @param[in] src_pos Source position * @param[in] dst Pointer to row output data */ inline __device__ void gpuOutputBoolean(volatile page_state_s *s, int src_pos, uint8_t *dst) { *dst = s->dict_idx[src_pos & (NZ_BFRSZ - 1)]; } /** * @brief Store a 32-bit data element * * @param[out] dst ptr to output * @param[in] src8 raw input bytes * @param[in] dict_pos byte position in dictionary * @param[in] dict_size size of dictionary */ inline __device__ void gpuStoreOutput(uint32_t *dst, const uint8_t *src8, uint32_t dict_pos, uint32_t dict_size) { uint32_t bytebuf; unsigned int ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // bytes -> bits if (dict_pos < dict_size) { bytebuf = *reinterpret_cast<const uint32_t *>(src8 + dict_pos); if (ofs) { uint32_t bytebufnext = *reinterpret_cast<const uint32_t *>(src8 + dict_pos + 4); bytebuf = __funnelshift_r(bytebuf, bytebufnext, ofs); } } else { bytebuf = 0; } *dst = bytebuf; } /** * @brief Store a 64-bit data element * * @param[out] dst ptr to output * @param[in] src8 raw input bytes * @param[in] dict_pos byte position in dictionary * @param[in] dict_size size of dictionary */ inline __device__ void gpuStoreOutput(uint2 *dst, const uint8_t *src8, uint32_t dict_pos, uint32_t dict_size) { uint2 v; unsigned int ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // 
bytes -> bits if (dict_pos < dict_size) { v.x = *reinterpret_cast<const uint32_t *>(src8 + dict_pos + 0); v.y = *reinterpret_cast<const uint32_t *>(src8 + dict_pos + 4); if (ofs) { uint32_t next = *reinterpret_cast<const uint32_t *>(src8 + dict_pos + 8); v.x = __funnelshift_r(v.x, v.y, ofs); v.y = __funnelshift_r(v.y, next, ofs); } } else { v.x = v.y = 0; } *dst = v; } /** * @brief Convert an INT96 Spark timestamp to 64-bit timestamp * * @param[in,out] s Page state input/output * @param[in] src_pos Source position * @param[in] dst Pointer to row output data */ inline __device__ void gpuOutputInt96Timestamp(volatile page_state_s *s, int src_pos, int64_t *dst) { const uint8_t *src8; uint32_t dict_pos, dict_size = s->dict_size, ofs; int64_t ts; if (s->dict_base) { // Dictionary dict_pos = (s->dict_bits > 0) ? s->dict_idx[src_pos & (NZ_BFRSZ - 1)] : 0; src8 = s->dict_base; } else { // Plain dict_pos = src_pos; src8 = s->data_start; } dict_pos *= (uint32_t)s->dtype_len_in; ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // bytes -> bits if (dict_pos + 4 < dict_size) { uint3 v; int64_t nanos, secs, days; v.x = *reinterpret_cast<const uint32_t *>(src8 + dict_pos + 0); v.y = *reinterpret_cast<const uint32_t *>(src8 + dict_pos + 4); v.z = *reinterpret_cast<const uint32_t *>(src8 + dict_pos + 8); if (ofs) { uint32_t next = *reinterpret_cast<const uint32_t *>(src8 + dict_pos + 12); v.x = __funnelshift_r(v.x, v.y, ofs); v.y = __funnelshift_r(v.y, v.z, ofs); v.z = __funnelshift_r(v.z, next, ofs); } nanos = v.y; nanos <<= 32; nanos |= v.x; // Convert from Julian day at noon to UTC seconds days = static_cast<int32_t>(v.z); secs = (days - 2440588) * (24 * 60 * 60); // TBD: Should be noon instead of midnight, but this matches pyarrow if (s->col.ts_clock_rate) ts = (secs * s->col.ts_clock_rate) + nanos / (1000000000 / s->col.ts_clock_rate); // Output to desired clock rate else ts = (secs * 1000000000) + nanos; } else { ts = 0; } *dst = ts; } /** * @brief Output a 64-bit timestamp * * @param[in,out] s Page state input/output * @param[in] src_pos Source position * @param[in] dst Pointer to row output data */ inline __device__ void gpuOutputInt64Timestamp(volatile page_state_s *s, int src_pos, int64_t *dst) { const uint8_t *src8; uint32_t dict_pos, dict_size = s->dict_size, ofs; int64_t ts; if (s->dict_base) { // Dictionary dict_pos = (s->dict_bits > 0) ? 
s->dict_idx[src_pos & (NZ_BFRSZ - 1)] : 0; src8 = s->dict_base; } else { // Plain dict_pos = src_pos; src8 = s->data_start; } dict_pos *= (uint32_t)s->dtype_len_in; ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // bytes -> bits if (dict_pos + 4 < dict_size) { uint2 v; int64_t val; int32_t ts_scale; v.x = *reinterpret_cast<const uint32_t *>(src8 + dict_pos + 0); v.y = *reinterpret_cast<const uint32_t *>(src8 + dict_pos + 4); if (ofs) { uint32_t next = *reinterpret_cast<const uint32_t *>(src8 + dict_pos + 8); v.x = __funnelshift_r(v.x, v.y, ofs); v.y = __funnelshift_r(v.y, next, ofs); } val = v.y; val <<= 32; val |= v.x; // Output to desired clock rate ts_scale = s->ts_scale; if (ts_scale < 0) { // round towards negative infinity int sign = (val < 0); ts = ((val + sign) / -ts_scale) + sign; } else { ts = val * ts_scale; } } else { ts = 0; } *dst = ts; } /** * @brief Powers of 10 */ static const __device__ __constant__ double kPow10[40] = { 1.0, 1.e1, 1.e2, 1.e3, 1.e4, 1.e5, 1.e6, 1.e7, 1.e8, 1.e9, 1.e10, 1.e11, 1.e12, 1.e13, 1.e14, 1.e15, 1.e16, 1.e17, 1.e18, 1.e19, 1.e20, 1.e21, 1.e22, 1.e23, 1.e24, 1.e25, 1.e26, 1.e27, 1.e28, 1.e29, 1.e30, 1.e31, 1.e32, 1.e33, 1.e34, 1.e35, 1.e36, 1.e37, 1.e38, 1.e39, }; /** * @brief Output a decimal type ([INT32..INT128] + scale) as a 64-bit float * * @param[in,out] s Page state input/output * @param[in] src_pos Source position * @param[in] dst Pointer to row output data * @param[in] dtype Stored data type */ inline __device__ void gpuOutputDecimal(volatile page_state_s *s, int src_pos, double *dst, int dtype) { const uint8_t *dict; uint32_t dict_pos, dict_size = s->dict_size, dtype_len_in; int64_t i128_hi, i128_lo; int32_t scale; double d; if (s->dict_base) { // Dictionary dict_pos = (s->dict_bits > 0) ? s->dict_idx[src_pos & (NZ_BFRSZ - 1)] : 0; dict = s->dict_base; } else { // Plain dict_pos = src_pos; dict = s->data_start; } dtype_len_in = s->dtype_len_in; dict_pos *= dtype_len_in; // FIXME: Not very efficient (currently reading 1 byte at a time) -> need a variable-length // unaligned load utility function (both little-endian and big-endian versions) if (dtype == INT32) { int32_t lo32 = 0; for (unsigned int i = 0; i < dtype_len_in; i++) { uint32_t v = (dict_pos + i < dict_size) ? dict[dict_pos + i] : 0; lo32 |= v << (i * 8); } i128_lo = lo32; i128_hi = lo32 >> 31; } else if (dtype == INT64) { int64_t lo64 = 0; for (unsigned int i = 0; i < dtype_len_in; i++) { uint64_t v = (dict_pos + i < dict_size) ? dict[dict_pos + i] : 0; lo64 |= v << (i * 8); } i128_lo = lo64; i128_hi = lo64 >> 63; } else // if (dtype == FIXED_LENGTH_BYTE_ARRAY) { i128_lo = 0; for (unsigned int i = dtype_len_in - min(dtype_len_in, 8); i < dtype_len_in; i++) { uint32_t v = (dict_pos + i < dict_size) ? dict[dict_pos + i] : 0; i128_lo = (i128_lo << 8) | v; } if (dtype_len_in > 8) { i128_hi = 0; for (unsigned int i = dtype_len_in - min(dtype_len_in, 16); i < dtype_len_in - 8; i++) { uint32_t v = (dict_pos + i < dict_size) ? dict[dict_pos + i] : 0; i128_hi = (i128_hi << 8) | v; } if (dtype_len_in < 16) { i128_hi <<= 64 - (dtype_len_in - 8) * 8; i128_hi >>= 64 - (dtype_len_in - 8) * 8; } } else { if (dtype_len_in < 8) { i128_lo <<= 64 - dtype_len_in * 8; i128_lo >>= 64 - dtype_len_in * 8; } i128_hi = i128_lo >> 63; } } scale = s->col.decimal_scale; d = Int128ToDouble_rn(i128_lo, i128_hi); *dst = (scale < 0) ? 
(d * kPow10[min(-scale, 39)]) : (d / kPow10[min(scale, 39)]); } /** * @brief Output a small fixed-length value * * @param[in,out] s Page state input/output * @param[in] src_pos Source position * @param[in] dst Pointer to row output data */ template <typename T> inline __device__ void gpuOutputFast(volatile page_state_s *s, int src_pos, T *dst) { const uint8_t *dict; uint32_t dict_pos, dict_size = s->dict_size; if (s->dict_base) { // Dictionary dict_pos = (s->dict_bits > 0) ? s->dict_idx[src_pos & (NZ_BFRSZ - 1)] : 0; dict = s->dict_base; } else { // Plain dict_pos = src_pos; dict = s->data_start; } dict_pos *= (uint32_t)s->dtype_len_in; gpuStoreOutput(dst, dict, dict_pos, dict_size); } /** * @brief Output a N-byte value * * @param[in,out] s Page state input/output * @param[in] src_pos Source position * @param[in] dst8 Pointer to row output data * @param[in] len Length of element */ static __device__ void gpuOutputGeneric(volatile page_state_s *s, int src_pos, uint8_t *dst8, int len) { const uint8_t *dict; uint32_t dict_pos, dict_size = s->dict_size; if (s->dict_base) { // Dictionary dict_pos = (s->dict_bits > 0) ? s->dict_idx[src_pos & (NZ_BFRSZ - 1)] : 0; dict = s->dict_base; } else { // Plain dict_pos = src_pos; dict = s->data_start; } dict_pos *= (uint32_t)s->dtype_len_in; if (len & 3) { // Generic slow path for (unsigned int i = 0; i < len; i++) { dst8[i] = (dict_pos + i < dict_size) ? dict[dict_pos + i] : 0; } } else { // Copy 4 bytes at a time const uint8_t *src8 = dict; unsigned int ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // bytes -> bits for (unsigned int i = 0; i < len; i += 4) { uint32_t bytebuf; if (dict_pos < dict_size) { bytebuf = *reinterpret_cast<const uint32_t *>(src8 + dict_pos); if (ofs) { uint32_t bytebufnext = *reinterpret_cast<const uint32_t *>(src8 + dict_pos + 4); bytebuf = __funnelshift_r(bytebuf, bytebufnext, ofs); } } else { bytebuf = 0; } dict_pos += 4; *reinterpret_cast<uint32_t *>(dst8 + i) = bytebuf; } } } /** * @brief Sets up block-local page state information from the global pages. 
* * @param[in, out] s The local page state to be filled in * @param[in] p The global page to be copied from * @param[in] chunks The global list of chunks * @param[in] num_rows Maximum number of rows to read * @param[in] min_row crop all rows below min_row * @param[in] num_chunk Number of column chunks */ static __device__ bool setupLocalPageInfo(page_state_s *const s, PageInfo *p, ColumnChunkDesc const *chunks, size_t min_row, size_t num_rows, int32_t num_chunks) { int t = threadIdx.x; int chunk_idx; // Fetch page info // NOTE: Assumes that sizeof(PageInfo) <= 256 (and is padded to 4 bytes) if (t < sizeof(PageInfo) / sizeof(uint32_t)) { reinterpret_cast<uint32_t *>(&s->page)[t] = reinterpret_cast<const uint32_t *>(p)[t]; } __syncthreads(); if (s->page.flags & PAGEINFO_FLAGS_DICTIONARY) { return false; } // Fetch column chunk info chunk_idx = s->page.chunk_idx; if ((uint32_t)chunk_idx < (uint32_t)num_chunks) { // NOTE: Assumes that sizeof(ColumnChunkDesc) <= 256 (and is padded to 4 bytes) if (t < sizeof(ColumnChunkDesc) / sizeof(uint32_t)) { reinterpret_cast<uint32_t *>(&s->col)[t] = reinterpret_cast<const uint32_t *>(&chunks[chunk_idx])[t]; } } // zero nested value and valid counts int d = 0; while (d < s->page.num_nesting_levels) { if (d + t < s->page.num_nesting_levels) { s->page.nesting[d + t].valid_count = 0; s->page.nesting[d + t].value_count = 0; } d += blockDim.x; } __syncthreads(); if (!t) { s->error = 0; // our starting row (absolute index) is // col.start_row == absolute row index // page.chunk-row == relative row index within the chunk size_t page_start_row = s->col.start_row + s->page.chunk_row; // IMPORTANT : nested schemas can have 0 rows in a page but still have // values. The case is: // - On page N-1, the last row starts, with 2/6 values encoded // - On page N, the remaining 4/6 values are encoded, but there are no new rows. // if (s->page.num_input_values > 0 && s->page.num_rows > 0) { if (s->page.num_input_values > 0) { uint8_t *cur = s->page.page_data; uint8_t *end = cur + s->page.uncompressed_page_size; uint32_t dtype_len_out = s->col.data_type >> 3; s->ts_scale = 0; // Validate data type switch (s->col.data_type & 7) { case BOOLEAN: s->dtype_len = 1; // Boolean are stored as 1 byte on the output break; case INT32: case FLOAT: s->dtype_len = 4; break; case INT64: if (s->col.ts_clock_rate) { int32_t units = 0; if (s->col.converted_type == TIME_MICROS || s->col.converted_type == TIMESTAMP_MICROS) units = 1000000; else if (s->col.converted_type == TIME_MILLIS || s->col.converted_type == TIMESTAMP_MILLIS) units = 1000; if (units && units != s->col.ts_clock_rate) s->ts_scale = (s->col.ts_clock_rate < units) ? 
-(units / s->col.ts_clock_rate) : (s->col.ts_clock_rate / units); } // Fall through to DOUBLE case DOUBLE: s->dtype_len = 8; break; case INT96: s->dtype_len = 12; break; case BYTE_ARRAY: s->dtype_len = sizeof(nvstrdesc_s); break; default: // FIXED_LEN_BYTE_ARRAY: s->dtype_len = dtype_len_out; s->error |= (s->dtype_len <= 0); break; } // Special check for downconversions s->dtype_len_in = s->dtype_len; if (s->col.converted_type == DECIMAL) { s->dtype_len = 8; // Convert DECIMAL to 64-bit float } else if ((s->col.data_type & 7) == INT32) { if (dtype_len_out == 1) s->dtype_len = 1; // INT8 output if (dtype_len_out == 2) s->dtype_len = 2; // INT16 output } else if ((s->col.data_type & 7) == BYTE_ARRAY && dtype_len_out == 4) { s->dtype_len = 4; // HASH32 output } else if ((s->col.data_type & 7) == INT96) { s->dtype_len = 8; // Convert to 64-bit timestamp } // first row within the page to start reading if (page_start_row >= min_row) { s->first_row = 0; } else { s->first_row = (int32_t)min(min_row - page_start_row, (size_t)s->page.num_rows); } // # of rows within the page to read s->num_rows = s->page.num_rows; if ((page_start_row + s->first_row) + s->num_rows > min_row + num_rows) { s->num_rows = (int32_t)max((int64_t)(min_row + num_rows - (page_start_row + s->first_row)), INT64_C(0)); } // during the decoding step we need to offset the global output buffers // for each level of nesting so that we write to the section this page // is responsible for. // - for flat schemas, we can do this directly by using row counts // - for nested schemas, these offsets are computed during the preprocess step if (s->col.column_data_base != nullptr) { int max_depth = s->col.max_nesting_depth; for (int idx = 0; idx < max_depth; idx++) { PageNestingInfo *pni = &s->page.nesting[idx]; size_t output_offset; // schemas without lists if (s->col.max_level[level_type::REPETITION] == 0) { output_offset = page_start_row >= min_row ? page_start_row - min_row : 0; } // for schemas with lists, we've already got the exactly value precomputed else { output_offset = pni->page_start_value; } pni->data_out = static_cast<uint8_t *>(s->col.column_data_base[idx]); if (pni->data_out != nullptr) { // anything below max depth with a valid data pointer must be a list, so the // element size is the size of the offset type. uint32_t len = idx < max_depth - 1 ? sizeof(cudf::size_type) : s->dtype_len; pni->data_out += (output_offset * len); } pni->valid_map = s->col.valid_map_base[idx]; if (pni->valid_map != nullptr) { pni->valid_map += output_offset >> 5; pni->valid_map_offset = (int32_t)(output_offset & 0x1f); } } } s->first_output_value = 0; // Find the compressed size of repetition levels cur += InitLevelSection(s, cur, end, level_type::REPETITION); // Find the compressed size of definition levels cur += InitLevelSection(s, cur, end, level_type::DEFINITION); s->dict_bits = 0; s->dict_base = 0; s->dict_size = 0; switch (s->page.encoding) { case PLAIN_DICTIONARY: case RLE_DICTIONARY: // RLE-packed dictionary indices, first byte indicates index length in bits if (((s->col.data_type & 7) == BYTE_ARRAY) && (s->col.str_dict_index)) { // String dictionary: use index s->dict_base = reinterpret_cast<const uint8_t *>(s->col.str_dict_index); s->dict_size = s->col.page_info[0].num_input_values * sizeof(nvstrdesc_s); } else { s->dict_base = s->col.page_info[0].page_data; // dictionary is always stored in the first page s->dict_size = s->col.page_info[0].uncompressed_page_size; } s->dict_run = 0; s->dict_val = 0; s->dict_bits = (cur < end) ? 
*cur++ : 0; if (s->dict_bits > 32 || !s->dict_base) { s->error = (10 << 8) | s->dict_bits; } break; case PLAIN: s->dict_size = static_cast<int32_t>(end - cur); s->dict_val = 0; if ((s->col.data_type & 7) == BOOLEAN) { s->dict_run = s->dict_size * 2 + 1; } break; case RLE: s->dict_run = 0; break; default: s->error = 1; // Unsupported encoding break; } if (cur > end) { s->error = 1; } s->data_start = cur; s->data_end = end; } else { s->error = 1; } s->lvl_count[level_type::REPETITION] = 0; s->lvl_count[level_type::DEFINITION] = 0; s->nz_count = 0; s->num_input_values = s->page.num_input_values; s->dict_pos = 0; s->out_pos = 0; // handle row bounds (skip_rows, min_rows) s->input_row_count = s->first_row; // return the lower bound to compare (page-relative) thread row index against. Explanation: // In the case of nested schemas, rows can span page boundaries. That is to say, // we can encounter the first value for row X on page M, but the last value for page M // might not be the last value for row X. page M+1 (or further) may contain the last value. // // This means that the first values we encounter for a given page (M+1) may not belong to the // row indicated by chunk_row, but to the row before it that spanned page boundaries. If that // previous row is within the overall row bounds, include the values by allowing relative row // index -1 int max_row = (min_row + num_rows) - 1; if (min_row < page_start_row && max_row >= page_start_row - 1) { s->row_index_lower_bound = -1; } else { s->row_index_lower_bound = s->first_row; } // if we're in the decoding step, jump directly to the first // value we care about if (s->col.column_data_base != nullptr) { // for flat hierarchies, we haven't computed skipped_values yet, but we can do so trivially // now if (s->col.max_level[level_type::REPETITION] == 0) { s->page.skipped_values = s->first_row; s->page.skipped_leaf_values = s->first_row; } s->input_value_count = s->page.skipped_values; } else { s->input_value_count = 0; s->input_leaf_count = 0; s->page.skipped_values = -1; s->page.skipped_leaf_values = -1; } __threadfence_block(); } __syncthreads(); return true; } /** * @brief Store a validity mask containing value_count bits into the output validity buffer of the * page. * * @param[in,out] pni The page/nesting information to store the mask in. The validity map offset is * also updated * @param[in] valid_mask The validity mask to be stored * @param[in] value_count # of bits in the validity mask */ static __device__ void store_validity(PageNestingInfo *pni, uint32_t valid_mask, int32_t value_count) { int word_offset = pni->valid_map_offset / 32; int bit_offset = pni->valid_map_offset % 32; // if we fit entirely in the output word if (bit_offset + value_count <= 32) { uint32_t relevant_mask = static_cast<uint32_t>((static_cast<uint64_t>(1) << value_count) - 1); if (relevant_mask == ~0) { pni->valid_map[word_offset] = valid_mask; } else { atomicAnd(pni->valid_map + word_offset, ~(relevant_mask << bit_offset)); atomicOr(pni->valid_map + word_offset, (valid_mask & relevant_mask) << bit_offset); } } // we're going to spill over into the next word. // note : writing both values here is the lazy/slow way. we could be writing just // the first word and rolling the remaining bits over into the next call. // however, some basic performance tests shows almost no difference between these two // methods. More detailed performance testing might be worthwhile here. else { uint32_t bits_left = 32 - bit_offset; // first word. 
strip bits_left bits off the beginning and store that uint32_t relevant_mask = ((1 << bits_left) - 1); uint32_t mask_word0 = valid_mask & relevant_mask; atomicAnd(pni->valid_map + word_offset, ~(relevant_mask << bit_offset)); atomicOr(pni->valid_map + word_offset, mask_word0 << bit_offset); // second word. strip the remainder of the bits off the end and store that relevant_mask = ((1 << (value_count - bits_left)) - 1); uint32_t mask_word1 = valid_mask & (relevant_mask << bits_left); atomicAnd(pni->valid_map + word_offset + 1, ~(relevant_mask)); atomicOr(pni->valid_map + word_offset + 1, mask_word1 >> bits_left); } pni->valid_map_offset += value_count; } /** * @brief Compute the nesting bounds within the hierarchy to add values to, and the definition level * D to which we should considered them null or not. * * @param[out] start_depth The start nesting depth * @param[out] end_depth The end nesting depth (inclusive) * @param[out] d The definition level up to which added values are not-null. if t is out of bounds, * d will be -1 * @param[in] s Local page information * @param[in] input_value_count The current count of input level values we have processed * @param[in] target_input_value_count The desired # of input level values we want to process * @param[in] t Thread index */ inline __device__ void get_nesting_bounds(int &start_depth, int &end_depth, int &d, page_state_s *s, int input_value_count, int32_t target_input_value_count, int t) { start_depth = -1; end_depth = -1; d = -1; if (input_value_count + t < target_input_value_count) { int index = rolling_index(input_value_count + t); d = s->def[index]; // if we have repetition (there are list columns involved) we have to // bound what nesting levels we apply values to if (s->col.max_level[level_type::REPETITION] > 0) { int r = s->rep[index]; start_depth = s->page.nesting[r].start_depth; end_depth = s->page.nesting[d].end_depth; } // for columns without repetition (even ones involving structs) we always // traverse the entire hierarchy. else { start_depth = 0; end_depth = s->col.max_nesting_depth - 1; } } } /** * @brief Process a batch of incoming repetition/definition level values and generate * validity, nested column offsets (where appropriate) and decoding indices. 
* * @param[in] target_input_value_count The # of repetition/definition levels to process up to * @param[in] s Local page information * @param[in] t Thread index */ static __device__ void gpuUpdateValidityOffsetsAndRowIndices(int32_t target_input_value_count, page_state_s *s, int t) { // max nesting depth of the column int max_depth = s->col.max_nesting_depth; // how many (input) values we've processed in the page so far int input_value_count = s->input_value_count; // how many rows we've processed in the page so far int input_row_count = s->input_row_count; // process until we've reached the target while (input_value_count < target_input_value_count) { // determine the nesting bounds for this thread (the range of nesting depths we // will generate new value indices and validity bits for) int start_depth, end_depth, d; get_nesting_bounds( start_depth, end_depth, d, s, input_value_count, target_input_value_count, t); // 4 interesting things to track: // thread_value_count : # of output values from the view of this thread // warp_value_count : # of output values for the whole warp // // thread_valid_count : # of valid values from the view of this thread // warp_valid_count : # of valid values for the whole warp uint32_t thread_value_count, warp_value_count; uint32_t thread_valid_count, warp_valid_count; // track (page-relative) row index for the thread so we can compare against input bounds // keep track of overall # of rows we've read. int is_new_row = start_depth == 0 ? 1 : 0; uint32_t warp_row_count_mask = BALLOT(is_new_row); int32_t thread_row_index = input_row_count + ((__popc(warp_row_count_mask & ((1 << t) - 1)) + is_new_row) - 1); input_row_count += __popc(warp_row_count_mask); // is this thread within row bounds? int in_row_bounds = thread_row_index >= s->row_index_lower_bound && thread_row_index < (s->first_row + s->num_rows) ? 1 : 0; // compute warp and thread value counts uint32_t warp_count_mask = BALLOT((0 >= start_depth && 0 <= end_depth) && in_row_bounds ? 1 : 0); warp_value_count = __popc(warp_count_mask); // Note : ((1 << t) - 1) implies "for all threads before me" thread_value_count = __popc(warp_count_mask & ((1 << t) - 1)); // walk from 0 to max_depth uint32_t next_thread_value_count, next_warp_value_count; for (int s_idx = 0; s_idx < max_depth; s_idx++) { PageNestingInfo *pni = &s->page.nesting[s_idx]; // if we are within the range of nesting levels we should be adding value indices for int in_nesting_bounds = ((s_idx >= start_depth && s_idx <= end_depth) && in_row_bounds) ? 1 : 0; // everything up to the max_def_level is a non-null value uint32_t is_valid = 0; if (d >= pni->max_def_level && in_nesting_bounds) { is_valid = 1; } // compute warp and thread valid counts uint32_t warp_valid_mask; // for flat schemas, a simple ballot_sync gives us the correct count and bit positions because // every value in the input matches to a value in the output if (max_depth == 0) { warp_valid_mask = BALLOT(is_valid); } // for nested schemas, it's more complicated. This warp will visit 32 incoming values, // however not all of them will necessarily represent a value at this nesting level. so the // validity bit for thread t might actually represent output value t-6. the correct position // for thread t's bit is cur_value_count. for cuda 11 we could use __reduce_or_sync(), but // until then we have to do a warp reduce. 
else { warp_valid_mask = WarpReduceOr32(is_valid << thread_value_count); } thread_valid_count = __popc(warp_valid_mask & ((1 << thread_value_count) - 1)); warp_valid_count = __popc(warp_valid_mask); // if this is the value column emit an index for value decoding if (is_valid && s_idx == max_depth - 1) { int idx = pni->valid_count + thread_valid_count; int ofs = pni->value_count + thread_value_count; s->nz_idx[rolling_index(idx)] = ofs; } // compute warp and thread value counts for the -next- nesting level. we need to // do this for nested schemas so that we can emit an offset for the -current- nesting // level. more concretely : the offset for the current nesting level == current length of the // next nesting level if (s_idx < max_depth - 1) { uint32_t next_warp_count_mask = BALLOT((s_idx + 1 >= start_depth && s_idx + 1 <= end_depth && in_row_bounds) ? 1 : 0); next_warp_value_count = __popc(next_warp_count_mask); next_thread_value_count = __popc(next_warp_count_mask & ((1 << t) - 1)); // if we're -not- at a leaf column and we're within nesting/row bounds // and we have a valid data_out pointer, it implies this is a list column, so // emit an offset. if (in_nesting_bounds && pni->data_out != nullptr) { int idx = pni->value_count + thread_value_count; cudf::size_type ofs = s->page.nesting[s_idx + 1].value_count + next_thread_value_count + s->page.nesting[s_idx + 1].page_start_value; (reinterpret_cast<cudf::size_type *>(pni->data_out))[idx] = ofs; } } // increment count of valid values, count of total values, and validity mask if (!t) { if (pni->valid_map != nullptr && in_row_bounds) { store_validity(pni, warp_valid_mask, warp_value_count); } pni->valid_count += warp_valid_count; pni->value_count += warp_value_count; } // propagate value counts for the next level warp_value_count = next_warp_value_count; thread_value_count = next_thread_value_count; } input_value_count += min(32, (target_input_value_count - input_value_count)); SYNCWARP(); } // update if (!t) { // update valid value count for decoding and total # of values we've processed s->nz_count = s->page.nesting[max_depth - 1].valid_count; s->input_value_count = input_value_count; s->input_row_count = input_row_count; } } /** * @brief Process repetition and definition levels up to the target count of leaf values. * * In order to decode actual leaf values from the input stream, we need to generate the * list of non-null value positions (page_state_s::nz_idx). We do this by processing * the repetition and definition level streams. This process also generates validity information, * and offset column values in the case of nested schemas. Because of the way the streams * are encoded, this function may generate slightly more than target_leaf_count. * * Only runs on 1 warp. 
* * @param[in] s The local page state * @param[in] target_leaf_count Target count of non-null leaf values to generate indices for * @param[in] t Thread index */ __device__ void gpuDecodeLevels(page_state_s *s, int32_t target_leaf_count, int t) { bool has_repetition = s->col.max_level[level_type::REPETITION] > 0; constexpr int batch_size = 32; int cur_leaf_count = target_leaf_count; while (!s->error && s->nz_count < target_leaf_count && s->input_value_count < s->num_input_values) { if (has_repetition) { gpuDecodeStream(s->rep, s, cur_leaf_count, t, level_type::REPETITION); } gpuDecodeStream(s->def, s, cur_leaf_count, t, level_type::DEFINITION); SYNCWARP(); // because the rep and def streams are encoded seperately, we cannot request an exact // # of values to be decoded at once. we can only process the lowest # of decoded rep/def // levels we get. int actual_leaf_count = has_repetition ? min(s->lvl_count[level_type::REPETITION], s->lvl_count[level_type::DEFINITION]) : s->lvl_count[level_type::DEFINITION]; // process what we got back gpuUpdateValidityOffsetsAndRowIndices(actual_leaf_count, s, t); cur_leaf_count = actual_leaf_count + batch_size; SYNCWARP(); } } /** * @brief Process a batch of incoming repetition/definition level values to generate * per-nesting level output column size for this page. * * Each page represents one piece of the overall output column. The total output (cudf) * column sizes are the sum of the values in each individual page. * * @param[in] s The local page info * @param[in] target_input_value_count The # of repetition/definition levels to process up to * @param[in] t Thread index * @param[in] bounds_set Whether or not s->row_index_lower_bound, s->first_row and s->num_rows * have been computed for this page (they will only be set in the second/trim pass). */ static __device__ void gpuUpdatePageSizes(page_state_s *s, int32_t target_input_value_count, int t, bool bounds_set) { // max nesting depth of the column int max_depth = s->col.max_nesting_depth; // bool has_repetition = s->col.max_level[level_type::REPETITION] > 0 ? true : false; // how many input level values we've processed in the page so far int input_value_count = s->input_value_count; // how many leaf values we've processed in the page so far int input_leaf_count = s->input_leaf_count; // how many rows we've processed in the page so far int input_row_count = s->input_row_count; while (input_value_count < target_input_value_count) { int start_depth, end_depth, d; get_nesting_bounds( start_depth, end_depth, d, s, input_value_count, target_input_value_count, t); // count rows and leaf values int is_new_row = start_depth == 0 ? 1 : 0; uint32_t warp_row_count_mask = BALLOT(is_new_row); int is_new_leaf = (d >= s->page.nesting[max_depth - 1].max_def_level) ? 1 : 0; uint32_t warp_leaf_count_mask = BALLOT(is_new_leaf); // is this thread within row bounds? on the first pass we don't know the bounds, so we will be // computing the full size of the column. on the second pass, we will know our actual row // bounds, so the computation will cap sizes properly. int in_row_bounds = 1; if (bounds_set) { // absolute row index int32_t thread_row_index = input_row_count + ((__popc(warp_row_count_mask & ((1 << t) - 1)) + is_new_row) - 1); in_row_bounds = thread_row_index >= s->row_index_lower_bound && thread_row_index < (s->first_row + s->num_rows) ? 
1 : 0; uint32_t row_bounds_mask = BALLOT(in_row_bounds); int first_thread_in_range = __ffs(row_bounds_mask) - 1; // if we've found the beginning of the first row, mark down the position // in the def/repetition buffer (skipped_values) and the data buffer (skipped_leaf_values) if (!t && first_thread_in_range >= 0 && s->page.skipped_values < 0) { // how many values we've skipped in the rep/def levels s->page.skipped_values = input_value_count + first_thread_in_range; // how many values we've skipped in the actual data stream s->page.skipped_leaf_values = input_leaf_count + __popc(warp_leaf_count_mask & ((1 << first_thread_in_range) - 1)); } } // increment counts across all nesting depths for (int s_idx = 0; s_idx < max_depth; s_idx++) { // if we are within the range of nesting levels we should be adding value indices for int in_nesting_bounds = (s_idx >= start_depth && s_idx <= end_depth && in_row_bounds) ? 1 : 0; uint32_t count_mask = BALLOT(in_nesting_bounds); if (!t) { s->page.nesting[s_idx].size += __popc(count_mask); } } input_value_count += min(32, (target_input_value_count - input_value_count)); input_row_count += __popc(warp_row_count_mask); input_leaf_count += __popc(warp_leaf_count_mask); } // update final page value count if (!t) { s->input_value_count = target_input_value_count; s->input_leaf_count = input_leaf_count; s->input_row_count = input_row_count; } } /** * @brief Kernel for computing per-page column size information for all nesting levels. * * This function will write out the size field for each level of nesting. * * @param[in,out] pages List of pages * @param[in] chunks List of column chunks * @param[in] num_chunks Number of column chunks * @param[in] min_row Row index to start reading at * @param[in] num_rows Maximum number of rows to read * @param[in] num_chunks Number of column chunks * @param[in] trim_pass Whether or not this is the trim pass. We first have to compute * the full size information of every page before we come through in a second (trim) pass * to determine what subset of rows in this page we should be reading. */ // blockDim {NTHREADS,1,1} extern "C" __global__ void __launch_bounds__(NTHREADS) gpuComputePageSizes(PageInfo *pages, ColumnChunkDesc const *chunks, size_t min_row, size_t num_rows, int32_t num_chunks, bool trim_pass) { __shared__ __align__(16) page_state_s state_g; page_state_s *const s = &state_g; int page_idx = blockIdx.x; int t = threadIdx.x; PageInfo *pp = &pages[page_idx]; if (!setupLocalPageInfo( s, pp, chunks, trim_pass ? min_row : 0, trim_pass ? num_rows : INT_MAX, num_chunks)) { return; } // zero sizes int d = 0; while (d < s->page.num_nesting_levels) { if (d + t < s->page.num_nesting_levels) { s->page.nesting[d + t].size = 0; } d += blockDim.x; } if (!t) { s->page.skipped_values = -1; s->page.skipped_leaf_values = -1; s->input_row_count = 0; s->input_value_count = 0; // if this isn't the trim pass, make sure we visit absolutely everything if (!trim_pass) { s->first_row = 0; s->num_rows = INT_MAX; s->row_index_lower_bound = -1; } } __syncthreads(); bool has_repetition = s->col.max_level[level_type::REPETITION] > 0; // optimization : it might be useful to have a version of gpuDecodeStream that could go // wider than 1 warp. Currently it only only uses 1 warp so that it can overlap work // with the value decoding step when in the actual value decoding kernel. 
however during // this preprocess step we have no such limits - we could go as wide as NTHREADS if (t < 32) { constexpr int batch_size = 32; int target_input_count = batch_size; while (!s->error && s->input_value_count < s->num_input_values) { // decode repetition and definition levels. these will attempt to decode at // least up to the target, but may decode a few more. if (has_repetition) { gpuDecodeStream(s->rep, s, target_input_count, t, level_type::REPETITION); } gpuDecodeStream(s->def, s, target_input_count, t, level_type::DEFINITION); SYNCWARP(); // we may have decoded different amounts from each stream, so only process what we've been int actual_input_count = has_repetition ? min(s->lvl_count[level_type::REPETITION], s->lvl_count[level_type::DEFINITION]) : s->lvl_count[level_type::DEFINITION]; // process what we got back gpuUpdatePageSizes(s, actual_input_count, t, trim_pass); target_input_count = actual_input_count + batch_size; SYNCWARP(); } } // update # rows in the actual page if (!t) { pp->num_rows = s->page.nesting[0].size; pp->skipped_values = s->page.skipped_values; pp->skipped_leaf_values = s->page.skipped_leaf_values; } } /** * @brief Kernel for co the column data stored in the pages * * This function will write the page data and the page data's validity to the * output specified in the page's column chunk. If necessary, additional * conversion will be performed to translate from the Parquet datatype to * desired output datatype (ex. 32-bit to 16-bit, string to hash). * * @param[in] pages List of pages * @param[in,out] chunks List of column chunks * @param[in] min_row Row index to start reading at * @param[in] num_rows Maximum number of rows to read * @param[in] num_chunks Number of column chunks */ // blockDim {NTHREADS,1,1} extern "C" __global__ void __launch_bounds__(NTHREADS) gpuDecodePageData(PageInfo *pages, ColumnChunkDesc const *chunks, size_t min_row, size_t num_rows, int32_t num_chunks) { __shared__ __align__(16) page_state_s state_g; page_state_s *const s = &state_g; int page_idx = blockIdx.x; int t = threadIdx.x; int out_thread0; if (!setupLocalPageInfo(s, &pages[page_idx], chunks, min_row, num_rows, num_chunks)) { return; } if (s->dict_base) { out_thread0 = (s->dict_bits > 0) ? 64 : 32; } else { out_thread0 = ((s->col.data_type & 7) == BOOLEAN || (s->col.data_type & 7) == BYTE_ARRAY) ? 64 : 32; } uint32_t skipped_leaf_values = s->page.skipped_leaf_values; while (!s->error && (s->input_value_count < s->num_input_values || s->out_pos < s->nz_count)) { int target_pos; int out_pos = s->out_pos; if (t < out_thread0) { target_pos = min(out_pos + 2 * (NTHREADS - out_thread0), s->nz_count + (NTHREADS - out_thread0)); } else { target_pos = min(s->nz_count, out_pos + NTHREADS - out_thread0); if (out_thread0 > 32) { target_pos = min(target_pos, s->dict_pos); } } __syncthreads(); if (t < 32) { // decode repetition and definition levels. 
// - update validity vectors // - updates offsets (for nested columns) // - produces non-NULL value indices in s->nz_idx for subsequent decoding gpuDecodeLevels(s, target_pos, t); } else if (t < out_thread0) { uint32_t src_target_pos = target_pos + skipped_leaf_values; // WARP1: Decode dictionary indices, booleans or string positions if (s->dict_base) { src_target_pos = gpuDecodeDictionaryIndices(s, src_target_pos, t & 0x1f); } else if ((s->col.data_type & 7) == BOOLEAN) { src_target_pos = gpuDecodeRleBooleans(s, src_target_pos, t & 0x1f); } else if ((s->col.data_type & 7) == BYTE_ARRAY) { gpuInitStringDescriptors(s, src_target_pos, t & 0x1f); } if (t == 32) { *(volatile int32_t *)&s->dict_pos = src_target_pos; } } else { // WARP1..WARP3: Decode values int dtype = s->col.data_type & 7; out_pos += t - out_thread0; uint32_t src_pos = out_pos + skipped_leaf_values; int output_value_idx = s->nz_idx[rolling_index(out_pos)]; if (out_pos < target_pos && output_value_idx >= 0 && output_value_idx < s->num_input_values) { // nesting level that is storing actual leaf values int leaf_level_index = s->col.max_nesting_depth - 1; uint32_t dtype_len = s->dtype_len; void *dst = s->page.nesting[leaf_level_index].data_out + static_cast<size_t>(output_value_idx) * dtype_len; if (dtype == BYTE_ARRAY) gpuOutputString(s, src_pos, dst); else if (dtype == BOOLEAN) gpuOutputBoolean(s, src_pos, static_cast<uint8_t *>(dst)); else if (s->col.converted_type == DECIMAL) gpuOutputDecimal(s, src_pos, static_cast<double *>(dst), dtype); else if (dtype == INT96) gpuOutputInt96Timestamp(s, src_pos, static_cast<int64_t *>(dst)); else if (dtype_len == 8) { if (s->ts_scale) gpuOutputInt64Timestamp(s, src_pos, static_cast<int64_t *>(dst)); else gpuOutputFast(s, src_pos, static_cast<uint2 *>(dst)); } else if (dtype_len == 4) gpuOutputFast(s, src_pos, static_cast<uint32_t *>(dst)); else gpuOutputGeneric(s, src_pos, static_cast<uint8_t *>(dst), dtype_len); } if (t == out_thread0) { *(volatile int32_t *)&s->out_pos = target_pos; } } __syncthreads(); } } struct chunk_row_output_iter { PageInfo *p; using value_type = size_type; using difference_type = size_type; using pointer = size_type *; using reference = size_type &; using iterator_category = thrust::output_device_iterator_tag; chunk_row_output_iter operator+ __host__ __device__(int i) { return chunk_row_output_iter{p + i}; } void operator++ __host__ __device__() { p++; } reference operator[] __device__(int i) { return p[i].chunk_row; } reference operator*__device__() { return p->chunk_row; } void operator= __device__(value_type v) { p->chunk_row = v; } }; struct start_offset_output_iterator { PageInfo *pages; int *page_indices; int cur_index; int src_col_schema; int nesting_depth; int empty = 0; using value_type = size_type; using difference_type = size_type; using pointer = size_type *; using reference = size_type &; using iterator_category = thrust::output_device_iterator_tag; start_offset_output_iterator operator+ __host__ __device__(int i) { return start_offset_output_iterator{ pages, page_indices, cur_index + i, src_col_schema, nesting_depth}; } void operator++ __host__ __device__() { cur_index++; } reference operator[] __device__(int i) { return dereference(cur_index + i); } reference operator*__device__() { return dereference(cur_index); } private: reference __device__ dereference(int index) { PageInfo const &p = pages[page_indices[index]]; if (p.src_col_schema != src_col_schema || p.flags & PAGEINFO_FLAGS_DICTIONARY) { return empty; } return 
p.nesting[nesting_depth].page_start_value; } }; /** * @copydoc cudf::io::parquet::gpu::PreprocessColumnData */ cudaError_t PreprocessColumnData(hostdevice_vector<PageInfo> &pages, hostdevice_vector<ColumnChunkDesc> const &chunks, std::vector<input_column_info> &input_columns, std::vector<cudf::io::detail::column_buffer> &output_columns, size_t num_rows, size_t min_row, cudaStream_t stream, rmm::mr::device_memory_resource *mr) { dim3 dim_block(NTHREADS, 1); dim3 dim_grid(pages.size(), 1); // 1 threadblock per page // computes: // PageNestingInfo::size for each level of nesting, for each page. // The output from this does not take row bounds (num_rows, min_row) into account gpuComputePageSizes<<<dim_grid, dim_block, 0, stream>>>( pages.device_ptr(), chunks.device_ptr(), min_row, num_rows, chunks.size(), false); CUDA_TRY(cudaStreamSynchronize(stream)); // computes: // PageInfo::chunk_row for all pages auto key_input = thrust::make_transform_iterator( pages.device_ptr(), [] __device__(PageInfo const &page) { return page.chunk_idx; }); auto page_input = thrust::make_transform_iterator( pages.device_ptr(), [] __device__(PageInfo const &page) { return page.num_rows; }); thrust::exclusive_scan_by_key(rmm::exec_policy(stream)->on(stream), key_input, key_input + pages.size(), page_input, chunk_row_output_iter{pages.device_ptr()}); // computes: // PageNestingInfo::size for each level of nesting, for each page, taking row bounds into account. // PageInfo::skipped_values, which tells us where to start decoding in the input gpuComputePageSizes<<<dim_grid, dim_block, 0, stream>>>( pages.device_ptr(), chunks.device_ptr(), min_row, num_rows, chunks.size(), true); // retrieve pages back (PageInfo::num_rows has been set. if we don't bring it // back, this value will get overwritten later on). pages.device_to_host(stream, true); // ordering of pages is by input column schema, repeated across row groups. so // if we had 3 columns, each with 2 pages, and 1 row group, our schema values might look like // // 1, 1, 2, 2, 3, 3 // // However, if we had more than one row group, the pattern would be // // 1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3 // ^ row group 0 | // ^ row group 1 // // To use exclusive_scan_by_key, the ordering we actually want is // // 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3 // // We also need to preserve key-relative page ordering, so we need to use a stable sort. rmm::device_uvector<int> page_keys(pages.size(), stream); rmm::device_uvector<int> page_index(pages.size(), stream); { thrust::transform(rmm::exec_policy(stream)->on(stream), pages.device_ptr(), pages.device_ptr() + pages.size(), page_keys.begin(), [] __device__(PageInfo const &page) { return page.src_col_schema; }); thrust::sequence(rmm::exec_policy(stream)->on(stream), page_index.begin(), page_index.end()); thrust::stable_sort_by_key(rmm::exec_policy(stream)->on(stream), page_keys.begin(), page_keys.end(), page_index.begin(), thrust::less<int>()); } // compute output column sizes by examining the pages of the -input- columns for (size_t idx = 0; idx < input_columns.size(); idx++) { auto const &input_col = input_columns[idx]; auto src_col_schema = input_col.schema_idx; size_t max_depth = input_col.nesting_depth(); auto *cols = &output_columns; for (size_t l_idx = 0; l_idx < input_col.nesting_depth(); l_idx++) { auto &out_buf = (*cols)[input_col.nesting[l_idx]]; cols = &out_buf.children; // size iterator. 
indexes pages by sorted order auto size_input = thrust::make_transform_iterator( page_index.begin(), [src_col_schema, l_idx, pages = pages.device_ptr()] __device__(int index) { auto const &page = pages[index]; if (page.src_col_schema != src_col_schema || page.flags & PAGEINFO_FLAGS_DICTIONARY) { return 0; } return page.nesting[l_idx].size; }); // compute column size. // for struct columns, higher levels of the output columns are shared between input // columns. so don't compute any given level more than once. if (out_buf.size == 0) { int size = thrust::reduce( rmm::exec_policy(stream)->on(stream), size_input, size_input + pages.size()); // if this is a list column add 1 for non-leaf levels for the terminating offset if (out_buf.type.id() == type_id::LIST && l_idx < max_depth) { size++; } // allocate out_buf.create(size, stream, mr); } // compute per-page start offset thrust::exclusive_scan_by_key(rmm::exec_policy(stream)->on(stream), page_keys.begin(), page_keys.end(), size_input, start_offset_output_iterator{pages.device_ptr(), page_index.begin(), 0, static_cast<int>(src_col_schema), static_cast<int>(l_idx)}); } } return cudaSuccess; } /** * @copydoc cudf::io::parquet::gpu::DecodePageData */ cudaError_t __host__ DecodePageData(hostdevice_vector<PageInfo> &pages, hostdevice_vector<ColumnChunkDesc> const &chunks, size_t num_rows, size_t min_row, cudaStream_t stream) { dim3 dim_block(NTHREADS, 1); dim3 dim_grid(pages.size(), 1); // 1 threadblock per page gpuDecodePageData<<<dim_grid, dim_block, 0, stream>>>( pages.device_ptr(), chunks.device_ptr(), min_row, num_rows, chunks.size()); return cudaSuccess; } } // namespace gpu } // namespace parquet } // namespace io } // namespace cudf
903714c80517c12328c4525f1517f3343b6aad45.hip
// !!! This is a file automatically generated by hipify!!! #include "../common/common.h" #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> /** * This example illustrates implementation of custom atomic operations using * CUDA's built-in atomicCAS function to implement atomic signed 32-bit integer * addition. **/ __device__ int sqrt1(int *address) { // Create an initial guess for the value stored at *address. int guess = *address; int nValue = (int)sqrtf(guess)+guess; int oldValue = atomicCAS(address, guess,nValue); // Loop while the guess is incorrect. while (oldValue != guess) { guess = oldValue; int nValue = (int)sqrtf(guess)+guess; oldValue = atomicCAS(address, guess, nValue); } return oldValue; } __device__ int sqrt2(int *address) { // Create an initial guess for the value stored at *address. int guess = *address; int nValue =__fsqrt_rn(guess)+guess; int oldValue = atomicCAS(address, guess,nValue); // Loop while the guess is incorrect. while (oldValue != guess) { guess = oldValue; int nValue = __fsqrt_rn(guess)+guess; oldValue = atomicCAS(address, guess, nValue); } return oldValue; } __global__ void kernel1(int *sharedInteger) { sqrt1(sharedInteger); } __global__ void kernel2(int *sharedInteger) { sqrt2(sharedInteger); } int main(int argc, char **argv) { hipEvent_t start, stop; CHECK(hipEventCreate(&start)); CHECK(hipEventCreate(&stop)); int * value; int N = 25; if (argc > 1) N = atoi(argv[1]); CHECK(hipMallocManaged((void **)&value, sizeof(int))); *value = N; CHECK(hipEventRecord(start, 0)); hipLaunchKernelGGL(( kernel1), dim3(1),dim3(1), 0, 0, value); CHECK(hipDeviceSynchronize()); CHECK(hipEventRecord(stop, 0)); CHECK(hipEventSynchronize(stop)); float elapsed_time; CHECK(hipEventElapsedTime(&elapsed_time, start, stop)); printf("Measured time for kernel 1 execution = %f\n", elapsed_time / 1000.0f); printf("OLA sqrtf %d\n", *value); *value = N; CHECK(hipEventRecord(start, 0)); hipLaunchKernelGGL(( kernel2), dim3(1),dim3(1), 0, 0, value); CHECK(hipDeviceSynchronize()); CHECK(hipEventRecord(stop, 0)); CHECK(hipEventSynchronize(stop)); CHECK(hipEventElapsedTime(&elapsed_time, start, stop)); printf("Measured time for kernel 2 execution = %f\n", elapsed_time / 1000.0f); printf("OLA __fsqrt_rn %d\n", *value); CHECK(hipFree(value)); CHECK(hipEventDestroy(start)); CHECK(hipEventDestroy(stop)); return 0; } /* Measured time for kernel 1 execution = 0.000078 OLA sqrtf 30 Measured time for kernel 2 execution = 0.000124 OLA __fsqrt_rn 30 */
903714c80517c12328c4525f1517f3343b6aad45.cu
#include "../common/common.h" #include <stdio.h> #include <stdlib.h> #include <cuda.h> /** * This example illustrates implementation of custom atomic operations using * CUDA's built-in atomicCAS function to implement atomic signed 32-bit integer * addition. **/ __device__ int sqrt1(int *address) { // Create an initial guess for the value stored at *address. int guess = *address; int nValue = (int)sqrtf(guess)+guess; int oldValue = atomicCAS(address, guess,nValue); // Loop while the guess is incorrect. while (oldValue != guess) { guess = oldValue; int nValue = (int)sqrtf(guess)+guess; oldValue = atomicCAS(address, guess, nValue); } return oldValue; } __device__ int sqrt2(int *address) { // Create an initial guess for the value stored at *address. int guess = *address; int nValue =__fsqrt_rn(guess)+guess; int oldValue = atomicCAS(address, guess,nValue); // Loop while the guess is incorrect. while (oldValue != guess) { guess = oldValue; int nValue = __fsqrt_rn(guess)+guess; oldValue = atomicCAS(address, guess, nValue); } return oldValue; } __global__ void kernel1(int *sharedInteger) { sqrt1(sharedInteger); } __global__ void kernel2(int *sharedInteger) { sqrt2(sharedInteger); } int main(int argc, char **argv) { cudaEvent_t start, stop; CHECK(cudaEventCreate(&start)); CHECK(cudaEventCreate(&stop)); int * value; int N = 25; if (argc > 1) N = atoi(argv[1]); CHECK(cudaMallocManaged((void **)&value, sizeof(int))); *value = N; CHECK(cudaEventRecord(start, 0)); kernel1<<<1,1>>>(value); CHECK(cudaDeviceSynchronize()); CHECK(cudaEventRecord(stop, 0)); CHECK(cudaEventSynchronize(stop)); float elapsed_time; CHECK(cudaEventElapsedTime(&elapsed_time, start, stop)); printf("Measured time for kernel 1 execution = %f\n", elapsed_time / 1000.0f); printf("OLA sqrtf %d\n", *value); *value = N; CHECK(cudaEventRecord(start, 0)); kernel2<<<1,1>>>(value); CHECK(cudaDeviceSynchronize()); CHECK(cudaEventRecord(stop, 0)); CHECK(cudaEventSynchronize(stop)); CHECK(cudaEventElapsedTime(&elapsed_time, start, stop)); printf("Measured time for kernel 2 execution = %f\n", elapsed_time / 1000.0f); printf("OLA __fsqrt_rn %d\n", *value); CHECK(cudaFree(value)); CHECK(cudaEventDestroy(start)); CHECK(cudaEventDestroy(stop)); return 0; } /* Measured time for kernel 1 execution = 0.000078 OLA sqrtf 30 Measured time for kernel 2 execution = 0.000124 OLA __fsqrt_rn 30 */
0079fd58212bef0430380bd529f34dfc3fc93a69.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cutf/thread.hpp> #include <iostream> __global__ void get_lane_id_kernel() { const unsigned thread_id = threadIdx.x; const unsigned lane_id = cutf::thread::get_lane_id(); const unsigned warp_id = cutf::thread::get_warp_id(); std::printf("threadIdx.x = %u, lane_id = %u, warp_id = %u, warp_size_cont = %u\n", thread_id, lane_id, warp_id, cutf::thread::warp_size_const); } int main(){ hipLaunchKernelGGL(( get_lane_id_kernel), dim3(1), dim3(64), 0, 0, ); hipDeviceSynchronize(); }
0079fd58212bef0430380bd529f34dfc3fc93a69.cu
#include <cutf/thread.hpp> #include <iostream> __global__ void get_lane_id_kernel() { const unsigned thread_id = threadIdx.x; const unsigned lane_id = cutf::thread::get_lane_id(); const unsigned warp_id = cutf::thread::get_warp_id(); std::printf("threadIdx.x = %u, lane_id = %u, warp_id = %u, warp_size_cont = %u\n", thread_id, lane_id, warp_id, cutf::thread::warp_size_const); } int main(){ get_lane_id_kernel<<<1, 64>>>(); cudaDeviceSynchronize(); }
99247c28866d81b68c2ed320f1532ec9e5ce8241.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "dofrays.h" #include <librta/basic_types.h> #include <librta/cuda-kernels.h> #include <librta/cuda-vec.h> namespace rta { namespace cuda { namespace k { __global__ void setup_shirley_lens_rays(float *dirs, float *orgs, float *maxts, float fovy, float aspect, int w, int h, float3 view_dir, float3 pos, float3 up, float maxt, float focus_distance, float aperture, float eye_to_lens, gi::cuda::mt_pool3f uniform_random_01) { int2 gid = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); if (gid.x >= w || gid.y >= h) return; int id = gid.y*w+gid.x; maxts[gid.y * w + gid.x] = maxt; fovy /= 2.0; float height = tanf(M_PI * fovy / 180.0f); float width = aspect * height; float u_s = (((float)gid.x+0.5f)/(float)w) * 2.0f - 1.0f; // \in (-1,1) float v_s = (((float)gid.y+0.5f)/(float)h) * 2.0f - 1.0f; u_s = width * u_s; // \in (-pw/2, pw/2) v_s = height * v_s; float3 vd = view_dir; float3 vu = up; float3 W, TxW, U, V; div_vec3f_by_scalar(&W, &vd, length_of_vec3f(&vd)); cross_vec3f(&TxW, &vu, &W); div_vec3f_by_scalar(&U, &TxW, length_of_vec3f(&TxW)); cross_vec3f(&V, &W, &U); float3 dir = make_float3(0,0,0), tmp; mul_vec3f_by_scalar(&dir, &U, u_s); mul_vec3f_by_scalar(&tmp, &V, v_s); add_components_vec3f(&dir, &dir, &tmp); add_components_vec3f(&dir, &dir, &W); normalize_vec3f(&dir); float3 pos_on_focal_plane = pos + dir*(1.0f/(dir|view_dir))*focus_distance; if (gid.x == 200 && gid.y == 100) { printf("dir %6.6f %6.6f %6.6f %f\n", dir.x, dir.y, dir.z, (dir|view_dir)); printf("pos %6.6f %6.6f %6.6f\n", pos.x, pos.y, pos.z); printf("pos %6.6f %6.6f %6.6f\n", pos_on_focal_plane.x, pos_on_focal_plane.y, pos_on_focal_plane.z); } float2 jitter; int i; do { float3 random = gi::next_random3f(uniform_random_01, id+17*i); jitter = make_float2(random.x-0.5f, random.y-0.5f); } while (jitter.x*jitter.x + jitter.y*jitter.y > 1.0f); float3 jitter_pos = pos + U*jitter.x*aperture + V*jitter.y*aperture; dir = (pos_on_focal_plane - jitter_pos); normalize_vec3f(&dir); dirs[3*(gid.y * w + gid.x)+0] = dir.x; dirs[3*(gid.y * w + gid.x)+1] = dir.y; dirs[3*(gid.y * w + gid.x)+2] = dir.z; orgs[3*(gid.y * w + gid.x)+0] = jitter_pos.x; orgs[3*(gid.y * w + gid.x)+1] = jitter_pos.y; orgs[3*(gid.y * w + gid.x)+2] = jitter_pos.z; } } void setup_shirley_lens_rays(float *dirs, float *orgs, float *maxts, float fovy, float aspect, int w, int h, float3 *view_dir, float3 *pos, float3 *up, float maxt, float focus_distance, float aperture, float eye_to_lens, gi::cuda::mt_pool3f uniform_random_01) { checked_cuda(hipPeekAtLastError()); dim3 threads(16, 16); dim3 blocks = block_configuration_2d(w, h, threads); hipLaunchKernelGGL(( k::setup_shirley_lens_rays), dim3(blocks), dim3(threads), 0, 0, dirs, orgs, maxts, fovy, aspect, w, h, *view_dir, *pos, *up, maxt, focus_distance, aperture, eye_to_lens, uniform_random_01); checked_cuda(hipPeekAtLastError()); checked_cuda(hipDeviceSynchronize()); } } }
99247c28866d81b68c2ed320f1532ec9e5ce8241.cu
#include "dofrays.h" #include <librta/basic_types.h> #include <librta/cuda-kernels.h> #include <librta/cuda-vec.h> namespace rta { namespace cuda { namespace k { __global__ void setup_shirley_lens_rays(float *dirs, float *orgs, float *maxts, float fovy, float aspect, int w, int h, float3 view_dir, float3 pos, float3 up, float maxt, float focus_distance, float aperture, float eye_to_lens, gi::cuda::mt_pool3f uniform_random_01) { int2 gid = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); if (gid.x >= w || gid.y >= h) return; int id = gid.y*w+gid.x; maxts[gid.y * w + gid.x] = maxt; fovy /= 2.0; float height = tanf(M_PI * fovy / 180.0f); float width = aspect * height; float u_s = (((float)gid.x+0.5f)/(float)w) * 2.0f - 1.0f; // \in (-1,1) float v_s = (((float)gid.y+0.5f)/(float)h) * 2.0f - 1.0f; u_s = width * u_s; // \in (-pw/2, pw/2) v_s = height * v_s; float3 vd = view_dir; float3 vu = up; float3 W, TxW, U, V; div_vec3f_by_scalar(&W, &vd, length_of_vec3f(&vd)); cross_vec3f(&TxW, &vu, &W); div_vec3f_by_scalar(&U, &TxW, length_of_vec3f(&TxW)); cross_vec3f(&V, &W, &U); float3 dir = make_float3(0,0,0), tmp; mul_vec3f_by_scalar(&dir, &U, u_s); mul_vec3f_by_scalar(&tmp, &V, v_s); add_components_vec3f(&dir, &dir, &tmp); add_components_vec3f(&dir, &dir, &W); normalize_vec3f(&dir); float3 pos_on_focal_plane = pos + dir*(1.0f/(dir|view_dir))*focus_distance; if (gid.x == 200 && gid.y == 100) { printf("dir %6.6f %6.6f %6.6f %f\n", dir.x, dir.y, dir.z, (dir|view_dir)); printf("pos %6.6f %6.6f %6.6f\n", pos.x, pos.y, pos.z); printf("pos %6.6f %6.6f %6.6f\n", pos_on_focal_plane.x, pos_on_focal_plane.y, pos_on_focal_plane.z); } float2 jitter; int i; do { float3 random = gi::next_random3f(uniform_random_01, id+17*i); jitter = make_float2(random.x-0.5f, random.y-0.5f); } while (jitter.x*jitter.x + jitter.y*jitter.y > 1.0f); float3 jitter_pos = pos + U*jitter.x*aperture + V*jitter.y*aperture; dir = (pos_on_focal_plane - jitter_pos); normalize_vec3f(&dir); dirs[3*(gid.y * w + gid.x)+0] = dir.x; dirs[3*(gid.y * w + gid.x)+1] = dir.y; dirs[3*(gid.y * w + gid.x)+2] = dir.z; orgs[3*(gid.y * w + gid.x)+0] = jitter_pos.x; orgs[3*(gid.y * w + gid.x)+1] = jitter_pos.y; orgs[3*(gid.y * w + gid.x)+2] = jitter_pos.z; } } void setup_shirley_lens_rays(float *dirs, float *orgs, float *maxts, float fovy, float aspect, int w, int h, float3 *view_dir, float3 *pos, float3 *up, float maxt, float focus_distance, float aperture, float eye_to_lens, gi::cuda::mt_pool3f uniform_random_01) { checked_cuda(cudaPeekAtLastError()); dim3 threads(16, 16); dim3 blocks = block_configuration_2d(w, h, threads); k::setup_shirley_lens_rays<<<blocks, threads>>>(dirs, orgs, maxts, fovy, aspect, w, h, *view_dir, *pos, *up, maxt, focus_distance, aperture, eye_to_lens, uniform_random_01); checked_cuda(cudaPeekAtLastError()); checked_cuda(cudaDeviceSynchronize()); } } }
1c1bf7d55cb3b47e31db704654c8cff512cfaf4b.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <string.h> #include <stdlib.h> #include <crypt.h> #include <math.h> #include <time.h> #include <pthread.h> #include <hip/hip_runtime_api.h> //__global__ --> GPU function which can be launched by many blocks and threads //__device__ --> GPU function or variables //__host__ --> CPU function or variables /*********************************************************************** ******* pass = cxbdwy2745 nvcc -o task3a 2040367_Task3_A.cu ./task3a >task3a.txt ************************************************************************ ******/ char *encrypted_passwords[]{ "cxbdwy2745" }; int i =0; int n_passwords = 1; char *encrypted_passwords[]{ "AN4019" }; void substr(char *dest, char *src, int start, int length){ memcpy(dest, src + start, length); *(dest + length) = '\0'; } __device__ char* CudaCrypt(char* rawPassword){ char * newPassword = (char *) malloc(sizeof(char) * 11); newPassword[0] = rawPassword[0] + 2; newPassword[1] = rawPassword[0] - 2; newPassword[2] = rawPassword[0] + 1; newPassword[3] = rawPassword[1] + 3; newPassword[4] = rawPassword[1] - 3; newPassword[5] = rawPassword[1] - 1; newPassword[6] = rawPassword[2] + 2; newPassword[7] = rawPassword[2] - 2; newPassword[8] = rawPassword[3] + 4; newPassword[9] = rawPassword[3] - 4; newPassword[10] = '\0'; for(int i =0; i<10; i++){ if(i >= 0 && i < 6){ //checking all lower case letter limits if(newPassword[i] > 122){ newPassword[i] = (newPassword[i] - 122) + 97; }else if(newPassword[i] < 97){ newPassword[i] = (97 - newPassword[i]) + 97; } }else{ //checking number section if(newPassword[i] > 57){ newPassword[i] = (newPassword[i] - 57) + 48; }else if(newPassword[i] < 48){ newPassword[i] = (48 - newPassword[i]) + 48; } } } return newPassword; } __global__ void crack(char * alphabet, char * numbers, char *salt_and_encrypted){ char genRawPass[4]; genRawPass[0] = alphabet[blockIdx.x]; genRawPass[1] = alphabet[blockIdx.y]; genRawPass[2] = numbers[threadIdx.x]; genRawPass[3] = numbers[threadIdx.y]; //firstLetter - 'a' - 'z' (26 characters) //secondLetter - 'a' - 'z' (26 characters) //firstNum - '0' - '9' (10 characters) //secondNum - '0' - '9' (10 characters) //Idx --> gives current index of the block or thread printf("%c %c %c %c = %s\n", genRawPass[0],genRawPass[1],genRawPass[2],genRawPass[3], CudaCrypt(genRawPass)); int x, y, z; // Loop counters char salt[7]; // String used in hashing the password. Need space for \0 // incase you have modified the salt value, then should modifiy the number accordingly char plain[7]; // The combination of letters currently being checked // Please modifiy the number when you enlarge the encrypted password. 
char *enc; // Pointer to the encrypted password int count=0; substr(salt, salt_and_encrypted, 0, 6); for(x='A'; x<='Z'; x++){ for(y='A'; y<='Z'; y++){ for(z=0; z<=99; z++){ sprintf(plain, "%c%c%02d", x, y, z); enc = (char *) crypt(plain, salt); count++; if(strcmp(salt_and_encrypted, enc) == 0){ printf("#%-8d%s %s\n", count, plain, enc); //return; //uncomment this line if you want to speed-up the running time, program will find you the cracked password only without exploring all possibilites } } } } } void run(){ void *crack(); for(int i=0;i<n_passwords;i<i++) { crack(encrypted_passwords[i]); } } int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if(dn < 0 ) { ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } int main(int argc, char ** argv){ struct timespec start, finish; long long int time_elapsed; clock_gettime(CLOCK_MONOTONIC, &start); char cpuAlphabet[26] = {'a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z'}; char cpuNumbers[26] = {'0','1','2','3','4','5','6','7','8','9'}; char * gpuAlphabet; hipMalloc( (void**) &gpuAlphabet, sizeof(char) * 26); hipMemcpy(gpuAlphabet, cpuAlphabet, sizeof(char) * 26, hipMemcpyHostToDevice); char * gpuNumbers; hipMalloc( (void**) &gpuNumbers, sizeof(char) * 26); hipMemcpy(gpuNumbers, cpuNumbers, sizeof(char) * 26, hipMemcpyHostToDevice); hipLaunchKernelGGL(( crack), dim3(dim3(26,26,1)), dim3(dim3(10,10,1)) , 0, 0, gpuAlphabet, gpuNumbers ); hipDeviceSynchronize(); run(); clock_gettime(CLOCK_MONOTONIC, &finish); time_difference(&start, &finish, &time_elapsed); printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9)); return 0; }
1c1bf7d55cb3b47e31db704654c8cff512cfaf4b.cu
#include <stdio.h> #include <string.h> #include <stdlib.h> #include <crypt.h> #include <math.h> #include <time.h> #include <pthread.h> #include <cuda_runtime_api.h> //__global__ --> GPU function which can be launched by many blocks and threads //__device__ --> GPU function or variables //__host__ --> CPU function or variables /*********************************************************************** ******* pass = cxbdwy2745 nvcc -o task3a 2040367_Task3_A.cu ./task3a >task3a.txt ************************************************************************ ******/ char *encrypted_passwords[]{ "cxbdwy2745" }; int i =0; int n_passwords = 1; char *encrypted_passwords[]{ "AN4019" }; void substr(char *dest, char *src, int start, int length){ memcpy(dest, src + start, length); *(dest + length) = '\0'; } __device__ char* CudaCrypt(char* rawPassword){ char * newPassword = (char *) malloc(sizeof(char) * 11); newPassword[0] = rawPassword[0] + 2; newPassword[1] = rawPassword[0] - 2; newPassword[2] = rawPassword[0] + 1; newPassword[3] = rawPassword[1] + 3; newPassword[4] = rawPassword[1] - 3; newPassword[5] = rawPassword[1] - 1; newPassword[6] = rawPassword[2] + 2; newPassword[7] = rawPassword[2] - 2; newPassword[8] = rawPassword[3] + 4; newPassword[9] = rawPassword[3] - 4; newPassword[10] = '\0'; for(int i =0; i<10; i++){ if(i >= 0 && i < 6){ //checking all lower case letter limits if(newPassword[i] > 122){ newPassword[i] = (newPassword[i] - 122) + 97; }else if(newPassword[i] < 97){ newPassword[i] = (97 - newPassword[i]) + 97; } }else{ //checking number section if(newPassword[i] > 57){ newPassword[i] = (newPassword[i] - 57) + 48; }else if(newPassword[i] < 48){ newPassword[i] = (48 - newPassword[i]) + 48; } } } return newPassword; } __global__ void crack(char * alphabet, char * numbers, char *salt_and_encrypted){ char genRawPass[4]; genRawPass[0] = alphabet[blockIdx.x]; genRawPass[1] = alphabet[blockIdx.y]; genRawPass[2] = numbers[threadIdx.x]; genRawPass[3] = numbers[threadIdx.y]; //firstLetter - 'a' - 'z' (26 characters) //secondLetter - 'a' - 'z' (26 characters) //firstNum - '0' - '9' (10 characters) //secondNum - '0' - '9' (10 characters) //Idx --> gives current index of the block or thread printf("%c %c %c %c = %s\n", genRawPass[0],genRawPass[1],genRawPass[2],genRawPass[3], CudaCrypt(genRawPass)); int x, y, z; // Loop counters char salt[7]; // String used in hashing the password. Need space for \0 // incase you have modified the salt value, then should modifiy the number accordingly char plain[7]; // The combination of letters currently being checked // Please modifiy the number when you enlarge the encrypted password. 
char *enc; // Pointer to the encrypted password int count=0; substr(salt, salt_and_encrypted, 0, 6); for(x='A'; x<='Z'; x++){ for(y='A'; y<='Z'; y++){ for(z=0; z<=99; z++){ sprintf(plain, "%c%c%02d", x, y, z); enc = (char *) crypt(plain, salt); count++; if(strcmp(salt_and_encrypted, enc) == 0){ printf("#%-8d%s %s\n", count, plain, enc); //return; //uncomment this line if you want to speed-up the running time, program will find you the cracked password only without exploring all possibilites } } } } } void run(){ void *crack(); for(int i=0;i<n_passwords;i<i++) { crack(encrypted_passwords[i]); } } int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if(dn < 0 ) { ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } int main(int argc, char ** argv){ struct timespec start, finish; long long int time_elapsed; clock_gettime(CLOCK_MONOTONIC, &start); char cpuAlphabet[26] = {'a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z'}; char cpuNumbers[26] = {'0','1','2','3','4','5','6','7','8','9'}; char * gpuAlphabet; cudaMalloc( (void**) &gpuAlphabet, sizeof(char) * 26); cudaMemcpy(gpuAlphabet, cpuAlphabet, sizeof(char) * 26, cudaMemcpyHostToDevice); char * gpuNumbers; cudaMalloc( (void**) &gpuNumbers, sizeof(char) * 26); cudaMemcpy(gpuNumbers, cpuNumbers, sizeof(char) * 26, cudaMemcpyHostToDevice); crack<<< dim3(26,26,1), dim3(10,10,1) >>>( gpuAlphabet, gpuNumbers ); cudaThreadSynchronize(); run(); clock_gettime(CLOCK_MONOTONIC, &finish); time_difference(&start, &finish, &time_elapsed); printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9)); return 0; }
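The crack kernel above maps one four-character candidate to each (block, thread) pair of a dim3(26,26,1) x dim3(10,10,1) launch, but its comparison goes through crypt() from crypt.h, which is a host-only libc routine and is not callable from device code. A minimal sketch of keeping the comparison entirely on the device: the target is copied to GPU memory, each thread encodes its candidate and compares byte by byte, and the matching plaintext is written back. The toy_encode transform below is a trivial stand-in, not the real CudaCrypt, and every name here is illustrative.

#include <cstdio>
#include <cuda_runtime.h>

// Trivial, injective stand-in for CudaCrypt so the sketch is self-contained.
__host__ __device__ void toy_encode(const char cand[4], char out[5])
{
  for (int i = 0; i < 4; i++) out[i] = cand[i] + 1;
  out[4] = '\0';
}

__global__ void crack_sketch(const char *target, char *found)   // found: 5 bytes, zero-initialized
{
  char cand[4];
  cand[0] = 'a' + blockIdx.x;      // 26 first letters
  cand[1] = 'a' + blockIdx.y;      // 26 second letters
  cand[2] = '0' + threadIdx.x;     // 10 first digits
  cand[3] = '0' + threadIdx.y;     // 10 second digits

  char enc[5];
  toy_encode(cand, enc);

  bool match = true;
  for (int i = 0; i < 4; i++)
    if (enc[i] != target[i]) { match = false; break; }

  if (match)                        // the toy transform is injective, so at most one match
    for (int i = 0; i < 4; i++) found[i] = cand[i];
}

int main()
{
  char plain[4] = {'q', 'z', '4', '7'};   // build a target from a known plaintext
  char target[5];
  toy_encode(plain, target);

  char *d_target, *d_found;
  cudaMalloc((void **)&d_target, 5);
  cudaMalloc((void **)&d_found, 5);
  cudaMemset(d_found, 0, 5);
  cudaMemcpy(d_target, target, 5, cudaMemcpyHostToDevice);

  crack_sketch<<<dim3(26, 26, 1), dim3(10, 10, 1)>>>(d_target, d_found);
  cudaDeviceSynchronize();

  char found[5] = {0};
  cudaMemcpy(found, d_found, 4, cudaMemcpyDeviceToHost);
  printf("recovered candidate: %s\n", found);
  cudaFree(d_target); cudaFree(d_found);
  return 0;
}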
405f11b2d0a64d26989122e286167e64c1dc3c0f.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2007-2012 Computational Electromagnetic Group (CEM), Dept. ECE, UC San Diego. All rights reserved. * Author: Shaojing Li, March 2012 */ /* * gpu.cu: class definition of Class Gpu */ #include "nbodyfast.h" #include "gpu.h" #include "memory.h" #include "memory_gpu.h" namespace NBODYFAST_NS{ Gpu :: Gpu(class NBODYFAST *n_ptr, int *_num_gpus, int *_gpu_list) { // Initialization of Gpu data-structure and do early device test to see if the selected devices are valid nbodyfast = n_ptr; multi_gpu = false; dev_list = NULL; _d_early_test = NULL; memory_gpu = new MemoryGpu(nbodyfast); num_gpus = *_num_gpus; if (num_gpus > 1) { multi_gpu = true; } dev_list = new DeviceProp[num_gpus]; nbodyfast->memory->alloc_host<int*>(&_d_early_test, num_gpus, "gpu->_d_early_test[]"); for (int i = 0; i < num_gpus; i++) { hipDeviceProp_t _dev_prop; dev_list[i].num = i; dev_list[i].index = _gpu_list[i]; nbodyfast->device_name[i] = _gpu_list[i]; hipGetDeviceProperties(&_dev_prop, _gpu_list[i]); dev_list[i].dev_name.assign(_dev_prop.name); dev_list[i].dev_mem = _dev_prop.totalGlobalMem; } #pragma omp barrier #pragma omp parallel { int _thread_id = omp_get_thread_num(); { cuda_env_early_test(dev_list[_thread_id]); // early test of GPU devices } } #pragma omp barrier nbodyfast->memory->free_host<int*>(&_d_early_test); } Gpu :: ~Gpu() { memory_gpu->output_allocated_list_scr(); // during the destructor, output a list of any remaining allocated arrays (it should be empty under normal circumstances) delete memory_gpu; delete [] dev_list; dev_list = NULL; } inline void Gpu::cuda_env_early_test(DeviceProp _dev) { /* device early test * try to check if the device index supplied is valid * try to allocate a small memory array on the specified device */ hipError_t _cuda_error; _cuda_error = hipSetDevice(_dev.index); if (_cuda_error != hipSuccess) { std::cout << "Cannot select device: " << _dev.index << std::endl; std::cout << "Please check if there is/are " << _dev.index + 1 << " CUDA capable device(s) in this system and the specified device is working properly...exiting..." << std::endl; exit(0); } _d_early_test[_dev.num] = NULL; std::cout << "Testing device: " << _dev.index << " " << _dev.dev_name << std::endl; memory_gpu->alloc_device<int>(_cuda_error, &_d_early_test[_dev.num], 1, "gpu->_d_early_test", _dev.index); if (_cuda_error != hipSuccess) { std::cout << "Device: " << _dev.index << " initialization failed...exiting..." << std::endl; std::cout << hipGetErrorString(_cuda_error) << std::endl; exit(0); } std::cout << "Device: " << _dev.index << " Initial test passed..." << std::endl; memory_gpu->free_device<int>(_cuda_error, &_d_early_test[_dev.num], _dev.index); } }
405f11b2d0a64d26989122e286167e64c1dc3c0f.cu
/* * Copyright 2007-2012 Computational Electromagnetic Group (CEM), Dept. ECE, UC San Diego. All rights reserved. * Author: Shaojing Li, March 2012 */ /* * gpu.cu: class definition of Class Gpu */ #include "nbodyfast.h" #include "gpu.h" #include "memory.h" #include "memory_gpu.h" namespace NBODYFAST_NS{ Gpu :: Gpu(class NBODYFAST *n_ptr, int *_num_gpus, int *_gpu_list) { // Initialization of Gpu data-structure and do early device test to see if the selected devices are valid nbodyfast = n_ptr; multi_gpu = false; dev_list = NULL; _d_early_test = NULL; memory_gpu = new MemoryGpu(nbodyfast); num_gpus = *_num_gpus; if (num_gpus > 1) { multi_gpu = true; } dev_list = new DeviceProp[num_gpus]; nbodyfast->memory->alloc_host<int*>(&_d_early_test, num_gpus, "gpu->_d_early_test[]"); for (int i = 0; i < num_gpus; i++) { cudaDeviceProp _dev_prop; dev_list[i].num = i; dev_list[i].index = _gpu_list[i]; nbodyfast->device_name[i] = _gpu_list[i]; cudaGetDeviceProperties(&_dev_prop, _gpu_list[i]); dev_list[i].dev_name.assign(_dev_prop.name); dev_list[i].dev_mem = _dev_prop.totalGlobalMem; } #pragma omp barrier #pragma omp parallel { int _thread_id = omp_get_thread_num(); { cuda_env_early_test(dev_list[_thread_id]); // early test of GPU devices } } #pragma omp barrier nbodyfast->memory->free_host<int*>(&_d_early_test); } Gpu :: ~Gpu() { memory_gpu->output_allocated_list_scr(); // during the destructor, output a list of any remaining allocated arrays (it should be empty under normal circumstances) delete memory_gpu; delete [] dev_list; dev_list = NULL; } inline void Gpu::cuda_env_early_test(DeviceProp _dev) { /* device early test * try to check if the device index supplied is valid * try to allocate a small memory array on the specified device */ cudaError_t _cuda_error; _cuda_error = cudaSetDevice(_dev.index); if (_cuda_error != cudaSuccess) { std::cout << "Cannot select device: " << _dev.index << std::endl; std::cout << "Please check if there is/are " << _dev.index + 1 << " CUDA capable device(s) in this system and the specified device is working properly...exiting..." << std::endl; exit(0); } _d_early_test[_dev.num] = NULL; std::cout << "Testing device: " << _dev.index << " " << _dev.dev_name << std::endl; memory_gpu->alloc_device<int>(_cuda_error, &_d_early_test[_dev.num], 1, "gpu->_d_early_test", _dev.index); if (_cuda_error != cudaSuccess) { std::cout << "Device: " << _dev.index << " initialization failed...exiting..." << std::endl; std::cout << cudaGetErrorString(_cuda_error) << std::endl; exit(0); } std::cout << "Device: " << _dev.index << " Initial test passed..." << std::endl; memory_gpu->free_device<int>(_cuda_error, &_d_early_test[_dev.num], _dev.index); } }
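The early-test idea in Gpu::cuda_env_early_test (select the device, allocate one int, inspect the error code) can be reproduced stand-alone. The sketch below uses plain CUDA runtime calls instead of the project's MemoryGpu wrappers, so it only approximates what the class does.

#include <cstdio>
#include <cuda_runtime.h>

// Smoke-test every visible device: make it current and try a 1-int allocation.
int main() {
    int count = 0;
    cudaGetDeviceCount(&count);
    for (int dev = 0; dev < count; ++dev) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, dev);
        cudaError_t err = cudaSetDevice(dev);
        int *probe = nullptr;
        if (err == cudaSuccess) err = cudaMalloc(&probe, sizeof(int));
        if (err == cudaSuccess) {
            printf("Device %d (%s): initial test passed\n", dev, prop.name);
            cudaFree(probe);
        } else {
            printf("Device %d (%s): %s\n", dev, prop.name, cudaGetErrorString(err));
        }
    }
    return 0;
}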
77c03c43364533b28a72cad1315dfe2879b9cfbf.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <iostream>
#include <hip/hip_runtime.h> //CUDA RunTime API
#include <hip/hip_runtime.h>

#define DATA_SIZE 1048576

int data[DATA_SIZE];

// Generate a large amount of random numbers between 0 and 9
void GenerateNumbers(int *number, int size)
{
    for (int i = 0; i < size; i++) {
        number[i] = rand() % 10;
    }
}

// CUDA initialization
bool InitCUDA()
{
    int count;

    // Get the number of devices that support CUDA
    hipGetDeviceCount(&count);
    if (count == 0) {
        fprintf(stderr, "There is no device.\n");
        return false;
    }

    int i;
    for (i = 0; i < count; i++) {
        hipDeviceProp_t prop;
        if (hipGetDeviceProperties(&prop, i) == hipSuccess) {
            if (prop.major >= 1) {
                break;
            }
        }
    }

    if (i == count) {
        fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
        return false;
    }

    hipSetDevice(i);

    return true;
}

// __global__ function (executed on the GPU): computes the sum of cubes
__global__ static void sumOfSquares(int *num, int *result, clock_t *time)
{
    int sum = 0;
    int i;
    clock_t start = clock();
    for (i = 0; i < DATA_SIZE; i++) {
        sum += num[i] * num[i] * num[i];
    }

    *result = sum;
    *time = clock() - start;
}

int cudaGetClockRate() {
    hipDeviceProp_t prop;
    if (hipGetDeviceProperties(&prop, 0) == hipSuccess){
        return prop.clockRate * 1000;
    } else {
        std::cout << "cudaGetClockRate fails" << std::endl;
        return 1000000000; // fallback of 10^9 Hz (note: "10^9" in C is bitwise XOR, not exponentiation)
    }
}

int main()
{
    // CUDA initialization
    if (!InitCUDA()) {
        return 0;
    }

    // Generate the random numbers
    GenerateNumbers(data, DATA_SIZE);

    /* Copy the data to device (GPU) memory */
    int *gpudata, *result;
    clock_t *time;

    // hipMalloc allocates device memory (result stores the computed sum, time stores the run time)
    hipMalloc((void**)&gpudata, sizeof(int) * DATA_SIZE);
    hipMalloc((void**)&result, sizeof(int));
    hipMalloc((void**)&time, sizeof(clock_t));

    // hipMemcpy copies the generated random numbers to device memory
    // hipMemcpyHostToDevice - copy from host memory to device memory
    // hipMemcpyDeviceToHost - copy from device memory to host memory
    hipMemcpy(gpudata, data, sizeof(int) * DATA_SIZE, hipMemcpyHostToDevice);

    // Launching a kernel, syntax: kernel_name<<<number of blocks, number of threads, shared memory size>>>(arguments...);
    sumOfSquares << <1, 1, 0 >> >(gpudata, result, time);

    /* Copy the result from the GPU back to host memory */
    int sum;
    clock_t time_used;

    // hipMemcpy copies the results from device memory back to host memory
    hipMemcpy(&sum, result, sizeof(int), hipMemcpyDeviceToHost);
    hipMemcpy(&time_used, time, sizeof(clock_t), hipMemcpyDeviceToHost);

    //Free
    hipFree(gpudata);
    hipFree(result);
    hipFree(time);

    // the clock rate is 1582000 * 1000 Hz
    int clockRate = cudaGetClockRate();
    printf("GPUsum: %d time: %fs\n", sum, time_used * 1.0 / clockRate);

    sum = 0;
    for (int i = 0; i < DATA_SIZE; i++) {
        sum += data[i] * data[i] * data[i];
    }
    printf("CPUsum: %d \n", sum);

    return 0;
}
77c03c43364533b28a72cad1315dfe2879b9cfbf.cu
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <iostream>
#include <cuda_runtime.h> //CUDA RunTime API
#include <cuda_runtime.h>

#define DATA_SIZE 1048576

int data[DATA_SIZE];

// Generate a large amount of random numbers between 0 and 9
void GenerateNumbers(int *number, int size)
{
    for (int i = 0; i < size; i++) {
        number[i] = rand() % 10;
    }
}

// CUDA initialization
bool InitCUDA()
{
    int count;

    // Get the number of devices that support CUDA
    cudaGetDeviceCount(&count);
    if (count == 0) {
        fprintf(stderr, "There is no device.\n");
        return false;
    }

    int i;
    for (i = 0; i < count; i++) {
        cudaDeviceProp prop;
        if (cudaGetDeviceProperties(&prop, i) == cudaSuccess) {
            if (prop.major >= 1) {
                break;
            }
        }
    }

    if (i == count) {
        fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
        return false;
    }

    cudaSetDevice(i);

    return true;
}

// __global__ function (executed on the GPU): computes the sum of cubes
__global__ static void sumOfSquares(int *num, int *result, clock_t *time)
{
    int sum = 0;
    int i;
    clock_t start = clock();
    for (i = 0; i < DATA_SIZE; i++) {
        sum += num[i] * num[i] * num[i];
    }

    *result = sum;
    *time = clock() - start;
}

int cudaGetClockRate() {
    cudaDeviceProp prop;
    if (cudaGetDeviceProperties(&prop, 0) == cudaSuccess){
        return prop.clockRate * 1000;
    } else {
        std::cout << "cudaGetClockRate fails" << std::endl;
        return 1000000000; // fallback of 10^9 Hz (note: "10^9" in C is bitwise XOR, not exponentiation)
    }
}

int main()
{
    // CUDA initialization
    if (!InitCUDA()) {
        return 0;
    }

    // Generate the random numbers
    GenerateNumbers(data, DATA_SIZE);

    /* Copy the data to device (GPU) memory */
    int *gpudata, *result;
    clock_t *time;

    // cudaMalloc allocates a block of device memory (result stores the computed sum, time stores the run time)
    cudaMalloc((void**)&gpudata, sizeof(int) * DATA_SIZE);
    cudaMalloc((void**)&result, sizeof(int));
    cudaMalloc((void**)&time, sizeof(clock_t));

    // cudaMemcpy copies the generated random numbers to device memory
    // cudaMemcpyHostToDevice - copy from host memory to device memory
    // cudaMemcpyDeviceToHost - copy from device memory to host memory
    cudaMemcpy(gpudata, data, sizeof(int) * DATA_SIZE, cudaMemcpyHostToDevice);

    // Launching a kernel in CUDA, syntax: kernel_name<<<number of blocks, number of threads, shared memory size>>>(arguments...);
    sumOfSquares << <1, 1, 0 >> >(gpudata, result, time);

    /* Copy the result from the GPU back to host memory */
    int sum;
    clock_t time_used;

    // cudaMemcpy copies the results from device memory back to host memory
    cudaMemcpy(&sum, result, sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(&time_used, time, sizeof(clock_t), cudaMemcpyDeviceToHost);

    //Free
    cudaFree(gpudata);
    cudaFree(result);
    cudaFree(time);

    // the clock rate is 1582000 * 1000 Hz
    int clockRate = cudaGetClockRate();
    printf("GPUsum: %d time: %fs\n", sum, time_used * 1.0 / clockRate);

    sum = 0;
    for (int i = 0; i < DATA_SIZE; i++) {
        sum += data[i] * data[i] * data[i];
    }
    printf("CPUsum: %d \n", sum);

    return 0;
}
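sumOfSquares deliberately runs the whole reduction in a single thread (<<<1, 1, 0>>>), so the GPU time it reports is essentially serial. A common next step is a grid-stride loop with one atomicAdd per thread; the kernel below is a sketch of that idea with illustrative names, not part of the original exercise.

// Grid-stride sum of cubes: each thread accumulates a partial sum, then issues one atomicAdd.
__global__ void sumOfCubesParallel(const int *num, int n, int *result)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = gridDim.x * blockDim.x;
    int partial = 0;
    for (int i = idx; i < n; i += stride)
        partial += num[i] * num[i] * num[i];
    atomicAdd(result, partial);
}

// Launch example (result must be zero-initialised on the device first):
//   cudaMemset(result, 0, sizeof(int));
//   sumOfCubesParallel<<<64, 256>>>(gpudata, DATA_SIZE, result);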
61b88fe8f8ff5686b35154c02dbe906d000c9733.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/detail/TensorInfo.cuh> #include "atomics.cuh" #define THREADS 1024 #define BLOCKS(N) (N + THREADS - 1) / THREADS template <typename scalar_t> __global__ void weighting_fw_kernel(at::cuda::detail::TensorInfo<scalar_t, int64_t> out, at::cuda::detail::TensorInfo<scalar_t, int64_t> x, at::cuda::detail::TensorInfo<scalar_t, int64_t> weight, at::cuda::detail::TensorInfo<scalar_t, int64_t> basis, at::cuda::detail::TensorInfo<int64_t, int64_t> weight_index, size_t numel) { const size_t index = blockIdx.x * blockDim.x + threadIdx.x; const size_t stride = blockDim.x * gridDim.x; for (ptrdiff_t i = index; i < numel; i += stride) { int64_t e = i / out.sizes[1], m_out = i % out.sizes[1]; auto S = basis.sizes[1]; scalar_t v = 0; for (ptrdiff_t s = 0; s < S; s++) { auto b = basis.data[e * S + s]; auto wi = weight_index.data[e * S + s]; for (ptrdiff_t m_in = 0; m_in < x.sizes[1]; m_in++) { auto tmp = weight.data[wi * weight.strides[0] + m_in * weight.strides[1] + m_out * weight.strides[2]]; tmp *= b * x.data[e * x.strides[0] + m_in * x.strides[1]]; v += tmp; } } out.data[i] = v; } } at::Tensor weighting_fw_cuda(at::Tensor x, at::Tensor weight, at::Tensor basis, at::Tensor weight_index) { hipSetDevice(x.get_device()); auto E = x.size(0), M_out = weight.size(2); auto out = at::empty({E, M_out}, x.options()); AT_DISPATCH_FLOATING_TYPES(out.scalar_type(), "weighting_fw", [&] { hipLaunchKernelGGL(( weighting_fw_kernel<scalar_t>), dim3(BLOCKS(out.numel())), dim3(THREADS), 0, 0, at::cuda::detail::getTensorInfo<scalar_t, int64_t>(out), at::cuda::detail::getTensorInfo<scalar_t, int64_t>(x), at::cuda::detail::getTensorInfo<scalar_t, int64_t>(weight), at::cuda::detail::getTensorInfo<scalar_t, int64_t>(basis), at::cuda::detail::getTensorInfo<int64_t, int64_t>(weight_index), out.numel()); }); return out; } template <typename scalar_t> __global__ void weighting_bw_x_kernel( at::cuda::detail::TensorInfo<scalar_t, int64_t> grad_x, at::cuda::detail::TensorInfo<scalar_t, int64_t> grad_out, at::cuda::detail::TensorInfo<scalar_t, int64_t> weight, at::cuda::detail::TensorInfo<scalar_t, int64_t> basis, at::cuda::detail::TensorInfo<int64_t, int64_t> weight_index, size_t numel) { const size_t index = blockIdx.x * blockDim.x + threadIdx.x; const size_t stride = blockDim.x * gridDim.x; for (ptrdiff_t i = index; i < numel; i += stride) { int64_t e = i / grad_x.sizes[1], m_in = i % grad_x.sizes[1]; auto S = basis.sizes[1]; scalar_t v = 0; for (ptrdiff_t s = 0; s < S; s++) { auto b = basis.data[e * S + s]; auto wi = weight_index.data[e * S + s]; for (ptrdiff_t m_out = 0; m_out < grad_out.sizes[1]; m_out++) { auto tmp = weight.data[wi * weight.strides[0] + m_out * weight.strides[1] + m_in * weight.strides[2]]; tmp *= b * grad_out .data[e * grad_out.strides[0] + m_out * grad_out.strides[1]]; v += tmp; } } grad_x.data[i] = v; } } at::Tensor weighting_bw_x_cuda(at::Tensor grad_out, at::Tensor weight, at::Tensor basis, at::Tensor weight_index) { hipSetDevice(grad_out.get_device()); auto E = grad_out.size(0), M_in = weight.size(1); auto grad_x = at::empty({E, M_in}, grad_out.options()); weight = weight.transpose(1, 2).contiguous(); AT_DISPATCH_FLOATING_TYPES(grad_x.scalar_type(), "weighting_bw_x", [&] { hipLaunchKernelGGL(( weighting_bw_x_kernel<scalar_t>), dim3(BLOCKS(grad_x.numel())), dim3(THREADS), 0, 0, at::cuda::detail::getTensorInfo<scalar_t, int64_t>(grad_x), 
at::cuda::detail::getTensorInfo<scalar_t, int64_t>(grad_out), at::cuda::detail::getTensorInfo<scalar_t, int64_t>(weight), at::cuda::detail::getTensorInfo<scalar_t, int64_t>(basis), at::cuda::detail::getTensorInfo<int64_t, int64_t>(weight_index), grad_x.numel()); }); return grad_x; } template <typename scalar_t> __global__ void weighting_bw_w_kernel( at::cuda::detail::TensorInfo<scalar_t, int64_t> grad_weight, at::cuda::detail::TensorInfo<scalar_t, int64_t> grad_out, at::cuda::detail::TensorInfo<scalar_t, int64_t> x, at::cuda::detail::TensorInfo<scalar_t, int64_t> basis, at::cuda::detail::TensorInfo<int64_t, int64_t> weight_index, size_t numel) { const size_t index = blockIdx.x * blockDim.x + threadIdx.x; const size_t stride = blockDim.x * gridDim.x; for (ptrdiff_t i = index; i < numel; i += stride) { int64_t e = i / grad_out.sizes[1], m_out = i % grad_out.sizes[1]; int64_t S = basis.sizes[1], M_in = x.sizes[1], M_out = grad_out.sizes[1]; auto g = grad_out.data[e * grad_out.strides[0] + m_out * grad_out.strides[1]]; for (ptrdiff_t s = 0; s < S; s++) { auto b = basis.data[e * S + s]; auto wi = weight_index.data[e * S + s]; for (ptrdiff_t m_in = 0; m_in < M_in; m_in++) { auto v = g * b * x.data[e * x.strides[0] + m_in * x.strides[1]]; atomicAdd(&grad_weight.data[wi * M_in * M_out + m_in * M_out + m_out], v); } } } } at::Tensor weighting_bw_w_cuda(at::Tensor grad_out, at::Tensor x, at::Tensor basis, at::Tensor weight_index, int64_t K) { hipSetDevice(grad_out.get_device()); auto M_in = x.size(1), M_out = grad_out.size(1); auto grad_weight = at::zeros({K, M_in, M_out}, grad_out.options()); AT_DISPATCH_FLOATING_TYPES(grad_out.scalar_type(), "weighting_bw_w", [&] { hipLaunchKernelGGL(( weighting_bw_w_kernel<scalar_t>), dim3(BLOCKS(grad_out.numel())), dim3(THREADS), 0, 0, at::cuda::detail::getTensorInfo<scalar_t, int64_t>(grad_weight), at::cuda::detail::getTensorInfo<scalar_t, int64_t>(grad_out), at::cuda::detail::getTensorInfo<scalar_t, int64_t>(x), at::cuda::detail::getTensorInfo<scalar_t, int64_t>(basis), at::cuda::detail::getTensorInfo<int64_t, int64_t>(weight_index), grad_out.numel()); }); return grad_weight; } template <typename scalar_t> __global__ void weighting_bw_b_kernel( at::cuda::detail::TensorInfo<scalar_t, int64_t> grad_basis, at::cuda::detail::TensorInfo<scalar_t, int64_t> grad_out, at::cuda::detail::TensorInfo<scalar_t, int64_t> x, at::cuda::detail::TensorInfo<scalar_t, int64_t> weight, at::cuda::detail::TensorInfo<int64_t, int64_t> weight_index, size_t numel) { const size_t index = blockIdx.x * blockDim.x + threadIdx.x; const size_t stride = blockDim.x * gridDim.x; for (ptrdiff_t i = index; i < numel; i += stride) { int64_t e = i / grad_out.sizes[1], m_out = i % grad_out.sizes[1]; auto S = grad_basis.sizes[1]; auto g = grad_out.data[e * grad_out.strides[0] + m_out * grad_out.strides[1]]; for (ptrdiff_t s = 0; s < S; s++) { scalar_t v = 0; auto wi = weight_index.data[e * S + s]; for (ptrdiff_t m_in = 0; m_in < x.sizes[1]; m_in++) { auto w = weight.data[wi * weight.strides[0] + m_in * weight.strides[1] + m_out * weight.strides[2]]; v += g * w * x.data[e * x.strides[0] + m_in * x.strides[1]]; } atomicAdd(&grad_basis.data[e * S + s], v); } } } at::Tensor weighting_bw_b_cuda(at::Tensor grad_out, at::Tensor x, at::Tensor weight, at::Tensor weight_index) { hipSetDevice(grad_out.get_device()); auto E = x.size(0), S = weight_index.size(1); auto grad_basis = at::zeros({E, S}, grad_out.options()); AT_DISPATCH_FLOATING_TYPES(grad_out.scalar_type(), "weighting_bw_b", [&] { hipLaunchKernelGGL(( 
weighting_bw_b_kernel<scalar_t>), dim3(BLOCKS(grad_out.numel())), dim3(THREADS), 0, 0, at::cuda::detail::getTensorInfo<scalar_t, int64_t>(grad_basis), at::cuda::detail::getTensorInfo<scalar_t, int64_t>(grad_out), at::cuda::detail::getTensorInfo<scalar_t, int64_t>(x), at::cuda::detail::getTensorInfo<scalar_t, int64_t>(weight), at::cuda::detail::getTensorInfo<int64_t, int64_t>(weight_index), grad_out.numel()); }); return grad_basis; }
61b88fe8f8ff5686b35154c02dbe906d000c9733.cu
#include <ATen/ATen.h> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/detail/TensorInfo.cuh> #include "atomics.cuh" #define THREADS 1024 #define BLOCKS(N) (N + THREADS - 1) / THREADS template <typename scalar_t> __global__ void weighting_fw_kernel(at::cuda::detail::TensorInfo<scalar_t, int64_t> out, at::cuda::detail::TensorInfo<scalar_t, int64_t> x, at::cuda::detail::TensorInfo<scalar_t, int64_t> weight, at::cuda::detail::TensorInfo<scalar_t, int64_t> basis, at::cuda::detail::TensorInfo<int64_t, int64_t> weight_index, size_t numel) { const size_t index = blockIdx.x * blockDim.x + threadIdx.x; const size_t stride = blockDim.x * gridDim.x; for (ptrdiff_t i = index; i < numel; i += stride) { int64_t e = i / out.sizes[1], m_out = i % out.sizes[1]; auto S = basis.sizes[1]; scalar_t v = 0; for (ptrdiff_t s = 0; s < S; s++) { auto b = basis.data[e * S + s]; auto wi = weight_index.data[e * S + s]; for (ptrdiff_t m_in = 0; m_in < x.sizes[1]; m_in++) { auto tmp = weight.data[wi * weight.strides[0] + m_in * weight.strides[1] + m_out * weight.strides[2]]; tmp *= b * x.data[e * x.strides[0] + m_in * x.strides[1]]; v += tmp; } } out.data[i] = v; } } at::Tensor weighting_fw_cuda(at::Tensor x, at::Tensor weight, at::Tensor basis, at::Tensor weight_index) { cudaSetDevice(x.get_device()); auto E = x.size(0), M_out = weight.size(2); auto out = at::empty({E, M_out}, x.options()); AT_DISPATCH_FLOATING_TYPES(out.scalar_type(), "weighting_fw", [&] { weighting_fw_kernel<scalar_t><<<BLOCKS(out.numel()), THREADS>>>( at::cuda::detail::getTensorInfo<scalar_t, int64_t>(out), at::cuda::detail::getTensorInfo<scalar_t, int64_t>(x), at::cuda::detail::getTensorInfo<scalar_t, int64_t>(weight), at::cuda::detail::getTensorInfo<scalar_t, int64_t>(basis), at::cuda::detail::getTensorInfo<int64_t, int64_t>(weight_index), out.numel()); }); return out; } template <typename scalar_t> __global__ void weighting_bw_x_kernel( at::cuda::detail::TensorInfo<scalar_t, int64_t> grad_x, at::cuda::detail::TensorInfo<scalar_t, int64_t> grad_out, at::cuda::detail::TensorInfo<scalar_t, int64_t> weight, at::cuda::detail::TensorInfo<scalar_t, int64_t> basis, at::cuda::detail::TensorInfo<int64_t, int64_t> weight_index, size_t numel) { const size_t index = blockIdx.x * blockDim.x + threadIdx.x; const size_t stride = blockDim.x * gridDim.x; for (ptrdiff_t i = index; i < numel; i += stride) { int64_t e = i / grad_x.sizes[1], m_in = i % grad_x.sizes[1]; auto S = basis.sizes[1]; scalar_t v = 0; for (ptrdiff_t s = 0; s < S; s++) { auto b = basis.data[e * S + s]; auto wi = weight_index.data[e * S + s]; for (ptrdiff_t m_out = 0; m_out < grad_out.sizes[1]; m_out++) { auto tmp = weight.data[wi * weight.strides[0] + m_out * weight.strides[1] + m_in * weight.strides[2]]; tmp *= b * grad_out .data[e * grad_out.strides[0] + m_out * grad_out.strides[1]]; v += tmp; } } grad_x.data[i] = v; } } at::Tensor weighting_bw_x_cuda(at::Tensor grad_out, at::Tensor weight, at::Tensor basis, at::Tensor weight_index) { cudaSetDevice(grad_out.get_device()); auto E = grad_out.size(0), M_in = weight.size(1); auto grad_x = at::empty({E, M_in}, grad_out.options()); weight = weight.transpose(1, 2).contiguous(); AT_DISPATCH_FLOATING_TYPES(grad_x.scalar_type(), "weighting_bw_x", [&] { weighting_bw_x_kernel<scalar_t><<<BLOCKS(grad_x.numel()), THREADS>>>( at::cuda::detail::getTensorInfo<scalar_t, int64_t>(grad_x), at::cuda::detail::getTensorInfo<scalar_t, int64_t>(grad_out), at::cuda::detail::getTensorInfo<scalar_t, int64_t>(weight), 
at::cuda::detail::getTensorInfo<scalar_t, int64_t>(basis), at::cuda::detail::getTensorInfo<int64_t, int64_t>(weight_index), grad_x.numel()); }); return grad_x; } template <typename scalar_t> __global__ void weighting_bw_w_kernel( at::cuda::detail::TensorInfo<scalar_t, int64_t> grad_weight, at::cuda::detail::TensorInfo<scalar_t, int64_t> grad_out, at::cuda::detail::TensorInfo<scalar_t, int64_t> x, at::cuda::detail::TensorInfo<scalar_t, int64_t> basis, at::cuda::detail::TensorInfo<int64_t, int64_t> weight_index, size_t numel) { const size_t index = blockIdx.x * blockDim.x + threadIdx.x; const size_t stride = blockDim.x * gridDim.x; for (ptrdiff_t i = index; i < numel; i += stride) { int64_t e = i / grad_out.sizes[1], m_out = i % grad_out.sizes[1]; int64_t S = basis.sizes[1], M_in = x.sizes[1], M_out = grad_out.sizes[1]; auto g = grad_out.data[e * grad_out.strides[0] + m_out * grad_out.strides[1]]; for (ptrdiff_t s = 0; s < S; s++) { auto b = basis.data[e * S + s]; auto wi = weight_index.data[e * S + s]; for (ptrdiff_t m_in = 0; m_in < M_in; m_in++) { auto v = g * b * x.data[e * x.strides[0] + m_in * x.strides[1]]; atomicAdd(&grad_weight.data[wi * M_in * M_out + m_in * M_out + m_out], v); } } } } at::Tensor weighting_bw_w_cuda(at::Tensor grad_out, at::Tensor x, at::Tensor basis, at::Tensor weight_index, int64_t K) { cudaSetDevice(grad_out.get_device()); auto M_in = x.size(1), M_out = grad_out.size(1); auto grad_weight = at::zeros({K, M_in, M_out}, grad_out.options()); AT_DISPATCH_FLOATING_TYPES(grad_out.scalar_type(), "weighting_bw_w", [&] { weighting_bw_w_kernel<scalar_t><<<BLOCKS(grad_out.numel()), THREADS>>>( at::cuda::detail::getTensorInfo<scalar_t, int64_t>(grad_weight), at::cuda::detail::getTensorInfo<scalar_t, int64_t>(grad_out), at::cuda::detail::getTensorInfo<scalar_t, int64_t>(x), at::cuda::detail::getTensorInfo<scalar_t, int64_t>(basis), at::cuda::detail::getTensorInfo<int64_t, int64_t>(weight_index), grad_out.numel()); }); return grad_weight; } template <typename scalar_t> __global__ void weighting_bw_b_kernel( at::cuda::detail::TensorInfo<scalar_t, int64_t> grad_basis, at::cuda::detail::TensorInfo<scalar_t, int64_t> grad_out, at::cuda::detail::TensorInfo<scalar_t, int64_t> x, at::cuda::detail::TensorInfo<scalar_t, int64_t> weight, at::cuda::detail::TensorInfo<int64_t, int64_t> weight_index, size_t numel) { const size_t index = blockIdx.x * blockDim.x + threadIdx.x; const size_t stride = blockDim.x * gridDim.x; for (ptrdiff_t i = index; i < numel; i += stride) { int64_t e = i / grad_out.sizes[1], m_out = i % grad_out.sizes[1]; auto S = grad_basis.sizes[1]; auto g = grad_out.data[e * grad_out.strides[0] + m_out * grad_out.strides[1]]; for (ptrdiff_t s = 0; s < S; s++) { scalar_t v = 0; auto wi = weight_index.data[e * S + s]; for (ptrdiff_t m_in = 0; m_in < x.sizes[1]; m_in++) { auto w = weight.data[wi * weight.strides[0] + m_in * weight.strides[1] + m_out * weight.strides[2]]; v += g * w * x.data[e * x.strides[0] + m_in * x.strides[1]]; } atomicAdd(&grad_basis.data[e * S + s], v); } } } at::Tensor weighting_bw_b_cuda(at::Tensor grad_out, at::Tensor x, at::Tensor weight, at::Tensor weight_index) { cudaSetDevice(grad_out.get_device()); auto E = x.size(0), S = weight_index.size(1); auto grad_basis = at::zeros({E, S}, grad_out.options()); AT_DISPATCH_FLOATING_TYPES(grad_out.scalar_type(), "weighting_bw_b", [&] { weighting_bw_b_kernel<scalar_t><<<BLOCKS(grad_out.numel()), THREADS>>>( at::cuda::detail::getTensorInfo<scalar_t, int64_t>(grad_basis), 
at::cuda::detail::getTensorInfo<scalar_t, int64_t>(grad_out), at::cuda::detail::getTensorInfo<scalar_t, int64_t>(x), at::cuda::detail::getTensorInfo<scalar_t, int64_t>(weight), at::cuda::detail::getTensorInfo<int64_t, int64_t>(weight_index), grad_out.numel()); }); return grad_basis; }
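All four kernels decompose the flat index as e = i / sizes[1] and m_out (or m_in) = i % sizes[1], then walk the remaining dimensions through the TensorInfo strides. For testing, the forward pass is easy to mirror on the host; the reference below assumes contiguous row-major tensors, and its name and signature are chosen here for illustration only.

#include <cstdint>

// Host reference for weighting_fw on contiguous tensors:
// out[e][m_out] = sum_s sum_{m_in} basis[e][s] * weight[wi(e,s)][m_in][m_out] * x[e][m_in]
void weighting_fw_reference(const float *x, const float *weight, const float *basis,
                            const std::int64_t *weight_index, float *out,
                            std::int64_t E, std::int64_t S,
                            std::int64_t M_in, std::int64_t M_out) {
    for (std::int64_t e = 0; e < E; ++e)
        for (std::int64_t m_out = 0; m_out < M_out; ++m_out) {
            float v = 0.f;
            for (std::int64_t s = 0; s < S; ++s) {
                float b = basis[e * S + s];
                std::int64_t wi = weight_index[e * S + s];
                for (std::int64_t m_in = 0; m_in < M_in; ++m_in)
                    v += b * weight[(wi * M_in + m_in) * M_out + m_out] * x[e * M_in + m_in];
            }
            out[e * M_out + m_out] = v;
        }
}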
e07586f41e03488b58dbbfb16d4a04701582f20b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../common.h" // Referenced the blog https://developer.nvidia.com/blog/efficient-matrix-transpose-cuda-cc/ const int TILE_DIM = 32; const int BLOCK_ROWS = 8; // We will launch blocks of [TILE_DIM=32, BLOCK_ROWS=8] threads. // Each processes TILE_DIM x TILE_DIM elements each time, each thread processes (TILE_DIM / BLOCK_ROWS) elements __global__ void CopyMatrix(float *dest, const float *src, const int M, const int N) { // (x,y) coordinates on TILE-wise int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; const int size = M * N; for (int j = 0; j < TILE_DIM && ((y + j) * width + x < size); j += BLOCK_ROWS) { dest[(y + j) * width + x] = src[(y + j) * width + x]; } } __global__ void MatrixTranspose(float *dest, const float *src, const int M, const int N) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) { dest[x * width + (y + j)] = src[(y + j) * width + x]; } } __global__ void MatrixTransposeCoalesced(float *dest, const float *src, const int M, const int N) { __shared__ float tile[TILE_DIM][TILE_DIM]; int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; // Load continuous src memory to TILE for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) { tile[threadIdx.y + j][threadIdx.x] = src[(y + j) * width + x]; } __syncthreads(); // After remapping the index, store the TILE to continuous dest memory. x = blockIdx.y * TILE_DIM + threadIdx.x; y = blockIdx.x * TILE_DIM + threadIdx.y; for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) { dest[(y + j) * width + x] = tile[threadIdx.x][threadIdx.y + j]; } } // Void Shared Memory Bank Conflicts __global__ void MatrixTransposeCoalescedBank(float *dest, const float *src, const int M, const int N) { __shared__ float tile[TILE_DIM][TILE_DIM + 1]; // just padding int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; // Load continuous src memory to TILE for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) { tile[threadIdx.y + j][threadIdx.x] = src[(y + j) * width + x]; } __syncthreads(); // After remapping the index, store the TILE to continuous dest memory. 
x = blockIdx.y * TILE_DIM + threadIdx.x; y = blockIdx.x * TILE_DIM + threadIdx.y; for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) { dest[(y + j) * width + x] = tile[threadIdx.x][threadIdx.y + j]; } } __global__ void VerifyDeviceResult(const float *A, const float *B, const int N, int *diff) { __shared__ int mydiff; int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM && ((y + j) * width + x) < N; j += BLOCK_ROWS) { if (std::abs(A[(y + j) * width + x] - B[(y + j) * width + x]) >= 1e-5) { mydiff++; } } __syncthreads(); *diff += mydiff; } int main() { { const int M = 1024; const int N = 1024; std::vector<float> A_host; auto *A = CreateDeviceVector(M * N, &A_host, true); auto *A_copied = CreateDeviceVector(M * N, &A_host); std::vector<int> diff_; auto *diffv = CreateDeviceVector<int>(1, &diff_); dim3 grid(N / TILE_DIM, M / TILE_DIM); dim3 thread(TILE_DIM, BLOCK_ROWS); { hipLaunchKernelGGL(( CopyMatrix), dim3(grid), dim3(thread), 0, 0, A_copied, A, M, N); hipLaunchKernelGGL(( VerifyDeviceResult), dim3(grid), dim3(thread), 0, 0, A_copied, A, M * N, diffv); hipMemcpy(diff_.data(), diffv, sizeof(int), hipMemcpyDeviceToHost); std::cerr << "diff: " << diff_[0] << std::endl; } for (int i = 0; i < REPEAT; i++) { hipLaunchKernelGGL(( MatrixTranspose), dim3(grid), dim3(thread), 0, 0, A_copied, A, M, N); } hipDeviceSynchronize(); for (int i = 0; i < REPEAT; i++) { hipLaunchKernelGGL(( MatrixTransposeCoalesced), dim3(grid), dim3(thread), 0, 0, A_copied, A, M, N); } hipDeviceSynchronize(); for (int i = 0; i < REPEAT; i++) { hipLaunchKernelGGL(( MatrixTransposeCoalescedBank), dim3(grid), dim3(thread), 0, 0, A_copied, A, M, N); } hipDeviceSynchronize(); } return 0; }
e07586f41e03488b58dbbfb16d4a04701582f20b.cu
#include "../common.h" // Referenced the blog https://developer.nvidia.com/blog/efficient-matrix-transpose-cuda-cc/ const int TILE_DIM = 32; const int BLOCK_ROWS = 8; // We will launch blocks of [TILE_DIM=32, BLOCK_ROWS=8] threads. // Each processes TILE_DIM x TILE_DIM elements each time, each thread processes (TILE_DIM / BLOCK_ROWS) elements __global__ void CopyMatrix(float *dest, const float *src, const int M, const int N) { // (x,y) coordinates on TILE-wise int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; const int size = M * N; for (int j = 0; j < TILE_DIM && ((y + j) * width + x < size); j += BLOCK_ROWS) { dest[(y + j) * width + x] = src[(y + j) * width + x]; } } __global__ void MatrixTranspose(float *dest, const float *src, const int M, const int N) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) { dest[x * width + (y + j)] = src[(y + j) * width + x]; } } __global__ void MatrixTransposeCoalesced(float *dest, const float *src, const int M, const int N) { __shared__ float tile[TILE_DIM][TILE_DIM]; int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; // Load continuous src memory to TILE for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) { tile[threadIdx.y + j][threadIdx.x] = src[(y + j) * width + x]; } __syncthreads(); // After remapping the index, store the TILE to continuous dest memory. x = blockIdx.y * TILE_DIM + threadIdx.x; y = blockIdx.x * TILE_DIM + threadIdx.y; for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) { dest[(y + j) * width + x] = tile[threadIdx.x][threadIdx.y + j]; } } // Void Shared Memory Bank Conflicts __global__ void MatrixTransposeCoalescedBank(float *dest, const float *src, const int M, const int N) { __shared__ float tile[TILE_DIM][TILE_DIM + 1]; // just padding int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; // Load continuous src memory to TILE for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) { tile[threadIdx.y + j][threadIdx.x] = src[(y + j) * width + x]; } __syncthreads(); // After remapping the index, store the TILE to continuous dest memory. 
x = blockIdx.y * TILE_DIM + threadIdx.x; y = blockIdx.x * TILE_DIM + threadIdx.y; for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) { dest[(y + j) * width + x] = tile[threadIdx.x][threadIdx.y + j]; } } __global__ void VerifyDeviceResult(const float *A, const float *B, const int N, int *diff) { __shared__ int mydiff; int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM && ((y + j) * width + x) < N; j += BLOCK_ROWS) { if (std::abs(A[(y + j) * width + x] - B[(y + j) * width + x]) >= 1e-5) { mydiff++; } } __syncthreads(); *diff += mydiff; } int main() { { const int M = 1024; const int N = 1024; std::vector<float> A_host; auto *A = CreateDeviceVector(M * N, &A_host, true); auto *A_copied = CreateDeviceVector(M * N, &A_host); std::vector<int> diff_; auto *diffv = CreateDeviceVector<int>(1, &diff_); dim3 grid(N / TILE_DIM, M / TILE_DIM); dim3 thread(TILE_DIM, BLOCK_ROWS); { CopyMatrix<<<grid, thread>>>(A_copied, A, M, N); VerifyDeviceResult<<<grid, thread>>>(A_copied, A, M * N, diffv); cudaMemcpy(diff_.data(), diffv, sizeof(int), cudaMemcpyDeviceToHost); std::cerr << "diff: " << diff_[0] << std::endl; } for (int i = 0; i < REPEAT; i++) { MatrixTranspose<<<grid, thread>>>(A_copied, A, M, N); } cudaDeviceSynchronize(); for (int i = 0; i < REPEAT; i++) { MatrixTransposeCoalesced<<<grid, thread>>>(A_copied, A, M, N); } cudaDeviceSynchronize(); for (int i = 0; i < REPEAT; i++) { MatrixTransposeCoalescedBank<<<grid, thread>>>(A_copied, A, M, N); } cudaDeviceSynchronize(); } return 0; }
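The TILE_DIM + 1 padding in MatrixTransposeCoalescedBank matters because shared memory is split into 32 four-byte banks: with a 32x32 float tile, the column reads tile[threadIdx.x][...] send an entire warp to the same bank, while a 33-float row pitch shifts each row by one bank and removes the conflict. A small cudaEvent harness like the sketch below makes the difference between the three variants measurable; the wrapper-function signature is an assumption, not part of the file above.

#include <cuda_runtime.h>

// Times `repeat` launches of one transpose variant; `launch` is a small host
// wrapper that issues the <<<grid, thread>>> call for that variant.
float time_variant(void (*launch)(float *, const float *, int, int),
                   float *dst, const float *src, int M, int N, int repeat) {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    for (int i = 0; i < repeat; ++i) launch(dst, src, M, N);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float ms = 0.f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;  // total milliseconds; effective bandwidth ~ 2 * M * N * sizeof(float) * repeat / (ms * 1e6) GB/s
}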
500539c84a281f13d0796297a10a67f51ad76924.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <math.h> #include <iostream> #include <thrust/device_vector.h> #include <thrust/transform_reduce.h> #include <thrust/reduce.h> #include <thrust/tuple.h> #include "BBComplex.h" #include "InputOutput.cuh" #include "Utilities.cuh" #include "Matlab_like.cuh" #include "Polynomials.cuh" #include "Synthesis.cuh" #include "NFFT2_2D.cuh" #include "NDFT2_2D.cuh" #include "PSO.cuh" #define pi 3.141592653589793238463 // --- Algorithm parameters #define freq ((14.25)*(1e9)) // Operating frequency //#define lambda ((3e8)/(freq)) // Wavelength extern const float lambda_f = (float) 3e8 / freq; // Wavelength extern const double lambda = (double)3e8 / freq; // Wavelength extern const float beta_f = 2. * pi / lambda; // Wavenumber extern const double beta = 2. * pi / lambda; // Wavenumber extern const int M_x = 44; // Number of reflectarray elements along the x-axis extern const int M_y = 44; // Number of reflectarray elements along the y-axis #define dx ((0.5)*(lambda)) // dist elem x (per array) #define dy ((0.5)*(lambda)) // dist elem y (per array) //#define aap (((M_x)-(1))*((dx)/(2))) // Reflectarray semi-dimension along the x-axis //#define bap (((M_y)-(1)))*((dy)/(2)) // Reflectarray semi-dimension along the y-axis extern const float aap = (float)(M_x - 1) * (dx / 2.f); // Reflectarray semi-dimension along the x-axis extern const float bap = (float)(M_y - 1) * (dy / 2.f); // Reflectarray semi-dimension along the y-axis extern const float mfact_f = 12.f; // Feed pattern: cos^mfact(theta) extern const double mfact = 12.; // Feed pattern: cos^mfact(theta) // ??? INUTILI ??? #define dmin ((0.51)*(lambda)) // Minimum allowed inter-element spacing #define dmax ((0.7 )*(lambda)) // Maximum allowed inter-element spacing #define dmin_x ((0.51)*(lambda)) // Minimum allowed inter-element spacing along the x-axis #define dmin_y ((0.51)*(lambda)) // Minimum allowed inter-element spacing along the y-axis #define dmax_x ((0.7 )*(lambda)) // Maximum allowed inter-element spacing along the x-axis #define dmax_y ((0.7 )*(lambda)) // Maximum allowed inter-element spacing along the y-axis #define z0 ((2)*(0.8)*(sqrt((aap)*(aap)+(bap)*(bap)))) // Focal length of the reflectarray surface extern const float feed_center_x_f = 0.f; extern const float feed_center_y_f = 1.15f * bap; extern const float feed_center_z_f = -z0; extern const double feed_center_x = 0.; extern const double feed_center_y = 1.15 * bap; extern const double feed_center_z = -z0; extern const float alfa_f = -atan(feed_center_y_f / feed_center_z_f);// Feed illumination angle extern const double alfa = -atan(feed_center_y / feed_center_z); // Feed illumination angle extern const int Num_unknowns_x = 5; // Number of unknowns for the element positions along the x-axis extern const int Num_unknowns_y = 5; // Number of unknowns for the element positions along the y-axis extern const int Num_unknowns_phases = 6; // Number of unknowns for the phase representation #define chi_u_prime 4 // Spectral oversampling factor along u #define chi_v_prime 4 // Spectral oversampling factor along v //#define a_prime ((chi_u_prime)*(aap)) //#define b_prime ((chi_v_prime)*(bap)) extern const float a_prime_f = (float)((chi_u_prime)*(aap)); extern const float b_prime_f = (float)((chi_v_prime)*(bap)); extern const double a_prime = ((chi_u_prime)*(aap)); extern const double b_prime = ((chi_v_prime)*(bap)); #define u_max ((beta)/(2.)) // Maximum value of the spectral region along the u axis #define 
u_min (-(beta)/(2.)) // Minimum value of the spectral region along the u axis #define v_max ((beta)/(2.)) // Maximum value of the spectral region along the v axis #define v_min (-(beta)/(2.)) // Minimum value of the spectral region along the v axis extern int Nu; extern int Nv; extern hipblasHandle_t cublasHandleSynthesis; extern float *d_U_discrete_f = NULL; extern float *d_V_discrete_f = NULL; extern float *d_Filter_f = NULL; extern float *d_LEG_f = NULL; extern float *d_ZERNIKE_f = NULL; extern float *d_Internal_Coverage_f = NULL; extern float *d_External_Coverage_f = NULL; extern double *d_U_discrete = NULL; extern double *d_V_discrete = NULL; extern double *d_Filter = NULL; extern double *d_LEG = NULL; extern double *d_ZERNIKE = NULL; extern double *d_Internal_Coverage = NULL; extern double *d_External_Coverage = NULL; extern float *h_U_discrete_f = NULL; extern float *h_V_discrete_f = NULL; extern float *h_Filter_f = NULL; extern float *h_LEG_f = NULL; extern float *h_ZERNIKE_f = NULL; extern float *h_Internal_Coverage_f = NULL; extern float *h_External_Coverage_f = NULL; extern double *h_U_discrete = NULL; extern double *h_V_discrete = NULL; extern double *h_Filter = NULL; extern double *h_LEG = NULL; extern double *h_ZERNIKE = NULL; extern double *h_Internal_Coverage = NULL; extern double *h_External_Coverage = NULL; #define DEBUG /********/ /* MAIN */ /********/ //int main() //{ // cublasSafeCall(hipblasCreate(&cublasHandleSynthesis)); // // // --- Defining spectral quantities // thrust::pair<thrust::pair<float *, float *>, float *> d_SpectralTuple = defineSpectralQuantities((float)u_max, (float)v_max, (float)a_prime_f, (float)b_prime_f, (float)beta, &Nu, &Nv); // thrust::pair<float *, float *> d_UV_discrete = d_SpectralTuple.first; // d_U_discrete_f = d_UV_discrete.first; // d_V_discrete_f = d_UV_discrete.second; // d_Filter_f = d_SpectralTuple.second; // // // --- Generating the (csi, eta) grid and the Legendre polynomials // thrust::pair<thrust::pair<float *, float *>, float *> d_LegendreTuple = generateLegendreFactorized<float>(Num_unknowns_x, Num_unknowns_y, M_x, M_y); // thrust::pair<float *, float *> d_CSI_ETA = d_LegendreTuple.first; // float *d_CSI = d_CSI_ETA.first; // float *d_ETA = d_CSI_ETA.second; // d_LEG_f = d_LegendreTuple.second; // // // --- Generating the Zernike polynomials // d_ZERNIKE_f = generateZernikep(d_CSI, d_ETA, Num_unknowns_phases, M_x, M_y); // // // --- Loading the masks // d_Internal_Coverage_f = loadGPUrealtxt("/home/angelo/cuda-workspace/ParticleSwarmSynthesis/Release/Internal_Coverage.txt", d_Internal_Coverage_f, (2 * Nu) * (2 * Nv)); // d_External_Coverage_f = loadGPUrealtxt("/home/angelo/cuda-workspace/ParticleSwarmSynthesis/Release/External_Coverage.txt", d_External_Coverage_f, (2 * Nu) * (2 * Nv)); // // /**********************/ // /* PSO INITIALIZATION */ // /**********************/ // h_PSO_Initialize(); // h_PSO_Optimize(); // // return 0; //} int main() { cublasSafeCall(hipblasCreate(&cublasHandleSynthesis)); // --- Defining spectral quantities thrust::pair<thrust::pair<float *, float *>, float *> d_SpectralTuple = defineSpectralQuantities((float)u_max, (float)v_max, (float)a_prime_f, (float)b_prime_f, (float)beta, &Nu, &Nv); thrust::pair<float *, float *> d_UV_discrete = d_SpectralTuple.first; d_U_discrete_f = d_UV_discrete.first; d_V_discrete_f = d_UV_discrete.second; d_Filter_f = d_SpectralTuple.second; thrust::pair<thrust::pair<float *, float *>, float *> h_SpectralTuple = h_defineSpectralQuantities((float)u_max, (float)v_max, 
(float)a_prime_f, (float)b_prime_f, (float)beta, &Nu, &Nv); thrust::pair<float *, float *> h_UV_discrete = h_SpectralTuple.first; h_U_discrete_f = h_UV_discrete.first; h_V_discrete_f = h_UV_discrete.second; h_Filter_f = h_SpectralTuple.second; // --- Generating the (csi, eta) grid and the Legendre polynomials thrust::pair<thrust::pair<float *, float *>, float *> d_LegendreTuple = generateLegendreFactorized<float>(Num_unknowns_x, Num_unknowns_y, M_x, M_y); thrust::pair<float *, float *> d_CSI_ETA = d_LegendreTuple.first; float *d_CSI = d_CSI_ETA.first; float *d_ETA = d_CSI_ETA.second; d_LEG_f = d_LegendreTuple.second; thrust::pair<thrust::pair<float *, float *>, float *> h_LegendreTuple = h_generateLegendreFactorized<float>(Num_unknowns_x, Num_unknowns_y, M_x, M_y); thrust::pair<float *, float *> h_CSI_ETA = h_LegendreTuple.first; float *h_CSI = h_CSI_ETA.first; float *h_ETA = h_CSI_ETA.second; h_LEG_f = h_LegendreTuple.second; // --- Generating the Zernike polynomials d_ZERNIKE_f = generateZernikep(d_CSI, d_ETA, Num_unknowns_phases, M_x, M_y); h_ZERNIKE_f = h_generateZernikep(h_CSI, h_ETA, Num_unknowns_phases, M_x, M_y); //saveGPUrealtxt(d_ZERNIKE_f, "C:\\Users\\angelo\\Documents\\CEM\\ParticleSwarm\\ParticleSwarmSynthesis\\ParticleSwarmSynthesisMatlab\\d_ZERNIKE_f.txt", Num_unknowns_phases * M_x * M_y); // //saveCPUrealtxt(h_ZERNIKE_f, "C:\\Users\\angelo\\Documents\\CEM\\ParticleSwarm\\ParticleSwarmSynthesis\\ParticleSwarmSynthesisMatlab\\h_ZERNIKE_f.txt", Num_unknowns_phases * M_x * M_y); //// --- Loading the masks //d_Internal_Coverage_f = loadGPUrealtxt("/home/angelo/cuda-workspace/ParticleSwarmSynthesis/Release/Internal_Coverage.txt", d_Internal_Coverage_f, (2 * Nu) * (2 * Nv)); //d_External_Coverage_f = loadGPUrealtxt("/home/angelo/cuda-workspace/ParticleSwarmSynthesis/Release/External_Coverage.txt", d_External_Coverage_f, (2 * Nu) * (2 * Nv)); ///**********************/ ///* PSO INITIALIZATION */ ///**********************/ //h_PSO_Initialize(); //h_PSO_Optimize(); return 0; }
500539c84a281f13d0796297a10a67f51ad76924.cu
#include <stdio.h> #include <math.h> #include <iostream> #include <thrust/device_vector.h> #include <thrust/transform_reduce.h> #include <thrust/reduce.h> #include <thrust/tuple.h> #include "BBComplex.h" #include "InputOutput.cuh" #include "Utilities.cuh" #include "Matlab_like.cuh" #include "Polynomials.cuh" #include "Synthesis.cuh" #include "NFFT2_2D.cuh" #include "NDFT2_2D.cuh" #include "PSO.cuh" #define pi 3.141592653589793238463 // --- Algorithm parameters #define freq ((14.25)*(1e9)) // Operating frequency //#define lambda ((3e8)/(freq)) // Wavelength extern const float lambda_f = (float) 3e8 / freq; // Wavelength extern const double lambda = (double)3e8 / freq; // Wavelength extern const float beta_f = 2. * pi / lambda; // Wavenumber extern const double beta = 2. * pi / lambda; // Wavenumber extern const int M_x = 44; // Number of reflectarray elements along the x-axis extern const int M_y = 44; // Number of reflectarray elements along the y-axis #define dx ((0.5)*(lambda)) // dist elem x (per array) #define dy ((0.5)*(lambda)) // dist elem y (per array) //#define aap (((M_x)-(1))*((dx)/(2))) // Reflectarray semi-dimension along the x-axis //#define bap (((M_y)-(1)))*((dy)/(2)) // Reflectarray semi-dimension along the y-axis extern const float aap = (float)(M_x - 1) * (dx / 2.f); // Reflectarray semi-dimension along the x-axis extern const float bap = (float)(M_y - 1) * (dy / 2.f); // Reflectarray semi-dimension along the y-axis extern const float mfact_f = 12.f; // Feed pattern: cos^mfact(theta) extern const double mfact = 12.; // Feed pattern: cos^mfact(theta) // ??? INUTILI ??? #define dmin ((0.51)*(lambda)) // Minimum allowed inter-element spacing #define dmax ((0.7 )*(lambda)) // Maximum allowed inter-element spacing #define dmin_x ((0.51)*(lambda)) // Minimum allowed inter-element spacing along the x-axis #define dmin_y ((0.51)*(lambda)) // Minimum allowed inter-element spacing along the y-axis #define dmax_x ((0.7 )*(lambda)) // Maximum allowed inter-element spacing along the x-axis #define dmax_y ((0.7 )*(lambda)) // Maximum allowed inter-element spacing along the y-axis #define z0 ((2)*(0.8)*(sqrt((aap)*(aap)+(bap)*(bap)))) // Focal length of the reflectarray surface extern const float feed_center_x_f = 0.f; extern const float feed_center_y_f = 1.15f * bap; extern const float feed_center_z_f = -z0; extern const double feed_center_x = 0.; extern const double feed_center_y = 1.15 * bap; extern const double feed_center_z = -z0; extern const float alfa_f = -atan(feed_center_y_f / feed_center_z_f);// Feed illumination angle extern const double alfa = -atan(feed_center_y / feed_center_z); // Feed illumination angle extern const int Num_unknowns_x = 5; // Number of unknowns for the element positions along the x-axis extern const int Num_unknowns_y = 5; // Number of unknowns for the element positions along the y-axis extern const int Num_unknowns_phases = 6; // Number of unknowns for the phase representation #define chi_u_prime 4 // Spectral oversampling factor along u #define chi_v_prime 4 // Spectral oversampling factor along v //#define a_prime ((chi_u_prime)*(aap)) //#define b_prime ((chi_v_prime)*(bap)) extern const float a_prime_f = (float)((chi_u_prime)*(aap)); extern const float b_prime_f = (float)((chi_v_prime)*(bap)); extern const double a_prime = ((chi_u_prime)*(aap)); extern const double b_prime = ((chi_v_prime)*(bap)); #define u_max ((beta)/(2.)) // Maximum value of the spectral region along the u axis #define u_min (-(beta)/(2.)) // Minimum value of the spectral region 
along the u axis #define v_max ((beta)/(2.)) // Maximum value of the spectral region along the v axis #define v_min (-(beta)/(2.)) // Minimum value of the spectral region along the v axis extern int Nu; extern int Nv; extern cublasHandle_t cublasHandleSynthesis; extern float *d_U_discrete_f = NULL; extern float *d_V_discrete_f = NULL; extern float *d_Filter_f = NULL; extern float *d_LEG_f = NULL; extern float *d_ZERNIKE_f = NULL; extern float *d_Internal_Coverage_f = NULL; extern float *d_External_Coverage_f = NULL; extern double *d_U_discrete = NULL; extern double *d_V_discrete = NULL; extern double *d_Filter = NULL; extern double *d_LEG = NULL; extern double *d_ZERNIKE = NULL; extern double *d_Internal_Coverage = NULL; extern double *d_External_Coverage = NULL; extern float *h_U_discrete_f = NULL; extern float *h_V_discrete_f = NULL; extern float *h_Filter_f = NULL; extern float *h_LEG_f = NULL; extern float *h_ZERNIKE_f = NULL; extern float *h_Internal_Coverage_f = NULL; extern float *h_External_Coverage_f = NULL; extern double *h_U_discrete = NULL; extern double *h_V_discrete = NULL; extern double *h_Filter = NULL; extern double *h_LEG = NULL; extern double *h_ZERNIKE = NULL; extern double *h_Internal_Coverage = NULL; extern double *h_External_Coverage = NULL; #define DEBUG /********/ /* MAIN */ /********/ //int main() //{ // cublasSafeCall(cublasCreate(&cublasHandleSynthesis)); // // // --- Defining spectral quantities // thrust::pair<thrust::pair<float *, float *>, float *> d_SpectralTuple = defineSpectralQuantities((float)u_max, (float)v_max, (float)a_prime_f, (float)b_prime_f, (float)beta, &Nu, &Nv); // thrust::pair<float *, float *> d_UV_discrete = d_SpectralTuple.first; // d_U_discrete_f = d_UV_discrete.first; // d_V_discrete_f = d_UV_discrete.second; // d_Filter_f = d_SpectralTuple.second; // // // --- Generating the (csi, eta) grid and the Legendre polynomials // thrust::pair<thrust::pair<float *, float *>, float *> d_LegendreTuple = generateLegendreFactorized<float>(Num_unknowns_x, Num_unknowns_y, M_x, M_y); // thrust::pair<float *, float *> d_CSI_ETA = d_LegendreTuple.first; // float *d_CSI = d_CSI_ETA.first; // float *d_ETA = d_CSI_ETA.second; // d_LEG_f = d_LegendreTuple.second; // // // --- Generating the Zernike polynomials // d_ZERNIKE_f = generateZernikep(d_CSI, d_ETA, Num_unknowns_phases, M_x, M_y); // // // --- Loading the masks // d_Internal_Coverage_f = loadGPUrealtxt("/home/angelo/cuda-workspace/ParticleSwarmSynthesis/Release/Internal_Coverage.txt", d_Internal_Coverage_f, (2 * Nu) * (2 * Nv)); // d_External_Coverage_f = loadGPUrealtxt("/home/angelo/cuda-workspace/ParticleSwarmSynthesis/Release/External_Coverage.txt", d_External_Coverage_f, (2 * Nu) * (2 * Nv)); // // /**********************/ // /* PSO INITIALIZATION */ // /**********************/ // h_PSO_Initialize(); // h_PSO_Optimize(); // // return 0; //} int main() { cublasSafeCall(cublasCreate(&cublasHandleSynthesis)); // --- Defining spectral quantities thrust::pair<thrust::pair<float *, float *>, float *> d_SpectralTuple = defineSpectralQuantities((float)u_max, (float)v_max, (float)a_prime_f, (float)b_prime_f, (float)beta, &Nu, &Nv); thrust::pair<float *, float *> d_UV_discrete = d_SpectralTuple.first; d_U_discrete_f = d_UV_discrete.first; d_V_discrete_f = d_UV_discrete.second; d_Filter_f = d_SpectralTuple.second; thrust::pair<thrust::pair<float *, float *>, float *> h_SpectralTuple = h_defineSpectralQuantities((float)u_max, (float)v_max, (float)a_prime_f, (float)b_prime_f, (float)beta, &Nu, &Nv); 
thrust::pair<float *, float *> h_UV_discrete = h_SpectralTuple.first; h_U_discrete_f = h_UV_discrete.first; h_V_discrete_f = h_UV_discrete.second; h_Filter_f = h_SpectralTuple.second; // --- Generating the (csi, eta) grid and the Legendre polynomials thrust::pair<thrust::pair<float *, float *>, float *> d_LegendreTuple = generateLegendreFactorized<float>(Num_unknowns_x, Num_unknowns_y, M_x, M_y); thrust::pair<float *, float *> d_CSI_ETA = d_LegendreTuple.first; float *d_CSI = d_CSI_ETA.first; float *d_ETA = d_CSI_ETA.second; d_LEG_f = d_LegendreTuple.second; thrust::pair<thrust::pair<float *, float *>, float *> h_LegendreTuple = h_generateLegendreFactorized<float>(Num_unknowns_x, Num_unknowns_y, M_x, M_y); thrust::pair<float *, float *> h_CSI_ETA = h_LegendreTuple.first; float *h_CSI = h_CSI_ETA.first; float *h_ETA = h_CSI_ETA.second; h_LEG_f = h_LegendreTuple.second; // --- Generating the Zernike polynomials d_ZERNIKE_f = generateZernikep(d_CSI, d_ETA, Num_unknowns_phases, M_x, M_y); h_ZERNIKE_f = h_generateZernikep(h_CSI, h_ETA, Num_unknowns_phases, M_x, M_y); //saveGPUrealtxt(d_ZERNIKE_f, "C:\\Users\\angelo\\Documents\\CEM\\ParticleSwarm\\ParticleSwarmSynthesis\\ParticleSwarmSynthesisMatlab\\d_ZERNIKE_f.txt", Num_unknowns_phases * M_x * M_y); // //saveCPUrealtxt(h_ZERNIKE_f, "C:\\Users\\angelo\\Documents\\CEM\\ParticleSwarm\\ParticleSwarmSynthesis\\ParticleSwarmSynthesisMatlab\\h_ZERNIKE_f.txt", Num_unknowns_phases * M_x * M_y); //// --- Loading the masks //d_Internal_Coverage_f = loadGPUrealtxt("/home/angelo/cuda-workspace/ParticleSwarmSynthesis/Release/Internal_Coverage.txt", d_Internal_Coverage_f, (2 * Nu) * (2 * Nv)); //d_External_Coverage_f = loadGPUrealtxt("/home/angelo/cuda-workspace/ParticleSwarmSynthesis/Release/External_Coverage.txt", d_External_Coverage_f, (2 * Nu) * (2 * Nv)); ///**********************/ ///* PSO INITIALIZATION */ ///**********************/ //h_PSO_Initialize(); //h_PSO_Optimize(); return 0; }
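For orientation, the constants defined above work out to roughly the following values (rounded, derived only from the expressions in this file).

// Approximate values implied by the parameter block:
//   lambda = 3e8 / 14.25e9           ~ 21.05 mm
//   dx = dy = 0.5 * lambda           ~ 10.53 mm
//   aap = bap = 43 * dx / 2          ~ 0.2263 m
//   z0  = 1.6 * sqrt(aap^2 + bap^2)  ~ 0.512 m
//   feed_center_y = 1.15 * bap       ~ 0.260 m
//   alfa = atan(0.260 / 0.512)       ~ 27 degrees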
88754ec04e1c0d730699ee16ba284bfd6dee4e89.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "World.h" #include <cstdio> __global__ void ClearMaterials(Material **ptr) { int idx = threadIdx.x; delete ptr[idx]; } __global__ void ClearObjects(GObject **ptr) { int idx = threadIdx.x; delete ptr[idx]; } __global__ void ClearLights(Light **ptr) { int idx = threadIdx.x; delete ptr[idx]; } __global__ void ClearGPU(World *ptr) { delete ptr->camera; delete ptr->ray_tracer; delete ptr->pixel_sampler; delete ptr->ambient_ptr; free(ptr->scene_lights); free(ptr->scene_objs); free(ptr->scene_materials); } void clearScene(World *wr) { printf("Destroying scene\n"); fflush(stdout); free(wr->image); if(wr->num_of_objects > 0) { printf("\t-Objects\n"); fflush(stdout); hipLaunchKernelGGL(( ClearObjects) , dim3(1), dim3(wr->num_of_objects) , 0, 0, wr->scene_objs); } SYNC_AND_CHECK_CUDA_ERRORS; if (wr->num_of_materials > 0) { printf("\t-Materials\n"); fflush(stdout); hipLaunchKernelGGL(( ClearMaterials) , dim3(1), dim3(wr->num_of_materials) , 0, 0, wr->scene_materials); } SYNC_AND_CHECK_CUDA_ERRORS; if (wr->num_of_lights > 0) { printf("\t-Lights\n"); fflush(stdout); hipLaunchKernelGGL(( ClearLights) , dim3(1), dim3(wr->num_of_lights) , 0, 0, wr->scene_lights); } SYNC_AND_CHECK_CUDA_ERRORS; hipLaunchKernelGGL(( ClearGPU) , dim3(1), dim3(1) , 0, 0, wr); SYNC_AND_CHECK_CUDA_ERRORS; hipFree(wr); SYNC_AND_CHECK_CUDA_ERRORS; printf("\tOK\n"); fflush(stdout); }
88754ec04e1c0d730699ee16ba284bfd6dee4e89.cu
#include "World.h" #include <cstdio> __global__ void ClearMaterials(Material **ptr) { int idx = threadIdx.x; delete ptr[idx]; } __global__ void ClearObjects(GObject **ptr) { int idx = threadIdx.x; delete ptr[idx]; } __global__ void ClearLights(Light **ptr) { int idx = threadIdx.x; delete ptr[idx]; } __global__ void ClearGPU(World *ptr) { delete ptr->camera; delete ptr->ray_tracer; delete ptr->pixel_sampler; delete ptr->ambient_ptr; free(ptr->scene_lights); free(ptr->scene_objs); free(ptr->scene_materials); } void clearScene(World *wr) { printf("Destroying scene\n"); fflush(stdout); free(wr->image); if(wr->num_of_objects > 0) { printf("\t-Objects\n"); fflush(stdout); ClearObjects <<< 1, wr->num_of_objects >>> (wr->scene_objs); } SYNC_AND_CHECK_CUDA_ERRORS; if (wr->num_of_materials > 0) { printf("\t-Materials\n"); fflush(stdout); ClearMaterials <<< 1, wr->num_of_materials >>> (wr->scene_materials); } SYNC_AND_CHECK_CUDA_ERRORS; if (wr->num_of_lights > 0) { printf("\t-Lights\n"); fflush(stdout); ClearLights <<< 1, wr->num_of_lights >>> (wr->scene_lights); } SYNC_AND_CHECK_CUDA_ERRORS; ClearGPU <<< 1, 1 >>> (wr); SYNC_AND_CHECK_CUDA_ERRORS; cudaFree(wr); SYNC_AND_CHECK_CUDA_ERRORS; printf("\tOK\n"); fflush(stdout); }
54c135a105a93578c9278dfb4be85c5defc28a1a.hip
// !!! This is a file automatically generated by hipify!!! #include "debug.h" #include "ray.cuh" #include "sphere_hip.cuh" #include "scene.cuh" #include "importer.cuh" #define GLM_FORCE_CUDA #include <glm/glm.hpp> #define STB_IMAGE_WRITE_IMPLEMENTATION #define STBI_MSC_SECURE_CRT #include <stb/stb_image_write.h> #include <hiprand/hiprand_kernel.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <iostream> const int width = 500; const int height = 500; const int buffer_size = width * height * 3; const int half_width = width / 2; const int half_height = height / 2; const int shadow_sample_count = 10; const int max_depth = 10; __device__ float compute_refl_coefficient(const Ray & ray, const CollisionData & data, float ni) { float cos_angle = glm::dot(-ray.direction(), data.mNormal); float ni_over_nt = ni / data.mMaterial.mRefractionIndex; float inside_sqrt = 1.0f - ni_over_nt * ni_over_nt * (1.0f - cos_angle * cos_angle); if (inside_sqrt < 0.0f) return 1.0f; float square_root = std::sqrt(inside_sqrt); float perpendicular = (ni_over_nt * cos_angle - square_root) / (ni_over_nt * cos_angle + square_root); float parallel = (cos_angle - ni_over_nt * square_root) / (cos_angle + ni_over_nt * square_root); return 0.5f * (perpendicular * perpendicular + parallel * parallel); } __device__ glm::vec3 snells_law(const Ray & ray, const CollisionData & data, float ni) { glm::vec3 incident = -ray.direction(); float dot_incident_normal = glm::dot(incident, data.mNormal); float ni_over_nt = ni / data.mMaterial.mRefractionIndex; float inside_sqrt = 1.0f - (ni_over_nt * ni_over_nt * (1.0f - dot_incident_normal * dot_incident_normal)); float cos_angle = std::sqrt(inside_sqrt); if (dot_incident_normal >= 0.0f) cos_angle = -cos_angle; // Compute refracted ray return glm::normalize((cos_angle + ni_over_nt * dot_incident_normal) * data.mNormal - ni_over_nt * incident); } __device__ float compute_lit_percentage(const vector<Surface *> & surfaces, const PointLight & light, const glm::vec3 & intersection_point, hiprandState_t * random_state) { CollisionData dummy; int shadow_count = 0; // Soft shadows for (unsigned current_sample = 0; current_sample < shadow_sample_count; ++current_sample) { Ray shadow_ray{ intersection_point, light.rand_pos(random_state) - intersection_point }; // Check for collisions, if there's any, we are in shadow for (int j = 0; j < surfaces.size(); ++j) { if (surfaces[j]->collide(shadow_ray, 0.001f, 1.0f, dummy)) { shadow_count++; break; } } } // Compute how much the surface is lit return 1.0f - static_cast<float>(shadow_count) / static_cast<float>(shadow_sample_count); } struct RayData { Ray mRay; float mAttenuationInv; float mNi; int mDepth; }; __device__ glm::vec3 cast_ray(float x, float y, Scene * scene, hiprandState_t * random_state) { float current_x = (static_cast<float>(x) + 0.5f - half_width) / half_width; float current_y = -(static_cast<float>(y) + 0.5f - half_height) / half_height; const Camera & camera = scene->camera(); glm::vec3 pixel_position = camera.projection_center() + camera.right() * current_x + camera.up() * current_y; glm::vec3 final_color{ 0.0f }; // Vector to know how many rays we have left since recursion will lead to stackoverflow, we need an iterative mode // We start with the initial ray casted from the camera to the pixel coordinate vector<RayData> ray_stack; ray_stack.push_back(RayData{ Ray{ camera.position(), glm::normalize(pixel_position - camera.position()) }, 1.0f, 1.0f, 0 }); while(ray_stack.empty() == false) { RayData ray_data = 
ray_stack.back(); ray_stack.pop_back(); // Check for collisions CollisionData collision_data; const vector<Surface *> & surfaces = scene->surfaces(); for (int i = 0; i < surfaces.size(); ++i) surfaces[i]->collide(ray_data.mRay, 0.0f, collision_data.mT, collision_data); // Hit nothing, next ray if (collision_data.mT == FLT_MAX) continue; // Intersection point and reflected direction glm::vec3 intersection_point = ray_data.mRay.at(collision_data.mT); glm::vec3 reflected = glm::reflect(ray_data.mRay.direction(), collision_data.mNormal); // Ambient final_color += collision_data.mMaterial.mColor * scene->ambient() * ray_data.mAttenuationInv; // Local illumination const vector<PointLight> & lights = scene->lights(); for (int i = 0; i < lights.size(); ++i) { // Check if surface is lit float lit = compute_lit_percentage(surfaces, lights[i], intersection_point, random_state); if (lit == 0.0f) continue; // Diffuse glm::vec3 to_light = glm::normalize(lights[i].position() - intersection_point); float cos_angle = glm::dot(to_light, collision_data.mNormal); if (cos_angle < 0.0f) continue; final_color += collision_data.mMaterial.mColor * lights[i].intensity() * cos_angle * ray_data.mAttenuationInv * lit; // Specular cos_angle = glm::dot(reflected, to_light); if (cos_angle > 0.0f) { float specular_value = powf(cos_angle, collision_data.mMaterial.mShininess) * collision_data.mMaterial.mSpecularCoefficient; final_color += lights[i].intensity() * specular_value * ray_data.mAttenuationInv * lit; } } // Getting out of the object after refraction if (ray_data.mNi != 1.0f) collision_data.mMaterial.mRefractionIndex = 1.0f; if (glm::dot(-ray_data.mRay.direction(), collision_data.mNormal) < 0.0f) collision_data.mNormal = -collision_data.mNormal; // Keep computing color until max depth int next_depth = ray_data.mDepth + 1; if (next_depth < max_depth) { float reflection_coefficient = compute_refl_coefficient(ray_data.mRay, collision_data, ray_data.mNi); float transmission_coefficient = (1.0f - reflection_coefficient) * collision_data.mMaterial.mSpecularCoefficient; reflection_coefficient *= collision_data.mMaterial.mSpecularCoefficient; // Reflection if (reflection_coefficient > 0.0f) { Ray reflected_ray{ intersection_point + collision_data.mNormal * 0.001f, reflected }; float next_attenuation = ray_data.mAttenuationInv * reflection_coefficient; ray_stack.push_back(RayData{ reflected_ray, next_attenuation, ray_data.mNi, next_depth }); } // Transmission if (transmission_coefficient > 0.0f) { Ray refracted_ray{ intersection_point - collision_data.mNormal * 0.001f, snells_law(ray_data.mRay, collision_data, ray_data.mNi) }; float next_attenuation = ray_data.mAttenuationInv * transmission_coefficient; ray_stack.push_back(RayData{ refracted_ray, next_attenuation, collision_data.mMaterial.mRefractionIndex, next_depth }); } } } return glm::min(final_color, glm::vec3{ 1.0f }); } __global__ void render_image(unsigned char * image_data, int width, int height, Scene * scene) { // Get coordinates from block and thread indices int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= width || y >= height) return; int pixel_index = y * width * 3 + x * 3; // Random state for pseudo random number generator hiprandState_t random_state; hiprand_init(1997, pixel_index, 0, &random_state); // Compute and store color glm::vec3 color = cast_ray(static_cast<float>(x), static_cast<float>(y), scene, &random_state); image_data[pixel_index] = static_cast<unsigned char>(color.r * 255.99f); 
image_data[pixel_index + 1] = static_cast<unsigned char>(color.g * 255.99f); image_data[pixel_index + 2] = static_cast<unsigned char>(color.b * 255.99f); } __global__ void initialize_scene(Scene * scene) { new (scene) Scene{}; } __global__ void destroy_scene(Scene * scene) { scene->~Scene(); } int main() { // Scene creation Scene * scene = nullptr; CheckCUDAError(hipMalloc((void **)&scene, sizeof(Scene))); hipLaunchKernelGGL(( initialize_scene), dim3(1),dim3(1), 0, 0, scene); CheckCUDAError(hipGetLastError()); CheckCUDAError(hipDeviceSynchronize()); importer::import_scene("scene.txt", scene); // Allocate memory in shared memory unsigned char * image_data = nullptr; CheckCUDAError(hipMallocManaged((void **)&image_data, buffer_size)); // Compute needed blocks for the whole image dim3 threads(8, 8); dim3 blocks(width / threads.x + 1, height / threads.y + 1); // Render image hipLaunchKernelGGL(( render_image), dim3(blocks),dim3(threads), 0, 0, image_data, width, height, scene); CheckCUDAError(hipGetLastError()); CheckCUDAError(hipDeviceSynchronize()); // Store color stbi_write_png("MyOutput.png", width, height, 3, image_data, 0); // Free memory hipFree(image_data); hipLaunchKernelGGL(( destroy_scene), dim3(1),dim3(1), 0, 0, scene); CheckCUDAError(hipGetLastError()); CheckCUDAError(hipDeviceSynchronize()); hipFree(scene); return 0; }
54c135a105a93578c9278dfb4be85c5defc28a1a.cu
#include "debug.h" #include "ray.cuh" #include "sphere.cuh" #include "scene.cuh" #include "importer.cuh" #define GLM_FORCE_CUDA #include <glm/glm.hpp> #define STB_IMAGE_WRITE_IMPLEMENTATION #define STBI_MSC_SECURE_CRT #include <stb/stb_image_write.h> #include <curand_kernel.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <iostream> const int width = 500; const int height = 500; const int buffer_size = width * height * 3; const int half_width = width / 2; const int half_height = height / 2; const int shadow_sample_count = 10; const int max_depth = 10; __device__ float compute_refl_coefficient(const Ray & ray, const CollisionData & data, float ni) { float cos_angle = glm::dot(-ray.direction(), data.mNormal); float ni_over_nt = ni / data.mMaterial.mRefractionIndex; float inside_sqrt = 1.0f - ni_over_nt * ni_over_nt * (1.0f - cos_angle * cos_angle); if (inside_sqrt < 0.0f) return 1.0f; float square_root = std::sqrt(inside_sqrt); float perpendicular = (ni_over_nt * cos_angle - square_root) / (ni_over_nt * cos_angle + square_root); float parallel = (cos_angle - ni_over_nt * square_root) / (cos_angle + ni_over_nt * square_root); return 0.5f * (perpendicular * perpendicular + parallel * parallel); } __device__ glm::vec3 snells_law(const Ray & ray, const CollisionData & data, float ni) { glm::vec3 incident = -ray.direction(); float dot_incident_normal = glm::dot(incident, data.mNormal); float ni_over_nt = ni / data.mMaterial.mRefractionIndex; float inside_sqrt = 1.0f - (ni_over_nt * ni_over_nt * (1.0f - dot_incident_normal * dot_incident_normal)); float cos_angle = std::sqrt(inside_sqrt); if (dot_incident_normal >= 0.0f) cos_angle = -cos_angle; // Compute refracted ray return glm::normalize((cos_angle + ni_over_nt * dot_incident_normal) * data.mNormal - ni_over_nt * incident); } __device__ float compute_lit_percentage(const vector<Surface *> & surfaces, const PointLight & light, const glm::vec3 & intersection_point, curandState * random_state) { CollisionData dummy; int shadow_count = 0; // Soft shadows for (unsigned current_sample = 0; current_sample < shadow_sample_count; ++current_sample) { Ray shadow_ray{ intersection_point, light.rand_pos(random_state) - intersection_point }; // Check for collisions, if there's any, we are in shadow for (int j = 0; j < surfaces.size(); ++j) { if (surfaces[j]->collide(shadow_ray, 0.001f, 1.0f, dummy)) { shadow_count++; break; } } } // Compute how much the surface is lit return 1.0f - static_cast<float>(shadow_count) / static_cast<float>(shadow_sample_count); } struct RayData { Ray mRay; float mAttenuationInv; float mNi; int mDepth; }; __device__ glm::vec3 cast_ray(float x, float y, Scene * scene, curandState * random_state) { float current_x = (static_cast<float>(x) + 0.5f - half_width) / half_width; float current_y = -(static_cast<float>(y) + 0.5f - half_height) / half_height; const Camera & camera = scene->camera(); glm::vec3 pixel_position = camera.projection_center() + camera.right() * current_x + camera.up() * current_y; glm::vec3 final_color{ 0.0f }; // Vector to know how many rays we have left since recursion will lead to stackoverflow, we need an iterative mode // We start with the initial ray casted from the camera to the pixel coordinate vector<RayData> ray_stack; ray_stack.push_back(RayData{ Ray{ camera.position(), glm::normalize(pixel_position - camera.position()) }, 1.0f, 1.0f, 0 }); while(ray_stack.empty() == false) { RayData ray_data = ray_stack.back(); ray_stack.pop_back(); // Check for collisions CollisionData 
collision_data; const vector<Surface *> & surfaces = scene->surfaces(); for (int i = 0; i < surfaces.size(); ++i) surfaces[i]->collide(ray_data.mRay, 0.0f, collision_data.mT, collision_data); // Hit nothing, next ray if (collision_data.mT == FLT_MAX) continue; // Intersection point and reflected direction glm::vec3 intersection_point = ray_data.mRay.at(collision_data.mT); glm::vec3 reflected = glm::reflect(ray_data.mRay.direction(), collision_data.mNormal); // Ambient final_color += collision_data.mMaterial.mColor * scene->ambient() * ray_data.mAttenuationInv; // Local illumination const vector<PointLight> & lights = scene->lights(); for (int i = 0; i < lights.size(); ++i) { // Check if surface is lit float lit = compute_lit_percentage(surfaces, lights[i], intersection_point, random_state); if (lit == 0.0f) continue; // Diffuse glm::vec3 to_light = glm::normalize(lights[i].position() - intersection_point); float cos_angle = glm::dot(to_light, collision_data.mNormal); if (cos_angle < 0.0f) continue; final_color += collision_data.mMaterial.mColor * lights[i].intensity() * cos_angle * ray_data.mAttenuationInv * lit; // Specular cos_angle = glm::dot(reflected, to_light); if (cos_angle > 0.0f) { float specular_value = powf(cos_angle, collision_data.mMaterial.mShininess) * collision_data.mMaterial.mSpecularCoefficient; final_color += lights[i].intensity() * specular_value * ray_data.mAttenuationInv * lit; } } // Getting out of the object after refraction if (ray_data.mNi != 1.0f) collision_data.mMaterial.mRefractionIndex = 1.0f; if (glm::dot(-ray_data.mRay.direction(), collision_data.mNormal) < 0.0f) collision_data.mNormal = -collision_data.mNormal; // Keep computing color until max depth int next_depth = ray_data.mDepth + 1; if (next_depth < max_depth) { float reflection_coefficient = compute_refl_coefficient(ray_data.mRay, collision_data, ray_data.mNi); float transmission_coefficient = (1.0f - reflection_coefficient) * collision_data.mMaterial.mSpecularCoefficient; reflection_coefficient *= collision_data.mMaterial.mSpecularCoefficient; // Reflection if (reflection_coefficient > 0.0f) { Ray reflected_ray{ intersection_point + collision_data.mNormal * 0.001f, reflected }; float next_attenuation = ray_data.mAttenuationInv * reflection_coefficient; ray_stack.push_back(RayData{ reflected_ray, next_attenuation, ray_data.mNi, next_depth }); } // Transmission if (transmission_coefficient > 0.0f) { Ray refracted_ray{ intersection_point - collision_data.mNormal * 0.001f, snells_law(ray_data.mRay, collision_data, ray_data.mNi) }; float next_attenuation = ray_data.mAttenuationInv * transmission_coefficient; ray_stack.push_back(RayData{ refracted_ray, next_attenuation, collision_data.mMaterial.mRefractionIndex, next_depth }); } } } return glm::min(final_color, glm::vec3{ 1.0f }); } __global__ void render_image(unsigned char * image_data, int width, int height, Scene * scene) { // Get coordinates from block and thread indices int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= width || y >= height) return; int pixel_index = y * width * 3 + x * 3; // Random state for pseudo random number generator curandState random_state; curand_init(1997, pixel_index, 0, &random_state); // Compute and store color glm::vec3 color = cast_ray(static_cast<float>(x), static_cast<float>(y), scene, &random_state); image_data[pixel_index] = static_cast<unsigned char>(color.r * 255.99f); image_data[pixel_index + 1] = static_cast<unsigned char>(color.g * 255.99f); 
image_data[pixel_index + 2] = static_cast<unsigned char>(color.b * 255.99f); } __global__ void initialize_scene(Scene * scene) { new (scene) Scene{}; } __global__ void destroy_scene(Scene * scene) { scene->~Scene(); } int main() { // Scene creation Scene * scene = nullptr; CheckCUDAError(cudaMalloc((void **)&scene, sizeof(Scene))); initialize_scene<<<1,1>>>(scene); CheckCUDAError(cudaGetLastError()); CheckCUDAError(cudaDeviceSynchronize()); importer::import_scene("scene.txt", scene); // Allocate memory in shared memory unsigned char * image_data = nullptr; CheckCUDAError(cudaMallocManaged((void **)&image_data, buffer_size)); // Compute needed blocks for the whole image dim3 threads(8, 8); dim3 blocks(width / threads.x + 1, height / threads.y + 1); // Render image render_image<<<blocks,threads>>>(image_data, width, height, scene); CheckCUDAError(cudaGetLastError()); CheckCUDAError(cudaDeviceSynchronize()); // Store color stbi_write_png("MyOutput.png", width, height, 3, image_data, 0); // Free memory cudaFree(image_data); destroy_scene<<<1,1>>>(scene); CheckCUDAError(cudaGetLastError()); CheckCUDAError(cudaDeviceSynchronize()); cudaFree(scene); return 0; }
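The ray-tracer pair above is largely a mechanical translation: the CUDA triple-chevron launches in the .cu file become hipLaunchKernelGGL calls in the .hip file, and the cuda*/curand* runtime symbols are renamed to their hip*/hiprand* counterparts. A minimal sketch of that launch-syntax mapping, using a hypothetical scale_kernel as a stand-in for the renderer's kernels (the HIP form is shown in comments, since a single translation unit targets one runtime):

    __global__ void scale_kernel(float* data, int n, float s) {
        int i = blockIdx.x * blockDim.x + threadIdx.x;
        if (i < n) data[i] *= s;   // simple per-element scale
    }
    // CUDA launch, as written in the .cu file above:
    //   scale_kernel<<<grid, block>>>(d_data, n, 2.0f);
    // Equivalent HIP launch emitted by hipify, as in the .hip file above
    // (shared-memory bytes = 0, stream = 0):
    //   hipLaunchKernelGGL(scale_kernel, grid, block, 0, 0, d_data, n, 2.0f);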
92810193c4ceed63ec20717c08cf356948f72276.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> int main(int argc, char **argv) { printf("%s Starting...\n", argv[0]); int deviceCount = 0; hipError_t error_id = hipGetDeviceCount(&deviceCount); if (error_id != hipSuccess){ printf("hipGetDeviceCount returned %d\n -> %s\n", (int)error_id, hipGetErrorString(error_id)); printf("Result = FALL\n"); exit(EXIT_FAILURE); } if (deviceCount == 0){ printf("Threr are no available device(s) that support CUDA\n"); } else { printf("Detected %d CUDA Capable device(s)\n", deviceCount); } int maxDevice = 0; if (deviceCount > 1){ int maxMultiprocessors = 0; for (int device = 0; device < deviceCount; ++device) { struct hipDeviceProp_t props; hipGetDeviceProperties(&props, device); if (maxMultiprocessors < props.multiProcessorCount){ maxMultiprocessors = props.multiProcessorCount; maxDevice = device; } } } int driverVersion = 0, runtimeVersion = 0; hipSetDevice(maxDevice); struct hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, maxDevice); printf("Device %d: '%s' \n", maxDevice, deviceProp.name); hipDriverGetVersion(&driverVersion); hipRuntimeGetVersion(&runtimeVersion); printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n", driverVersion/1000, (driverVersion%100)/10, runtimeVersion/1000, (runtimeVersion%100)/10); printf(" CUDA Capability Major/Minor version number: %d.%d\n", deviceProp.major, deviceProp.minor); printf(" Total amount of global memory: %.2f MBytes (%llu bytes)\n", (float)deviceProp.totalGlobalMem/(pow(1024.0,3)), (unsigned long long) deviceProp.totalGlobalMem); printf(" GPU Clock rate: %.0f MHz (%0.2f GHz)\n", deviceProp.clockRate * 1e-3f, deviceProp.clockRate * 1e-6f); printf(" Memory Clock rate: %0.f MHz\n", deviceProp.memoryClockRate * 1e-3f); printf(" Memory Bus Width: %d-bit\n", deviceProp.memoryBusWidth); if (deviceProp.l2CacheSize){ printf(" L2 Cache Size: %d bytes\n", deviceProp.l2CacheSize); } printf(" Max Texture Dimension Size(x,y,z) 1D=(%d), 2D=(%d,%d), 3D=(%d,%d,%d)\n", deviceProp.maxTexture1D, deviceProp.maxTexture2D[0], deviceProp.maxTexture2D[1], deviceProp.maxTexture3D[0], deviceProp.maxTexture3D[1], deviceProp.maxTexture3D[2]); printf(" Max Layered Texture Size (dim) x layers 1D=(%d) x %d, 2D=(%d,%d) x %d\n", deviceProp.maxTexture1DLayered[0], deviceProp.maxTexture1DLayered[1], deviceProp.maxTexture2DLayered[0], deviceProp.maxTexture2DLayered[1], deviceProp.maxTexture2DLayered[2]); printf(" Total amount of constant memory: %lu bytes\n", deviceProp.totalConstMem); printf(" Total amount of shared memory per block: %lu bytes\n", deviceProp.sharedMemPerBlock); printf(" Total amount of registers available per block: %d\n", deviceProp.regsPerBlock); printf(" Wrap size: %d\n", deviceProp.warpSize); printf(" Maximum number of threads per multiprocessor: %d\n", deviceProp.maxThreadsPerMultiProcessor); printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock); printf(" Maximum size of each dimension of a block: %d x %d x %d\n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]); printf(" Maximum size of each dimension of a grid: %d x %d x %d\n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]); printf(" Maximum memory pitch: %lu bytes\n", deviceProp.memPitch); exit(EXIT_SUCCESS); return 0; }
92810193c4ceed63ec20717c08cf356948f72276.cu
#include <stdio.h> #include <cuda_runtime.h> int main(int argc, char **argv) { printf("%s Starting...\n", argv[0]); int deviceCount = 0; cudaError_t error_id = cudaGetDeviceCount(&deviceCount); if (error_id != cudaSuccess){ printf("cudaGetDeviceCount returned %d\n -> %s\n", (int)error_id, cudaGetErrorString(error_id)); printf("Result = FALL\n"); exit(EXIT_FAILURE); } if (deviceCount == 0){ printf("Threr are no available device(s) that support CUDA\n"); } else { printf("Detected %d CUDA Capable device(s)\n", deviceCount); } int maxDevice = 0; if (deviceCount > 1){ int maxMultiprocessors = 0; for (int device = 0; device < deviceCount; ++device) { struct cudaDeviceProp props; cudaGetDeviceProperties(&props, device); if (maxMultiprocessors < props.multiProcessorCount){ maxMultiprocessors = props.multiProcessorCount; maxDevice = device; } } } int driverVersion = 0, runtimeVersion = 0; cudaSetDevice(maxDevice); struct cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, maxDevice); printf("Device %d: '%s' \n", maxDevice, deviceProp.name); cudaDriverGetVersion(&driverVersion); cudaRuntimeGetVersion(&runtimeVersion); printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n", driverVersion/1000, (driverVersion%100)/10, runtimeVersion/1000, (runtimeVersion%100)/10); printf(" CUDA Capability Major/Minor version number: %d.%d\n", deviceProp.major, deviceProp.minor); printf(" Total amount of global memory: %.2f MBytes (%llu bytes)\n", (float)deviceProp.totalGlobalMem/(pow(1024.0,3)), (unsigned long long) deviceProp.totalGlobalMem); printf(" GPU Clock rate: %.0f MHz (%0.2f GHz)\n", deviceProp.clockRate * 1e-3f, deviceProp.clockRate * 1e-6f); printf(" Memory Clock rate: %0.f MHz\n", deviceProp.memoryClockRate * 1e-3f); printf(" Memory Bus Width: %d-bit\n", deviceProp.memoryBusWidth); if (deviceProp.l2CacheSize){ printf(" L2 Cache Size: %d bytes\n", deviceProp.l2CacheSize); } printf(" Max Texture Dimension Size(x,y,z) 1D=(%d), 2D=(%d,%d), 3D=(%d,%d,%d)\n", deviceProp.maxTexture1D, deviceProp.maxTexture2D[0], deviceProp.maxTexture2D[1], deviceProp.maxTexture3D[0], deviceProp.maxTexture3D[1], deviceProp.maxTexture3D[2]); printf(" Max Layered Texture Size (dim) x layers 1D=(%d) x %d, 2D=(%d,%d) x %d\n", deviceProp.maxTexture1DLayered[0], deviceProp.maxTexture1DLayered[1], deviceProp.maxTexture2DLayered[0], deviceProp.maxTexture2DLayered[1], deviceProp.maxTexture2DLayered[2]); printf(" Total amount of constant memory: %lu bytes\n", deviceProp.totalConstMem); printf(" Total amount of shared memory per block: %lu bytes\n", deviceProp.sharedMemPerBlock); printf(" Total amount of registers available per block: %d\n", deviceProp.regsPerBlock); printf(" Wrap size: %d\n", deviceProp.warpSize); printf(" Maximum number of threads per multiprocessor: %d\n", deviceProp.maxThreadsPerMultiProcessor); printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock); printf(" Maximum size of each dimension of a block: %d x %d x %d\n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]); printf(" Maximum size of each dimension of a grid: %d x %d x %d\n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]); printf(" Maximum memory pitch: %lu bytes\n", deviceProp.memPitch); exit(EXIT_SUCCESS); return 0; }
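The device-query pair above picks the GPU with the largest multiProcessorCount before printing its properties, but only the cudaGetDeviceCount call is error-checked; cudaSetDevice and cudaGetDeviceProperties are not. A minimal sketch of the same selection logic with the checks added (select_best_device is a hypothetical helper, not part of the file above):

    #include <stdio.h>
    #include <cuda_runtime.h>

    // Return the index of the device with the most SMs, or -1 on failure.
    static int select_best_device(void) {
        int count = 0;
        cudaError_t err = cudaGetDeviceCount(&count);
        if (err != cudaSuccess || count == 0) {
            fprintf(stderr, "cudaGetDeviceCount: %s\n", cudaGetErrorString(err));
            return -1;
        }
        int best = 0, bestSM = -1;
        for (int d = 0; d < count; ++d) {
            cudaDeviceProp p;
            if (cudaGetDeviceProperties(&p, d) != cudaSuccess)
                continue;                      // skip devices we cannot query
            if (p.multiProcessorCount > bestSM) {
                bestSM = p.multiProcessorCount;
                best = d;
            }
        }
        return best;
    }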
ea6541d5933c124a086bc34592e549f4a9cd5923.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Program to perform ADI time-marching on a regular 3D grid // #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <cutil_inline.h> //////////////////////////////////////////////////////////////////////// // define kernel block size for //////////////////////////////////////////////////////////////////////// //#define BLOCK_X 32 //#define BLOCK_Y 4 #define BLOCK_X 32 #define BLOCK_Y 8 //////////////////////////////////////////////////////////////////////// // include kernel function //////////////////////////////////////////////////////////////////////// #include <adi3d_kernel.cu> //////////////////////////////////////////////////////////////////////// // declare Gold routine //////////////////////////////////////////////////////////////////////// void Gold_adi(int, int, int, float, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*); //////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////// int main(int argc, char **argv){ // 'h_' prefix - CPU (host) memory space int NX=256, NY=256, NZ=256, REPEAT=10, i, j, k, ind, printout=0; float *h_u1, *h_u2, *h_du, *h_ax, *h_bx, *h_cx, *h_ay, *h_by, *h_cy, *h_az, *h_bz, *h_cz, err, lam=1.0f; double timer, elapsed; // 'd_' prefix - GPU (device) memory space float *d_u, *d_du, *d_ax, *d_bx, *d_cx, *d_ay, *d_by, *d_cy, *d_az, *d_bz, *d_cz; printf("\nGrid dimensions: %d x %d x %d\n", NX, NY, NZ); if( NX>256 || NY>256 || NZ>256 ) { printf("No dimension can exceed 256 due to hard-coded array sizes\n"); return -1; } // initialise card cutilDeviceInit(argc, argv); // allocate memory for arrays h_u1 = (float *)malloc(sizeof(float)*NX*NY*NZ); h_u2 = (float *)malloc(sizeof(float)*NX*NY*NZ); h_du = (float *)malloc(sizeof(float)*NX*NY*NZ); h_ax = (float *)malloc(sizeof(float)*NX*NY*NZ); h_bx = (float *)malloc(sizeof(float)*NX*NY*NZ); h_cx = (float *)malloc(sizeof(float)*NX*NY*NZ); h_ay = (float *)malloc(sizeof(float)*NX*NY*NZ); h_by = (float *)malloc(sizeof(float)*NX*NY*NZ); h_cy = (float *)malloc(sizeof(float)*NX*NY*NZ); h_az = (float *)malloc(sizeof(float)*NX*NY*NZ); h_bz = (float *)malloc(sizeof(float)*NX*NY*NZ); h_cz = (float *)malloc(sizeof(float)*NX*NY*NZ); cudaSafeCall(hipMalloc((void **)&d_u, sizeof(float)*NX*NY*NZ) ); cudaSafeCall(hipMalloc((void **)&d_du, sizeof(float)*NX*NY*NZ) ); cudaSafeCall(hipMalloc((void **)&d_ax, sizeof(float)*NX*NY*NZ) ); cudaSafeCall(hipMalloc((void **)&d_bx, sizeof(float)*NX*NY*NZ) ); cudaSafeCall(hipMalloc((void **)&d_cx, sizeof(float)*NX*NY*NZ) ); cudaSafeCall(hipMalloc((void **)&d_ay, sizeof(float)*NX*NY*NZ) ); cudaSafeCall(hipMalloc((void **)&d_by, sizeof(float)*NX*NY*NZ) ); cudaSafeCall(hipMalloc((void **)&d_cy, sizeof(float)*NX*NY*NZ) ); cudaSafeCall(hipMalloc((void **)&d_az, sizeof(float)*NX*NY*NZ) ); cudaSafeCall(hipMalloc((void **)&d_bz, sizeof(float)*NX*NY*NZ) ); cudaSafeCall(hipMalloc((void **)&d_cz, sizeof(float)*NX*NY*NZ) ); // initialise u1 for (k=0; k<NZ; k++) { for (j=0; j<NY; j++) { for (i=0; i<NX; i++) { ind = i + j*NX + k*NX*NY; if (i==0 || i==NX-1 || j==0 || j==NY-1|| k==0 || k==NZ-1) h_u1[ind] = 1.0f; // Dirichlet b.c.'s else h_u1[ind] = 0.0f; } } } // copy u1 to device elapsed_time(&timer); cudaSafeCall(hipMemcpy(d_u, h_u1, sizeof(float)*NX*NY*NZ, hipMemcpyHostToDevice)); cudaSafeCall(hipDeviceSynchronize()); elapsed = elapsed_time(&timer); 
printf("\nCopy u1 to device: %f (s) \n", elapsed); // Set up the execution configuration dim3 dimGrid1(1+(NX-1)/BLOCK_X, 1+(NY-1)/BLOCK_Y); dim3 dimBlock1(BLOCK_X,BLOCK_Y); // dim3 dimGrid2(1+(NX-1)/16, 1+(NY-1)/4); // dim3 dimBlock2(16,4); dim3 dimGrid2(1+(NX-1)/32, 1+(NY-1)/4); dim3 dimBlock2(32,4); dim3 dimGrid3(NY,NZ); dim3 dimBlock3(256); // Execute GPU kernel double time1=0, time2=0, time3=0, time4=0; for (i = 1; i <= REPEAT; ++i) { hipLaunchKernelGGL(( GPU_adi_rhs), dim3(dimGrid1), dim3(dimBlock1), 0, 0, NX, NY, NZ, lam, d_u, d_du, d_ax, d_bx, d_cx, d_ay, d_by, d_cy, d_az, d_bz, d_cz); cudaCheckMsg("GPU_adi_rhs execution failed\n"); cudaSafeCall(hipDeviceSynchronize()); time1 += elapsed_time(&timer); hipLaunchKernelGGL(( GPU_adi_x_float4_2), dim3(dimGrid1), dim3(dimBlock1), 0, 0, NX, NY, NZ, (float4*)d_ax, (float4*)d_bx, (float4*)d_cx, (float4*)d_du); // GPU_adi_x<<<dimGrid2, dimBlock2>>>(NX, NY, NZ, // d_ax, d_bx, d_cx, d_du); // GPU_adi_x_new<<<dimGrid3, dimBlock3>>>(NX, NY, NZ, // d_ax, d_bx, d_cx, d_du); cudaCheckMsg("GPU_adi_x execution failed\n"); cudaSafeCall(hipDeviceSynchronize()); time2 += elapsed_time(&timer); hipLaunchKernelGGL(( GPU_adi_y), dim3(dimGrid2), dim3(dimBlock2), 0, 0, NX, NY, NZ, d_ay, d_by, d_cy, d_du); cudaCheckMsg("GPU_adi_y execution failed\n"); cudaSafeCall(hipDeviceSynchronize()); time3 += elapsed_time(&timer); hipLaunchKernelGGL(( GPU_adi_z), dim3(dimGrid2), dim3(dimBlock2), 0, 0, NX, NY, NZ, d_u, d_az, d_bz, d_cz, d_du); cudaCheckMsg("GPUadi_z execution failed\n"); cudaSafeCall(hipDeviceSynchronize()); time4 += elapsed_time(&timer); } printf("\n%dx GPU_adi: %f (s) %f (s) %f (s) %f (s) \n", REPEAT, time1, time2, time3, time4); // Read back GPU results cudaSafeCall(hipMemcpy(h_u2, d_u, sizeof(float)*NX*NY*NZ, hipMemcpyDeviceToHost) ); elapsed = elapsed_time(&timer); printf("\nCopy u2 to host: %f (s) \n", elapsed); // print out corner of array if (printout) { for (k=0; k<3; k++) { for (j=0; j<8; j++) { for (i=0; i<8; i++) { ind = i + j*NX + k*NX*NY; printf(" %5.2f ", h_u2[ind]); } printf("\n"); } printf("\n"); } } // Gold treatment for (int i = 1; i <= REPEAT; ++i) { Gold_adi(NX, NY, NZ, lam, h_u1, h_du, h_ax, h_bx, h_cx, h_ay, h_by, h_cy, h_az, h_bz, h_cz); } elapsed = elapsed_time(&timer); printf("\n%dx Gold_adi: %f (s) \n \n", REPEAT, elapsed); // print out corner of array if (printout) { for (k=0; k<3; k++) { for (j=0; j<8; j++) { for (i=0; i<8; i++) { ind = i + j*NX + k*NX*NY; printf(" %5.2f ", h_u1[ind]); } printf("\n"); } printf("\n"); } } // error check err = 0.0; for (k=0; k<NZ; k++) { for (j=0; j<NY; j++) { for (i=0; i<NX; i++) { ind = i + j*NX + k*NX*NY; err += (h_u1[ind]-h_u2[ind])*(h_u1[ind]-h_u2[ind]); } } } printf("\n rms error = %f \n",sqrt(err/ (float)(NX*NY*NZ))); // Release GPU and CPU memory cudaSafeCall(hipFree(d_u) ); cudaSafeCall(hipFree(d_du)); cudaSafeCall(hipFree(d_ax)); cudaSafeCall(hipFree(d_bx)); cudaSafeCall(hipFree(d_cx)); cudaSafeCall(hipFree(d_ay)); cudaSafeCall(hipFree(d_by)); cudaSafeCall(hipFree(d_cy)); cudaSafeCall(hipFree(d_az)); cudaSafeCall(hipFree(d_bz)); cudaSafeCall(hipFree(d_cz)); free(h_u1); free(h_u2); free(h_du); free(h_ax); free(h_bx); free(h_cx); free(h_ay); free(h_by); free(h_cy); free(h_az); free(h_bz); free(h_cz); hipDeviceReset(); }
ea6541d5933c124a086bc34592e549f4a9cd5923.cu
// // Program to perform ADI time-marching on a regular 3D grid // #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <cutil_inline.h> //////////////////////////////////////////////////////////////////////// // define kernel block size for //////////////////////////////////////////////////////////////////////// //#define BLOCK_X 32 //#define BLOCK_Y 4 #define BLOCK_X 32 #define BLOCK_Y 8 //////////////////////////////////////////////////////////////////////// // include kernel function //////////////////////////////////////////////////////////////////////// #include <adi3d_kernel.cu> //////////////////////////////////////////////////////////////////////// // declare Gold routine //////////////////////////////////////////////////////////////////////// void Gold_adi(int, int, int, float, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*); //////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////// int main(int argc, char **argv){ // 'h_' prefix - CPU (host) memory space int NX=256, NY=256, NZ=256, REPEAT=10, i, j, k, ind, printout=0; float *h_u1, *h_u2, *h_du, *h_ax, *h_bx, *h_cx, *h_ay, *h_by, *h_cy, *h_az, *h_bz, *h_cz, err, lam=1.0f; double timer, elapsed; // 'd_' prefix - GPU (device) memory space float *d_u, *d_du, *d_ax, *d_bx, *d_cx, *d_ay, *d_by, *d_cy, *d_az, *d_bz, *d_cz; printf("\nGrid dimensions: %d x %d x %d\n", NX, NY, NZ); if( NX>256 || NY>256 || NZ>256 ) { printf("No dimension can exceed 256 due to hard-coded array sizes\n"); return -1; } // initialise card cutilDeviceInit(argc, argv); // allocate memory for arrays h_u1 = (float *)malloc(sizeof(float)*NX*NY*NZ); h_u2 = (float *)malloc(sizeof(float)*NX*NY*NZ); h_du = (float *)malloc(sizeof(float)*NX*NY*NZ); h_ax = (float *)malloc(sizeof(float)*NX*NY*NZ); h_bx = (float *)malloc(sizeof(float)*NX*NY*NZ); h_cx = (float *)malloc(sizeof(float)*NX*NY*NZ); h_ay = (float *)malloc(sizeof(float)*NX*NY*NZ); h_by = (float *)malloc(sizeof(float)*NX*NY*NZ); h_cy = (float *)malloc(sizeof(float)*NX*NY*NZ); h_az = (float *)malloc(sizeof(float)*NX*NY*NZ); h_bz = (float *)malloc(sizeof(float)*NX*NY*NZ); h_cz = (float *)malloc(sizeof(float)*NX*NY*NZ); cudaSafeCall(cudaMalloc((void **)&d_u, sizeof(float)*NX*NY*NZ) ); cudaSafeCall(cudaMalloc((void **)&d_du, sizeof(float)*NX*NY*NZ) ); cudaSafeCall(cudaMalloc((void **)&d_ax, sizeof(float)*NX*NY*NZ) ); cudaSafeCall(cudaMalloc((void **)&d_bx, sizeof(float)*NX*NY*NZ) ); cudaSafeCall(cudaMalloc((void **)&d_cx, sizeof(float)*NX*NY*NZ) ); cudaSafeCall(cudaMalloc((void **)&d_ay, sizeof(float)*NX*NY*NZ) ); cudaSafeCall(cudaMalloc((void **)&d_by, sizeof(float)*NX*NY*NZ) ); cudaSafeCall(cudaMalloc((void **)&d_cy, sizeof(float)*NX*NY*NZ) ); cudaSafeCall(cudaMalloc((void **)&d_az, sizeof(float)*NX*NY*NZ) ); cudaSafeCall(cudaMalloc((void **)&d_bz, sizeof(float)*NX*NY*NZ) ); cudaSafeCall(cudaMalloc((void **)&d_cz, sizeof(float)*NX*NY*NZ) ); // initialise u1 for (k=0; k<NZ; k++) { for (j=0; j<NY; j++) { for (i=0; i<NX; i++) { ind = i + j*NX + k*NX*NY; if (i==0 || i==NX-1 || j==0 || j==NY-1|| k==0 || k==NZ-1) h_u1[ind] = 1.0f; // Dirichlet b.c.'s else h_u1[ind] = 0.0f; } } } // copy u1 to device elapsed_time(&timer); cudaSafeCall(cudaMemcpy(d_u, h_u1, sizeof(float)*NX*NY*NZ, cudaMemcpyHostToDevice)); cudaSafeCall(cudaDeviceSynchronize()); elapsed = elapsed_time(&timer); printf("\nCopy u1 to device: %f (s) \n", elapsed); // Set up the execution 
configuration dim3 dimGrid1(1+(NX-1)/BLOCK_X, 1+(NY-1)/BLOCK_Y); dim3 dimBlock1(BLOCK_X,BLOCK_Y); // dim3 dimGrid2(1+(NX-1)/16, 1+(NY-1)/4); // dim3 dimBlock2(16,4); dim3 dimGrid2(1+(NX-1)/32, 1+(NY-1)/4); dim3 dimBlock2(32,4); dim3 dimGrid3(NY,NZ); dim3 dimBlock3(256); // Execute GPU kernel double time1=0, time2=0, time3=0, time4=0; for (i = 1; i <= REPEAT; ++i) { GPU_adi_rhs<<<dimGrid1, dimBlock1>>>(NX, NY, NZ, lam, d_u, d_du, d_ax, d_bx, d_cx, d_ay, d_by, d_cy, d_az, d_bz, d_cz); cudaCheckMsg("GPU_adi_rhs execution failed\n"); cudaSafeCall(cudaDeviceSynchronize()); time1 += elapsed_time(&timer); GPU_adi_x_float4_2<<<dimGrid1, dimBlock1>>>(NX, NY, NZ, (float4*)d_ax, (float4*)d_bx, (float4*)d_cx, (float4*)d_du); // GPU_adi_x<<<dimGrid2, dimBlock2>>>(NX, NY, NZ, // d_ax, d_bx, d_cx, d_du); // GPU_adi_x_new<<<dimGrid3, dimBlock3>>>(NX, NY, NZ, // d_ax, d_bx, d_cx, d_du); cudaCheckMsg("GPU_adi_x execution failed\n"); cudaSafeCall(cudaDeviceSynchronize()); time2 += elapsed_time(&timer); GPU_adi_y<<<dimGrid2, dimBlock2>>>(NX, NY, NZ, d_ay, d_by, d_cy, d_du); cudaCheckMsg("GPU_adi_y execution failed\n"); cudaSafeCall(cudaDeviceSynchronize()); time3 += elapsed_time(&timer); GPU_adi_z<<<dimGrid2, dimBlock2>>>(NX, NY, NZ, d_u, d_az, d_bz, d_cz, d_du); cudaCheckMsg("GPUadi_z execution failed\n"); cudaSafeCall(cudaDeviceSynchronize()); time4 += elapsed_time(&timer); } printf("\n%dx GPU_adi: %f (s) %f (s) %f (s) %f (s) \n", REPEAT, time1, time2, time3, time4); // Read back GPU results cudaSafeCall(cudaMemcpy(h_u2, d_u, sizeof(float)*NX*NY*NZ, cudaMemcpyDeviceToHost) ); elapsed = elapsed_time(&timer); printf("\nCopy u2 to host: %f (s) \n", elapsed); // print out corner of array if (printout) { for (k=0; k<3; k++) { for (j=0; j<8; j++) { for (i=0; i<8; i++) { ind = i + j*NX + k*NX*NY; printf(" %5.2f ", h_u2[ind]); } printf("\n"); } printf("\n"); } } // Gold treatment for (int i = 1; i <= REPEAT; ++i) { Gold_adi(NX, NY, NZ, lam, h_u1, h_du, h_ax, h_bx, h_cx, h_ay, h_by, h_cy, h_az, h_bz, h_cz); } elapsed = elapsed_time(&timer); printf("\n%dx Gold_adi: %f (s) \n \n", REPEAT, elapsed); // print out corner of array if (printout) { for (k=0; k<3; k++) { for (j=0; j<8; j++) { for (i=0; i<8; i++) { ind = i + j*NX + k*NX*NY; printf(" %5.2f ", h_u1[ind]); } printf("\n"); } printf("\n"); } } // error check err = 0.0; for (k=0; k<NZ; k++) { for (j=0; j<NY; j++) { for (i=0; i<NX; i++) { ind = i + j*NX + k*NX*NY; err += (h_u1[ind]-h_u2[ind])*(h_u1[ind]-h_u2[ind]); } } } printf("\n rms error = %f \n",sqrt(err/ (float)(NX*NY*NZ))); // Release GPU and CPU memory cudaSafeCall(cudaFree(d_u) ); cudaSafeCall(cudaFree(d_du)); cudaSafeCall(cudaFree(d_ax)); cudaSafeCall(cudaFree(d_bx)); cudaSafeCall(cudaFree(d_cx)); cudaSafeCall(cudaFree(d_ay)); cudaSafeCall(cudaFree(d_by)); cudaSafeCall(cudaFree(d_cy)); cudaSafeCall(cudaFree(d_az)); cudaSafeCall(cudaFree(d_bz)); cudaSafeCall(cudaFree(d_cz)); free(h_u1); free(h_u2); free(h_du); free(h_ax); free(h_bx); free(h_cx); free(h_ay); free(h_by); free(h_cy); free(h_az); free(h_bz); free(h_cz); cudaDeviceReset(); }
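Both versions of the ADI driver above size their launch grids with the same ceiling-division idiom, 1+(N-1)/BLOCK, so that a dimension that is not a multiple of the block size still receives enough blocks to cover it. A small sketch of that idiom (div_up is a hypothetical helper name, not used in the file):

    // Ceiling division: smallest g such that g * block >= n (assumes n >= 1).
    static inline unsigned div_up(unsigned n, unsigned block) {
        return 1u + (n - 1u) / block;
    }
    // Grid setup matching dimGrid1 in the ADI driver above:
    //   dim3 dimGrid1(div_up(NX, BLOCK_X), div_up(NY, BLOCK_Y));
    //   dim3 dimBlock1(BLOCK_X, BLOCK_Y);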
986b4152c7b636da46bd93bbee5de26c76999d6d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ============================================================================= // PROJECT CHRONO - http://projectchrono.org // // Copyright (c) 2014 projectchrono.org // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file at the top level of the distribution and at // http://projectchrono.org/license-chrono.txt. // // ============================================================================= // Author: Milad Rakhsha // ============================================================================= #include <thrust/extrema.h> #include <thrust/sort.h> #include "chrono_fsi/physics/ChFsiForceIISPH.cuh" #include "chrono_fsi/physics/ChSphGeneral.cuh" #define RESOLUTION_LENGTH_MULT_IISPH 2.0 //========================================================================================================================================== namespace chrono { namespace fsi { // double precision atomic add function __device__ inline double datomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } ChFsiForceIISPH::ChFsiForceIISPH(std::shared_ptr<ChBce> otherBceWorker, std::shared_ptr<SphMarkerDataD> otherSortedSphMarkersD, std::shared_ptr<ProximityDataD> otherMarkersProximityD, std::shared_ptr<FsiGeneralData> otherFsiGeneralData, std::shared_ptr<SimParams> otherParamsH, std::shared_ptr<ChCounters> otherNumObjects, bool verb) : ChFsiForce(otherBceWorker, otherSortedSphMarkersD, otherMarkersProximityD, otherFsiGeneralData, otherParamsH, otherNumObjects, verb) {} ChFsiForceIISPH::~ChFsiForceIISPH() {} //-------------------------------------------------------------------------------------------------------------------------------- void ChFsiForceIISPH::Initialize() { ChFsiForce::Initialize(); hipMemcpyToSymbolAsync(paramsD, paramsH.get(), sizeof(SimParams)); hipMemcpyToSymbolAsync(numObjectsD, numObjectsH.get(), sizeof(ChCounters)); hipMemcpyFromSymbol(paramsH.get(), paramsD, sizeof(SimParams)); hipDeviceSynchronize(); } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void V_i_np__AND__d_ii_kernel(Real4* sortedPosRad, // input: sorted positions Real3* sortedVelMas, Real4* sortedRhoPreMu, Real3* d_ii, Real3* V_i_np, Real* sumWij_inv, Real* G_tensor, uint* cellStart, uint* cellEnd, Real delta_t, const size_t numAllMarkers, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers || sortedRhoPreMu[i_idx].w <= -2) { return; } // sortedRhoPreMu[i_idx].x = sortedRhoPreMu[i_idx].x / sumWij_inv[i_idx]; Real h_i = sortedPosRad[i_idx].w; Real m_i = h_i * h_i * h_i * paramsD.rho0; Real mu_0 = paramsD.mu0; Real epsilon = paramsD.epsMinMarkersDis; Real dT = delta_t; Real3 source_term = paramsD.gravity + paramsD.bodyForce3; Real RHO_0 = paramsD.rho0; if (sortedRhoPreMu[i_idx].x < EPSILON) { printf("density is %f,ref density= %f\n", sortedRhoPreMu[i_idx].x, RHO_0); } Real3 posi = mR3(sortedPosRad[i_idx]); Real3 Veli = sortedVelMas[i_idx]; Real Rhoi = sortedRhoPreMu[i_idx].x; Real3 My_d_ii = mR3(0); Real3 My_F_i_np = mR3(0); // get 
address in grid int3 gridPos = calcGridPos(posi); for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); // get start of bucket for this cell uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { // cell is not empty uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { Real3 posj = mR3(sortedPosRad[j]); Real3 rij = Distance(posi, posj); Real d = length(rij); if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2 || i_idx == j) continue; Real3 eij = rij / d; Real3 Velj = sortedVelMas[j]; Real Rhoj = sortedRhoPreMu[j].x; Real h_j = sortedPosRad[j].w; if (Rhoj == 0) { printf("Bug F_i_np__AND__d_ii_kernel i=%d j=%d, hi=%f, hj=%f\n", i_idx, j, h_i, h_j); } Real m_j = h_j * h_j * h_j * paramsD.rho0; Real h_ij = 0.5 * (h_j + h_i); Real3 grad_ij = GradWh(rij, h_ij); My_d_ii += m_j * (-(dT * dT) / (Rhoi * Rhoi)) * grad_ij; Real Rho_bar = (Rhoj + Rhoi) * 0.5; Real3 V_ij = (Veli - Velj); // Real nu = mu_0 * paramsD.HSML * 320 / Rho_bar; // Real3 muNumerator = nu * fmin(0.0, dot(rij, V_ij)) * grad_ij; Real3 muNumerator = 2 * mu_0 * dot(rij, grad_ij) * V_ij; Real muDenominator = (Rho_bar * Rho_bar) * (d * d + h_ij * h_ij * epsilon); // if ((sortedRhoPreMu[i_idx].w < 0 && sortedRhoPreMu[j].w < 0)) // if (sortedRhoPreMu[i_idx].w < 0 || (sortedRhoPreMu[i_idx].w >= 0 && // sortedRhoPreMu[j].w < 0)) My_F_i_np += m_j * muNumerator / muDenominator; Real Wd = W3h(d, h_ij); My_F_i_np -= paramsD.kappa / m_i * m_j * Wd * rij; } } } } } // if (!paramsD.Conservative_Form) // My_F_i_np = mu_0 * LaplacainVi; My_F_i_np *= m_i; My_F_i_np += m_i * source_term; d_ii[i_idx] = My_d_ii; V_i_np[i_idx] = (My_F_i_np * dT + Veli); // This does not contain m_0? 
} //-------------------------------------------------------------------------------------------------------------------------------- //-------------------------------------------------------------------------------------------------------------------------------- __global__ void Rho_np_AND_a_ii_AND_sum_m_GradW(Real4* sortedPosRad, Real4* sortedRhoPreMu, Real* rho_np, // Write Real* a_ii, // Write Real* p_old, // Write Real3* V_np, // Read Real3* d_ii, // Read Real3* sum_m_GradW, uint* cellStart, uint* cellEnd, Real delta_t, const size_t numAllMarkers, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers || sortedRhoPreMu[i_idx].w <= -2) { return; } Real h_i = sortedPosRad[i_idx].w; Real m_i = h_i * h_i * h_i * paramsD.rho0; Real3 posi = mR3(sortedPosRad[i_idx]); Real3 Veli_np = V_np[i_idx]; Real Rho_i = sortedRhoPreMu[i_idx].x; Real3 my_d_ii = d_ii[i_idx]; Real rho_temp = 0; Real my_a_ii = 0; Real3 My_sum_m_gradW = mR3(0); Real dT = delta_t; // get address in gridj int3 gridPos = calcGridPos(posi); // // examine neighbouring cells for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); // get start of bucket for this cell uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { // cell is not empty // iterate over particles in this cell uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { Real3 posj = mR3(sortedPosRad[j]); Real3 dist3 = Distance(posi, posj); Real d = length(dist3); if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2 || i_idx == j) continue; Real h_j = sortedPosRad[j].w; Real m_j = h_j * h_j * h_j * paramsD.rho0; Real h_ij = 0.5 * (h_j + h_i); Real3 Velj_np = V_np[j]; Real3 grad_i_wij = GradWh(dist3, h_ij); rho_temp += m_j * dot((Veli_np - Velj_np), grad_i_wij); Real3 d_ji = m_i * (-(dT * dT) / (Rho_i * Rho_i)) * (-grad_i_wij); my_a_ii += m_j * dot((my_d_ii - d_ji), grad_i_wij); My_sum_m_gradW += m_j * grad_i_wij; } } } } } rho_np[i_idx] = dT * rho_temp + sortedRhoPreMu[i_idx].x; // Note: a_ii can become zero and when this can cause divide by 0 issues for free particles a_ii[i_idx] = abs(my_a_ii) > EPSILON ? 
my_a_ii : 1.0; sum_m_GradW[i_idx] = My_sum_m_gradW; p_old[i_idx] = sortedRhoPreMu[i_idx].y; // = 1000; // Note that this is outside of the for loop } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void Calc_dij_pj(Real3* dij_pj, // write Real3* F_p, // Write Real3* d_ii, // Read Real4* sortedPosRad, Real3* sortedVelMas, Real4* sortedRhoPreMu, Real* p_old, uint* cellStart, uint* cellEnd, Real delta_t, const size_t numAllMarkers, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers || sortedRhoPreMu[i_idx].w <= -2) { return; } Real h_i = sortedPosRad[i_idx].w; Real m_i = h_i * h_i * h_i * paramsD.rho0; Real3 my_F_p = mR3(0); Real p_i_old = p_old[i_idx]; Real3 pos_i = mR3(sortedPosRad[i_idx]); Real Rho_i = sortedRhoPreMu[i_idx].x; if (sortedRhoPreMu[i_idx].x < EPSILON) { printf("(Calc_dij_pj) My density is %f in Calc_dij_pj\n", sortedRhoPreMu[i_idx].x); } Real dT = delta_t; Real3 My_dij_pj = mR3(0); int3 gridPos = calcGridPos(pos_i); for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); // get start of bucket for this cell uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { // cell is not empty // iterate over particles in this cell uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { Real3 pos_j = mR3(sortedPosRad[j]); Real3 dist3 = Distance(pos_i, pos_j); Real d = length(dist3); ////CHECK THIS CONDITION!!! if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2 || i_idx == j) continue; Real h_j = sortedPosRad[j].w; Real m_j = h_j * h_j * h_j * paramsD.rho0; Real h_ij = 0.5 * (h_j + h_i); Real3 grad_i_wij = GradWh(dist3, h_ij); Real Rho_j = sortedRhoPreMu[j].x; Real p_j_old = p_old[j]; My_dij_pj += m_j * (-(dT * dT) / (Rho_j * Rho_j)) * grad_i_wij * p_j_old; my_F_p += m_j * ((p_i_old / (Rho_i * Rho_i)) + (p_j_old / (Rho_j * Rho_j))) * grad_i_wij; } } } } } dij_pj[i_idx] = My_dij_pj; F_p[i_idx] = -m_i * my_F_p; } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void CalcNumber_Contacts(uint* numContacts, Real4* sortedPosRad, Real4* sortedRhoPreMu, uint* cellStart, uint* cellEnd, const size_t numAllMarkers, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } if (sortedRhoPreMu[i_idx].w <= -2) { numContacts[i_idx] = 1; return; } Real h_i = sortedPosRad[i_idx].w; // Real m_i = h_i * h_i * h_i * paramsD.rho0; int myType = sortedRhoPreMu[i_idx].w; Real3 pos_i = mR3(sortedPosRad[i_idx]); uint numCol[400]; int counter = 1; numCol[0] = i_idx; // The first one is always the idx of the marker itself int3 gridPos = calcGridPos(pos_i); for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); // get start of bucket for this cell uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { // iterate over particles in this cell uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { Real3 pos_j = mR3(sortedPosRad[j]); Real3 dist3 = Distance(pos_i, pos_j); Real d = length(dist3); if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2 || i_idx == j) 
continue; bool AlreadyHave = false; for (uint findCol = 1; findCol <= counter; findCol++) { if (numCol[findCol] == j) { AlreadyHave = true; continue; } } // Room for improvment ... if (!AlreadyHave) { numCol[counter] = j; counter++; // Do not count BCE-BCE interactions... if (myType >= 0 && sortedRhoPreMu[j].w >= 0 && paramsD.bceType == BceVersion::ADAMI) counter--; } if (myType != -1) // For BCE no need to go deeper than this... continue; Real h_j = sortedPosRad[j].w; int3 gridPosJ = calcGridPos(pos_j); for (int zz = -1; zz <= 1; zz++) { for (int yy = -1; yy <= 1; yy++) { for (int xx = -1; xx <= 1; xx++) { int3 neighbourPosJ = gridPosJ + mI3(xx, yy, zz); uint gridHashJ = calcGridHash(neighbourPosJ); uint startIndexJ = cellStart[gridHashJ]; if (startIndexJ != 0xffffffff) { // cell is not empty uint endIndexJ = cellEnd[gridHashJ]; for (uint k = startIndexJ; k < endIndexJ; k++) { Real3 pos_k = mR3(sortedPosRad[k]); Real3 dist3jk = Distance(pos_j, pos_k); Real djk = length(dist3jk); if (djk > RESOLUTION_LENGTH_MULT * h_j || k == j || k == i_idx || sortedRhoPreMu[k].w <= -2) continue; bool AlreadyHave2 = false; for (uint findCol = 1; findCol <= counter; findCol++) { if (numCol[findCol] == k) { AlreadyHave2 = true; continue; } } if (!AlreadyHave2) { numCol[counter] = k; counter++; } } } } } } } } } } } numContacts[i_idx] = counter + 10; } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void Calc_summGradW(Real3* summGradW, // write Real4* sortedPosRad, Real4* sortedRhoPreMu, uint* cellStart, uint* cellEnd, const size_t numAllMarkers, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } if (sortedRhoPreMu[i_idx].w <= -2) { return; } Real h_i = sortedPosRad[i_idx].w; // Real m_i = h_i * h_i * h_i * paramsD.rho0; Real3 pos_i = mR3(sortedPosRad[i_idx]); Real3 My_summgradW = mR3(0); // Real dT = paramsD.dT; int3 gridPos = calcGridPos(pos_i); for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); // get start of bucket for this cell uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { // cell is not empty // iterate over particles in this cell uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { Real3 pos_j = mR3(sortedPosRad[j]); Real3 dist3 = Distance(pos_i, pos_j); Real d = length(dist3); if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2 || i_idx == j) continue; Real h_j = sortedPosRad[j].w; Real m_j = h_j * h_j * h_j * paramsD.rho0; Real h_ij = 0.5 * (h_j + h_i); Real3 grad_i_wij = GradWh(dist3, h_ij); My_summgradW += m_j * grad_i_wij; } } } } } summGradW[i_idx] = My_summgradW; } //-------------------------------------------------------------------------------------------------------------------------------- __device__ void Calc_BC_aij_Bi(const uint i_idx, Real* csrValA, uint* csrColIndA, unsigned long int* GlobalcsrColIndA, uint* numContacts, // The above 4 vectors are used for CSR form. 
Real* a_ii, // write Real* B_i, Real4* sortedPosRad, Real3* sortedVelMas, const Real4* sortedRhoPreMu, Real3* V_new, Real* p_old, Real3* Normals, Real* G_i, Real* sumWij_inv, Real4* qD, Real3* rigidSPH_MeshPos_LRF_D, Real3* posRigid_fsiBodies_D, Real4* velMassRigid_fsiBodies_D, Real3* omegaVelLRF_fsiBodies_D, Real3* accRigid_fsiBodies_D, Real3* omegaAccLRF_fsiBodies_D, uint* rigidIdentifierD, Real3* pos_fsi_fea_D, Real3* vel_fsi_fea_D, Real3* acc_fsi_fea_D, uint* FlexIdentifierD, const int numFlex1D, uint2* CableElementsNodesD, uint4* ShellElementsNodesD, int4 updatePortion, uint* gridMarkerIndexD, const uint* cellStart, const uint* cellEnd, const size_t numAllMarkers, bool IsSPARSE) { uint csrStartIdx = numContacts[i_idx] + 1; uint csrEndIdx = numContacts[i_idx + 1]; Real h_i = sortedPosRad[i_idx].w; // Real m_i = h_i * h_i * h_i * paramsD.rho0; Real3 my_normal = Normals[i_idx]; Real3 source_term = paramsD.gravity + paramsD.bodyForce3; // if (bceIndex >= numObjectsD.numRigidMarkers) { // return; // } // int Original_idx = gridMarkerIndexD[i_idx]; Real3 myAcc = mR3(0.0); Real3 V_prescribed = mR3(0.0); // if (!(sortedRhoPreMu[i_idx].w >= 0 && sortedRhoPreMu[i_idx].w <= 3)) // printf("type of marker is %f\n", sortedRhoPreMu[i_idx].w); BCE_Vel_Acc(i_idx, myAcc, V_prescribed, sortedPosRad, updatePortion, gridMarkerIndexD, qD, rigidSPH_MeshPos_LRF_D, posRigid_fsiBodies_D, velMassRigid_fsiBodies_D, omegaVelLRF_fsiBodies_D, accRigid_fsiBodies_D, omegaAccLRF_fsiBodies_D, rigidIdentifierD, pos_fsi_fea_D, vel_fsi_fea_D, acc_fsi_fea_D, FlexIdentifierD, numFlex1D, CableElementsNodesD, ShellElementsNodesD); for (int c = csrStartIdx; c < csrEndIdx; c++) { csrValA[c] = 0; csrColIndA[c] = i_idx; GlobalcsrColIndA[c] = i_idx + numAllMarkers * i_idx; } // if ((csrEndIdx - csrStartIdx) != uint(0)) { Real3 numeratorv = mR3(0); Real denumenator = 0; Real pRHS = 0; // Real Rho_i = sortedRhoPreMu[i_idx].x; Real3 pos_i = mR3(sortedPosRad[i_idx]); // get address in grid int3 gridPos = calcGridPos(pos_i); uint counter = 0; for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); // get start of bucket for this cell uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { // cell is not empty uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { Real3 pos_j = mR3(sortedPosRad[j]); Real3 dist3 = Distance(pos_i, pos_j); Real d = length(dist3); if (d > RESOLUTION_LENGTH_MULT * h_i || j == i_idx) continue; Real h_j = sortedPosRad[j].w; // Real m_j = h_j * h_j * h_j * paramsD.rho0; // Real rhoj = sortedRhoPreMu[j].x; Real h_ij = 0.5 * (h_j + h_i); Real Wd = W3h(d, h_ij); Real3 Vel_j = sortedVelMas[j]; if (paramsD.bceType != BceVersion::ADAMI) { if (sortedRhoPreMu[j].w == -1.0 || dot(my_normal, mR3(pos_i - pos_j)) > 0) { Real3 grad_i_wij = GradWh(dist3, h_ij); csrValA[csrStartIdx - 1] += dot(grad_i_wij, my_normal); csrValA[counter + csrStartIdx] = -dot(grad_i_wij, my_normal); csrColIndA[counter + csrStartIdx] = j; GlobalcsrColIndA[counter + csrStartIdx] = j + numAllMarkers * i_idx; counter++; if (sortedRhoPreMu[j].w != -1) continue; numeratorv += Vel_j * Wd; denumenator += Wd; } } else { if (sortedRhoPreMu[j].w != -1 || sortedRhoPreMu[j].w <= -2) continue; numeratorv += Vel_j * Wd; denumenator += Wd; pRHS += dot(source_term - myAcc, dist3) * sortedRhoPreMu[j].x * Wd; csrValA[counter + csrStartIdx] = -Wd; csrColIndA[counter + csrStartIdx] = j; 
GlobalcsrColIndA[counter + csrStartIdx] = j + numAllMarkers * i_idx; counter++; } } } } } } if (abs(denumenator) < EPSILON) { V_new[i_idx] = 2 * V_prescribed; B_i[i_idx] = 0; if (paramsD.bceType == BceVersion::ADAMI) { csrValA[csrStartIdx - 1] = a_ii[i_idx]; csrColIndA[csrStartIdx - 1] = i_idx; GlobalcsrColIndA[csrStartIdx - 1] = i_idx + numAllMarkers * i_idx; } } else { Real Scaling = a_ii[i_idx] / denumenator; V_new[i_idx] = 2 * V_prescribed - numeratorv / denumenator; if (paramsD.bceType == BceVersion::ADAMI) { B_i[i_idx] = pRHS; csrValA[csrStartIdx - 1] = denumenator; csrColIndA[csrStartIdx - 1] = i_idx; GlobalcsrColIndA[csrStartIdx - 1] = i_idx + numAllMarkers * i_idx; for (int i = csrStartIdx - 1; i < csrEndIdx; i++) csrValA[i] *= Scaling; B_i[i_idx] *= Scaling; } } if (paramsD.bceType != BceVersion::ADAMI) { Real Scaling = a_ii[i_idx]; if (abs(csrValA[csrStartIdx - 1]) > EPSILON) { Scaling = a_ii[i_idx]; // csrValA[csrStartIdx - 1]; for (int count = csrStartIdx - 1; count < csrEndIdx; count++) csrValA[count] *= Scaling; } else { clearRow(i_idx, csrStartIdx - 1, csrEndIdx, csrValA, B_i); for (int count = csrStartIdx - 1; count < csrEndIdx; count++) { int j = csrColIndA[counter]; Real3 pos_j = mR3(sortedPosRad[j]); Real3 dist3 = Distance(pos_i, pos_j); Real d = length(dist3); if (d > RESOLUTION_LENGTH_MULT * h_i || j == i_idx) { csrValA[count] = 0.0; continue; } Real h_j = sortedPosRad[j].w; Real h_ij = 0.5 * (h_j + h_i); Real Wd = W3h(d, h_ij); csrValA[count] = sumWij_inv[j] * Wd * Scaling; } csrValA[csrStartIdx - 1] -= 1.0 * Scaling; } B_i[i_idx] = 0.0 * Scaling; } sortedVelMas[i_idx] = V_new[i_idx]; } // namespace fsi //-------------------------------------------------------------------------------------------------------------------------------- //-------------------------------------------------------------------------------------------------------------------------------- __device__ void Calc_fluid_aij_Bi(const uint i_idx, Real* csrValA, uint* csrColIndA, unsigned long int* GlobalcsrColIndA, uint* numContacts, // The above 4 vectors are used for CSR form. 
Real* B_i, Real3* d_ii, // Read Real* a_ii, // Read Real* rho_np, // Read Real3* summGradW, Real4* sortedPosRad, Real4* sortedRhoPreMu, uint* cellStart, uint* cellEnd, Real delta_t, const int numAllMarkers, bool IsSPARSE) { Real3 pos_i = mR3(sortedPosRad[i_idx]); Real dT = delta_t; int counter = 0; // There is always one non-zero at each row- The marker itself B_i[i_idx] = paramsD.rho0 - rho_np[i_idx]; uint csrStartIdx = numContacts[i_idx] + 1; // Reserve the starting index for the A_ii uint csrEndIdx = numContacts[i_idx + 1]; Real h_i = sortedPosRad[i_idx].w; // Real m_i = h_i * h_i * h_i * paramsD.rho0; // for (int c = csrStartIdx; c < csrEndIdx; c++) { // csrValA[c] = a_ii[i_idx]; // csrColIndA[c] = i_idx; // GlobalcsrColIndA[c] = i_idx + numAllMarkers * i_idx; // } int3 gridPos = calcGridPos(pos_i); for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); // get start of bucket for this cell uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { // cell is not empty // iterate over particles in this cell uint endIndex = cellEnd[gridHash]; // Real Rho_i = sortedRhoPreMu[i_idx].x; for (uint j = startIndex; j < endIndex; j++) { Real3 pos_j = mR3(sortedPosRad[j]); Real3 dist3 = Distance(pos_i, pos_j); Real d = length(dist3); if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2 || i_idx == j) continue; Real h_j = sortedPosRad[j].w; Real h_ij = 0.5 * (h_j + h_i); Real3 grad_i_wij = GradWh(dist3, h_ij); Real Rho_j = sortedRhoPreMu[j].x; Real m_j = h_j * h_j * h_j * paramsD.rho0; Real3 d_it = m_j * (-(dT * dT) / (Rho_j * Rho_j)) * grad_i_wij; Real My_a_ij_1 = m_j * dot(d_it, summGradW[i_idx]); Real My_a_ij_2 = m_j * dot(d_ii[j], grad_i_wij); Real My_a_ij_12 = My_a_ij_1 - My_a_ij_2; bool DONE1 = false; for (uint findCol = csrStartIdx; findCol < csrEndIdx; findCol++) { if (csrColIndA[findCol] == j) { csrValA[findCol] += My_a_ij_12; csrColIndA[findCol] = j; GlobalcsrColIndA[findCol] = j + numAllMarkers * i_idx; DONE1 = true; continue; } } if (!DONE1) { csrValA[counter + csrStartIdx] += My_a_ij_12; csrColIndA[counter + csrStartIdx] = j; GlobalcsrColIndA[counter + csrStartIdx] = j + numAllMarkers * i_idx; counter++; } int3 gridPosJ = calcGridPos(pos_j); for (int zz = -1; zz <= 1; zz++) { for (int yy = -1; yy <= 1; yy++) { for (int xx = -1; xx <= 1; xx++) { int3 neighbourPosJ = gridPosJ + mI3(xx, yy, zz); uint gridHashJ = calcGridHash(neighbourPosJ); uint startIndexJ = cellStart[gridHashJ]; if (startIndexJ != 0xffffffff) { // cell is not empty uint endIndexJ = cellEnd[gridHashJ]; for (uint k = startIndexJ; k < endIndexJ; k++) { Real3 pos_k = mR3(sortedPosRad[k]); Real3 dist3jk = Distance(pos_j, pos_k); Real djk = length(dist3jk); if (djk > RESOLUTION_LENGTH_MULT_IISPH * h_j || k == j || k == i_idx || sortedRhoPreMu[k].w <= -2) continue; Real h_k = sortedPosRad[j].w; Real h_jk = 0.5 * (h_j + h_k); Real3 grad_j_wjk = GradWh(dist3jk, h_jk); Real m_k = cube(sortedPosRad[k].w) * paramsD.rho0; Real Rho_k = sortedRhoPreMu[k].x; Real3 d_jk = m_k * (-(dT * dT) / (Rho_k * Rho_k)) * grad_j_wjk; Real My_a_ij_3 = m_j * dot(d_jk, grad_i_wij); bool DONE2 = false; for (uint findCol = csrStartIdx; findCol < csrEndIdx; findCol++) { if (csrColIndA[findCol] == k) { csrValA[findCol] -= My_a_ij_3; csrColIndA[findCol] = k; GlobalcsrColIndA[findCol] = k + numAllMarkers * i_idx; DONE2 = true; continue; } } if (!DONE2) { csrValA[counter + csrStartIdx] -= My_a_ij_3; 
csrColIndA[counter + csrStartIdx] = k; GlobalcsrColIndA[counter + csrStartIdx] = k + numAllMarkers * i_idx; counter++; } } } } } } } } } } } for (int myIdx = csrStartIdx; myIdx < csrEndIdx; myIdx++) { if (csrColIndA[myIdx] == i_idx) csrValA[myIdx] = a_ii[i_idx]; } csrValA[csrStartIdx - 1] = a_ii[i_idx]; csrColIndA[csrStartIdx - 1] = i_idx; GlobalcsrColIndA[csrStartIdx - 1] = i_idx + numAllMarkers * i_idx; if (sortedRhoPreMu[i_idx].x < 0.999 * paramsD.rho0) { csrValA[csrStartIdx - 1] = a_ii[i_idx]; for (int myIdx = csrStartIdx; myIdx < csrEndIdx; myIdx++) { csrValA[myIdx] = 0.0; B_i[i_idx] = 0.0; } } Real RHS = B_i[i_idx]; B_i[i_idx] = RHS; // fminf(0.0, RHS); } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void FormAXB(Real* csrValA, uint* csrColIndA, unsigned long int* GlobalcsrColIndA, uint* numContacts, // The above 4 vectors are used for CSR form. Real* a_ij, // write Real* B_i, // write Real3* d_ii, // Read Real* a_ii, // Read Real3* summGradW, Real4* sortedPosRad, Real3* sortedVelMas, Real4* sortedRhoPreMu, Real3* V_new, Real* p_old, Real3* Normals, Real* G_i, Real* sumWij_inv, Real* rho_np, Real4* qD, Real3* rigidSPH_MeshPos_LRF_D, Real3* posRigid_fsiBodies_D, Real4* velMassRigid_fsiBodies_D, Real3* omegaVelLRF_fsiBodies_D, Real3* accRigid_fsiBodies_D, Real3* omegaAccLRF_fsiBodies_D, uint* rigidIdentifierD, Real3* pos_fsi_fea_D, Real3* vel_fsi_fea_D, Real3* acc_fsi_fea_D, uint* FlexIdentifierD, const int numFlex1D, uint2* CableElementsNodesD, uint4* ShellElementsNodesD, int4 updatePortion, uint* gridMarkerIndexD, uint* cellStart, uint* cellEnd, Real delta_t, const size_t numAllMarkers, bool IsSPARSE, volatile bool* isError) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } // Real m_0 = paramsD.markerMass; // Real RHO_0 = paramsD.rho0; // Real dT = paramsD.dT; // Real3 gravity = paramsD.gravity; int TYPE_OF_NARKER = sortedRhoPreMu[i_idx].w; if (TYPE_OF_NARKER <= -2) { B_i[i_idx] = 0; uint csrStartIdx = numContacts[i_idx]; // This needs to be check to see if it messes up the condition number of the matrix csrValA[csrStartIdx] = 1.0; csrColIndA[csrStartIdx] = i_idx; GlobalcsrColIndA[csrStartIdx] = i_idx + numAllMarkers * i_idx; } else if (TYPE_OF_NARKER == -1) { Calc_fluid_aij_Bi(i_idx, csrValA, csrColIndA, GlobalcsrColIndA, numContacts, B_i, d_ii, a_ii, rho_np, summGradW, sortedPosRad, sortedRhoPreMu, cellStart, cellEnd, delta_t, numAllMarkers, true); } else if (TYPE_OF_NARKER > -1) Calc_BC_aij_Bi(i_idx, csrValA, csrColIndA, GlobalcsrColIndA, numContacts, a_ii, B_i, sortedPosRad, sortedVelMas, sortedRhoPreMu, V_new, p_old, Normals, G_i, sumWij_inv, qD, rigidSPH_MeshPos_LRF_D, posRigid_fsiBodies_D, velMassRigid_fsiBodies_D, omegaVelLRF_fsiBodies_D, accRigid_fsiBodies_D, omegaAccLRF_fsiBodies_D, rigidIdentifierD, pos_fsi_fea_D, vel_fsi_fea_D, acc_fsi_fea_D, FlexIdentifierD, numFlex1D, CableElementsNodesD, ShellElementsNodesD, updatePortion, gridMarkerIndexD, cellStart, cellEnd, numAllMarkers, true); } //-------------------------------------------------------------------------------------------------------------------------------- //-------------------------------------------------------------------------------------------------------------------------------- __global__ void Calc_Pressure_AXB_USING_CSR(Real* csrValA, Real* a_ii, uint* csrColIndA, uint* numContacts, Real4* sortedRhoPreMu, Real* sumWij_inv, Real3* sortedVelMas, Real3* V_new, Real* 
p_old, Real* B_i, // Read Real* Residuals, const size_t numAllMarkers, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } if (sortedRhoPreMu[i_idx].w <= -2) { return; } // Real RHO_0 = paramsD.rho0; // bool ClampPressure = paramsD.ClampPressure; // Real Max_Pressure = paramsD.Max_Pressure; uint startIdx = numContacts[i_idx] + 1; // numContacts[i_idx] is the diagonal itself uint endIdx = numContacts[i_idx + 1]; Real aij_pj = 0; // Real error = aij_pj + sortedRhoPreMu[i_idx].y * csrValA[startIdx - 1] - B_i[i_idx]; for (int myIdx = startIdx; myIdx < endIdx; myIdx++) { if (csrColIndA[myIdx] != i_idx) aij_pj += csrValA[myIdx] * p_old[csrColIndA[myIdx]]; } Real RHS = B_i[i_idx]; Residuals[i_idx] = abs(RHS - aij_pj - p_old[i_idx] * csrValA[startIdx - 1]); sortedRhoPreMu[i_idx].y = (RHS - aij_pj) / csrValA[startIdx - 1]; // if (paramsD.ClampPressure && sortedRhoPreMu[i_idx].y < 0) // sortedRhoPreMu[i_idx].y = 0; if (!isfinite(aij_pj)) { printf("a_ij *p_j became Nan in Calc_Pressure_AXB_USING_CSR "); } } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void Calc_Pressure(Real* a_ii, // Read Real3* d_ii, // Read Real3* dij_pj, // Read Real* rho_np, // Read Real* rho_p, // Write Real* Residuals, Real3* F_p, Real4* sortedPosRad, Real3* sortedVelMas, Real4* sortedRhoPreMu, Real4* qD, Real3* rigidSPH_MeshPos_LRF_D, Real3* posRigid_fsiBodies_D, Real4* velMassRigid_fsiBodies_D, Real3* omegaVelLRF_fsiBodies_D, Real3* accRigid_fsiBodies_D, Real3* omegaAccLRF_fsiBodies_D, uint* rigidIdentifierD, Real3* pos_fsi_fea_D, Real3* vel_fsi_fea_D, Real3* acc_fsi_fea_D, uint* FlexIdentifierD, const int numFlex1D, uint2* CableElementsNodesD, uint4* ShellElementsNodesD, int4 updatePortion, uint* gridMarkerIndexD, Real* p_old, Real3* V_new, uint* cellStart, uint* cellEnd, Real delta_t, const size_t numAllMarkers, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } if (sortedRhoPreMu[i_idx].w <= -2) { return; } Real h_i = sortedPosRad[i_idx].w; Real m_i = h_i * h_i * h_i * paramsD.rho0; Real RHO_0 = paramsD.rho0; Real dT = delta_t; Real3 source_term = paramsD.gravity + paramsD.bodyForce3; if (sortedRhoPreMu[i_idx].x < EPSILON) { printf("(Calc_Pressure)My density is %f in Calc_Pressure\n", sortedRhoPreMu[i_idx].x); } int myType = sortedRhoPreMu[i_idx].w; Real Rho_i = sortedRhoPreMu[i_idx].x; Real p_i = p_old[i_idx]; Real3 pos_i = mR3(sortedPosRad[i_idx]); Real p_new = 0; Real my_rho_p = 0; Real3 F_i_p = F_p[i_idx]; if (myType == -1) { if (Rho_i < 0.999 * RHO_0) { p_new = 0; Residuals[i_idx] = 0; } else { Real3 my_dij_pj = dij_pj[i_idx]; Real sum_dij_pj = 0; // This is the first summation term in the expression for the pressure. Real sum_djj_pj = 0; // This is the second summation term in the expression for the pressure. Real sum_djk_pk = 0; // This is the last summation term in the expression for the pressure. 
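// IISPH pressure update (matrix-free form): the neighbor loop below assembles
// sum_j a_ij * p_j_old from these three terms, and the new pressure is then
//   p_i = ( (rho0 - rho_np_i) - sum_j a_ij * p_j_old ) / a_ii,
// the same Jacobi-style update applied in Calc_Pressure_AXB_USING_CSR above.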
int3 gridPosI = calcGridPos(pos_i); for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbourPosI = gridPosI + mI3(x, y, z); uint gridHashI = calcGridHash(neighbourPosI); // get start of bucket for this cell uint startIndexI = cellStart[gridHashI]; if (startIndexI != 0xffffffff) { uint endIndexI = cellEnd[gridHashI]; for (uint j = startIndexI; j < endIndexI; j++) { Real3 pos_j = mR3(sortedPosRad[j]); Real3 dist3ij = Distance(pos_i, pos_j); Real dij = length(dist3ij); if (dij > RESOLUTION_LENGTH_MULT * paramsD.HSML || i_idx == j || sortedRhoPreMu[j].w <= -2) continue; // Real Rho_j = sortedRhoPreMu[j].x; Real p_j_old = p_old[j]; Real h_j = sortedPosRad[j].w; Real m_j = h_j * h_j * h_j * paramsD.rho0; Real3 djj = d_ii[j]; Real3 F_j_p = F_p[j]; Real h_ij = 0.5 * (h_j + h_i); Real3 grad_i_wij = GradWh(dist3ij, h_ij); Real3 d_ji = m_i * (-(dT * dT) / (Rho_i * Rho_i)) * (-grad_i_wij); Real3 djk_pk = dij_pj[j] - d_ji * p_i; sum_dij_pj += m_j * dot(my_dij_pj, grad_i_wij); sum_djj_pj += m_j * dot(djj, grad_i_wij) * p_j_old; sum_djk_pk += m_j * dot(djk_pk, grad_i_wij); my_rho_p += (dT * dT) * m_j * dot((F_i_p / m_i - F_j_p / m_j), grad_i_wij); } } } } } // Real RHS = fminf(0.0, RHO_0 - rho_np[i_idx]); Real RHS = RHO_0 - rho_np[i_idx]; Real aij_pj = +sum_dij_pj - sum_djj_pj - sum_djk_pk; p_new = (RHS - aij_pj) / a_ii[i_idx]; Residuals[i_idx] = abs(RHS - aij_pj - p_old[i_idx] * a_ii[i_idx]); // sortedRhoPreMu[i_idx].x = aij_pj + p_new * a_ii[i_idx] + RHO_0 - RHS; } } else { // Do Adami BC Real3 myAcc = mR3(0); Real3 V_prescribed = mR3(0); BCE_Vel_Acc(i_idx, myAcc, V_prescribed, sortedPosRad, updatePortion, gridMarkerIndexD, qD, rigidSPH_MeshPos_LRF_D, posRigid_fsiBodies_D, velMassRigid_fsiBodies_D, omegaVelLRF_fsiBodies_D, accRigid_fsiBodies_D, omegaAccLRF_fsiBodies_D, rigidIdentifierD, pos_fsi_fea_D, vel_fsi_fea_D, acc_fsi_fea_D, FlexIdentifierD, numFlex1D, CableElementsNodesD, ShellElementsNodesD); Real3 numeratorv = mR3(0); Real denumenator = 0; Real numeratorp = 0; Real3 Vel_i; // get address in grid int3 gridPos = calcGridPos(pos_i); for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); // get start of bucket for this cell uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { // cell is not empty uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { Real3 pos_j = mR3(sortedPosRad[j]); Real3 dist3 = Distance(pos_i, pos_j); Real d = length(dist3); if (d > RESOLUTION_LENGTH_MULT * paramsD.HSML || sortedRhoPreMu[j].w != -1) continue; // OLD VELOCITY IS SHOULD BE OBDATED NOT THE NEW ONE!!!!! 
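// As noted above, the extrapolation uses the previous-iteration fluid values
// (sortedVelMas[j] and p_old[j]), not the quantities being updated in this sweep.
// Wall values are obtained by kernel-weighted extrapolation from fluid neighbors:
//   v_wall = 2 * V_prescribed - sum_f(v_f * W) / sum_f(W)
//   p_wall = [ sum_f(p_f * W) + sum_f(rho_f * dot(source_term - myAcc, dist3) * W) ] / sum_f(W)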
Real3 Vel_j = sortedVelMas[j]; Real p_j = p_old[j]; Real3 F_j_p = F_p[j]; Real h_j = sortedPosRad[j].w; Real m_j = h_j * h_j * h_j * paramsD.rho0; // Real rhoj = sortedRhoPreMu[j].x; Real h_ij = 0.5 * (h_j + h_i); Real Wd = W3h(d, h_ij); numeratorv += Vel_j * Wd; numeratorp += p_j * Wd + dot(source_term - myAcc, dist3) * sortedRhoPreMu[j].x * Wd; denumenator += Wd; Real3 TobeUsed = (F_i_p / m_i - F_j_p / m_j); my_rho_p += (dT * dT) * m_j * dot(TobeUsed, GradWh(dist3, h_ij)); if (isnan(numeratorp)) printf("Something is wrong here..., %f\n", numeratorp); } } } } } if (abs(denumenator) < EPSILON) { p_new = 0; Vel_i = 2 * V_prescribed; } else { Vel_i = 2 * V_prescribed - numeratorv / denumenator; p_new = numeratorp / denumenator; } Residuals[i_idx] = abs(numeratorp - denumenator * p_old[i_idx]) * a_ii[i_idx]; V_new[i_idx] = Vel_i; } // if (paramsD.ClampPressure && p_new < 0.0) // p_new = 0.0; rho_p[i_idx] = my_rho_p; sortedRhoPreMu[i_idx].y = p_new; } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void Update_AND_Calc_Res(Real3* sortedVelMas, Real4* sortedRhoPreMu, Real* p_old, Real3* V_new, Real* rho_p, Real* rho_np, Real* Residuals, const size_t numAllMarkers, const int Iteration, Real params_relaxation, bool IsSPARSE, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } if (sortedRhoPreMu[i_idx].w <= -2) { return; } // p_i = (1 - relax) * p_old_i + relax * p_i; sortedRhoPreMu[i_idx].y = (1 - params_relaxation) * p_old[i_idx] + params_relaxation * sortedRhoPreMu[i_idx].y; // if(!paramsD.USE_LinearSolver) // p_old[i_idx] = sortedRhoPreMu[i_idx].y; // if (paramsD.ClampPressure && sortedRhoPreMu[i_idx].y < 0) // sortedRhoPreMu[i_idx].y = 0; // Real AbsRes = abs(sortedRhoPreMu[i_idx].y - p_old[i_idx]); // Real Updated_rho = rho_np[i_idx] + rho_p[i_idx]; // Real rho_res = abs(1000 - sortedRhoPreMu[i_idx].x); // Hard-coded for now Real p_res = 0; // p_res = abs(sortedRhoPreMu[i_idx].y - p_old[i_idx]) / (abs(p_old[i_idx]) + 0.00001); p_res = abs(sortedRhoPreMu[i_idx].y - p_old[i_idx]); p_old[i_idx] = sortedRhoPreMu[i_idx].y; Residuals[i_idx] = p_res; } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void CalcForces(Real3* new_vel, // Write Real4* derivVelRhoD, Real4* sortedPosRad, // Read Real3* sortedVelMas, // Read Real4* sortedRhoPreMu, Real* sumWij_inv, Real* p_old, Real3* r_shift, uint* cellStart, uint* cellEnd, Real delta_t, size_t numAllMarkers, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } if (sortedRhoPreMu[i_idx].w <= -2) { sortedRhoPreMu[i_idx].x = 0; sortedRhoPreMu[i_idx].y = 0; sortedRhoPreMu[i_idx].z = 0; return; } // if (sortedRhoPreMu[i_idx].w > -1) { // return; // } Real mu_0 = paramsD.mu0; Real h_i = sortedPosRad[i_idx].w; Real m_i = h_i * h_i * h_i * paramsD.rho0; Real dT = delta_t; Real3 source_term = paramsD.gravity + paramsD.bodyForce3; Real epsilon = paramsD.epsMinMarkersDis; Real3 posi = mR3(sortedPosRad[i_idx]); Real3 Veli = sortedVelMas[i_idx]; Real p_i; // if (sortedRhoPreMu[i_idx].w == -1) p_i = sortedRhoPreMu[i_idx].y; // else // p_i = p_old[i_idx]; Real rho_i = sortedRhoPreMu[i_idx].x; Real3 F_i_mu = mR3(0); Real3 F_i_surface_tension = mR3(0); Real3 F_i_p = mR3(0); if ((sortedRhoPreMu[i_idx].x > 3 * paramsD.rho0 || sortedRhoPreMu[i_idx].x < 0) && 
sortedRhoPreMu[i_idx].w < 0) printf("too large/small density marker %d, type=%f\n", i_idx, sortedRhoPreMu[i_idx].w); Real r0 = 0; int Ni = 0; Real mi_bar = 0; Real3 inner_sum = mR3(0); int3 gridPos = calcGridPos(posi); // get address in grid for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); // get start of bucket for this cell uint startIndex = cellStart[gridHash]; uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { Real3 posj = mR3(sortedPosRad[j]); Real3 rij = Distance(posi, posj); Real d = length(rij); if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2 || i_idx == j) continue; Real3 eij = rij / d; Real h_j = sortedPosRad[j].w; Real m_j = h_j * h_j * h_j * paramsD.rho0; mi_bar += m_j; Ni++; r0 += d; inner_sum += m_j * rij / (d * d * d); Real h_ij = 0.5 * (h_j + h_i); Real Wd = m_j * W3h(d, h_ij); Real3 grad_ij = GradWh(rij, h_ij); Real3 Velj = sortedVelMas[j]; Real p_j = sortedRhoPreMu[j].y; Real rho_j = sortedRhoPreMu[j].x; Real3 V_ij = (Veli - Velj); // Only Consider (fluid-fluid + fluid-solid) or Solid-Fluid Interaction if (sortedRhoPreMu[i_idx].w < 0 || (sortedRhoPreMu[i_idx].w >= 0 && sortedRhoPreMu[j].w < 0)) F_i_p += -m_j * ((p_i / (rho_i * rho_i)) + (p_j / (rho_j * rho_j))) * grad_ij; Real Rho_bar = (rho_j + rho_i) * 0.5; // Real nu = mu_0 * paramsD.HSML * 320 / Rho_bar; // Real3 muNumerator = nu * fminf(0.0, dot(rij, V_ij)) * grad_ij; Real3 muNumerator = 2 * mu_0 * dot(rij, grad_ij) * V_ij; Real muDenominator = (Rho_bar * Rho_bar) * (d * d + paramsD.HSML * paramsD.HSML * epsilon); // Only Consider (fluid-fluid + fluid-solid) or Solid-Fluid Interaction if (sortedRhoPreMu[i_idx].w < 0 || (sortedRhoPreMu[i_idx].w >= 0 && sortedRhoPreMu[j].w < 0)) // if ((sortedRhoPreMu[i_idx].w < 0 && sortedRhoPreMu[j].w < 0)) F_i_mu += m_j * muNumerator / muDenominator; if (!isfinite(length(F_i_mu))) { printf("F_i_np in CalcForces returns Nan or Inf"); } } } } if (Ni != 0) { r0 /= Ni; mi_bar /= Ni; } if (mi_bar > EPSILON) r_shift[i_idx] = paramsD.beta_shifting * r0 * r0 * paramsD.v_Max * dT / mi_bar * inner_sum; // Forces are per unit mass at this point. 
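// derivVelRhoD stores m_i times the per-unit-mass force accumulated above, and the
// updated velocity adds both dT * force / m_i and the shifting displacement
// r_shift / dT, with r_shift = beta_shifting * r0^2 * v_Max * dT / mi_bar * inner_sum
// (inner_sum = sum_j m_j * r_ij / |r_ij|^3) as computed a few lines earlier.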
derivVelRhoD[i_idx] = mR4((F_i_p + F_i_mu) * m_i); // Add the source_term only to the fluid markers if (sortedRhoPreMu[i_idx].w == -1) { derivVelRhoD[i_idx] = derivVelRhoD[i_idx] + mR4(source_term) * m_i; } new_vel[i_idx] = Veli + dT * mR3(derivVelRhoD[i_idx]) / m_i + r_shift[i_idx] / dT; if (!isfinite(length(new_vel[i_idx])) || !isfinite(length(derivVelRhoD[i_idx])) || !isfinite(length(r_shift[i_idx]))) printf("%d= new_vel=%.2f,derivVelRhoD=%.2f,r_shift=%.2f, F_i_p=%f, F_i_mu=%f\n", i_idx, length(new_vel[i_idx]), length(derivVelRhoD[i_idx]), length(r_shift[i_idx]), length(F_i_p), length(F_i_mu)); } } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void FinalizePressure(Real4* sortedPosRad, // Read Real4* sortedRhoPreMu, Real* p_old, Real3* F_p, // Write uint* cellStart, uint* cellEnd, size_t numAllMarkers, Real p_shift, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } if (sortedRhoPreMu[i_idx].w <= -2) { return; } if (!(isfinite(sortedRhoPreMu[i_idx].x) && isfinite(sortedRhoPreMu[i_idx].y) && isfinite(sortedRhoPreMu[i_idx].z) && isfinite(sortedRhoPreMu[i_idx].w))) { printf("rhoPreMu is NAN: thrown from FinalizePressure ! %f,%f,%f\\n", sortedRhoPreMu[i_idx].x, sortedRhoPreMu[i_idx].y, sortedRhoPreMu[i_idx].z); sortedRhoPreMu[i_idx].y = 0.0; } // if (p_shift < 0) sortedRhoPreMu[i_idx].y = p_old[i_idx] + ((paramsD.ClampPressure) ? paramsD.BASEPRES : 0.0); //- p_shift; if (paramsD.ClampPressure && sortedRhoPreMu[i_idx].y < 0) sortedRhoPreMu[i_idx].y = 0; // if (sortedRhoPreMu[i_idx].y < 0) // sortedRhoPreMu[i_idx].y = (p_old[i_idx] > 0) ? p_old[i_idx] : 0.0; if (sortedRhoPreMu[i_idx].y > paramsD.Max_Pressure) sortedRhoPreMu[i_idx].y = paramsD.Max_Pressure; } //-------------------------------------------------------------------------------------------------------------------------------- void ChFsiForceIISPH::calcPressureIISPH(std::shared_ptr<FsiBodiesDataD> otherFsiBodiesD, thrust::device_vector<Real3> pos_fsi_fea_D, thrust::device_vector<Real3> vel_fsi_fea_D, thrust::device_vector<Real3> acc_fsi_fea_D, thrust::device_vector<Real> sumWij_inv, thrust::device_vector<Real>& p_old, thrust::device_vector<Real3> Normals, thrust::device_vector<Real> G_i, thrust::device_vector<Real>& Color) { // Real RES = paramsH->PPE_res; PPESolutionType mySolutionType = paramsH->PPE_Solution_type; std::cout << "time step in calcPressureIISPH " << paramsH->dT << std::endl; double total_step_timeClock = clock(); bool *isErrorH, *isErrorD; isErrorH = (bool*)malloc(sizeof(bool)); hipMalloc((void**)&isErrorD, sizeof(bool)); *isErrorH = false; hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice); //------------------------------------------------------------------------ // thread per particle uint numThreads, numBlocks; size_t numAllMarkers = (int)numObjectsH->numAllMarkers; computeGridSize((uint)numAllMarkers, 256, numBlocks, numThreads); *isErrorH = false; hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice); thrust::device_vector<Real3> d_ii(numAllMarkers); thrust::device_vector<Real3> V_np(numAllMarkers); thrust::fill(d_ii.begin(), d_ii.end(), mR3(0.0)); thrust::fill(V_np.begin(), V_np.end(), mR3(0.0)); *isErrorH = false; hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice); hipLaunchKernelGGL(( V_i_np__AND__d_ii_kernel), dim3(numBlocks), dim3(numThreads), 0, 0, mR4CAST(sortedSphMarkersD->posRadD), 
mR3CAST(sortedSphMarkersD->velMasD), mR4CAST(sortedSphMarkersD->rhoPresMuD), mR3CAST(d_ii), mR3CAST(V_np), R1CAST(sumWij_inv), R1CAST(G_i), U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), paramsH->dT, numAllMarkers, isErrorD); hipDeviceSynchronize(); cudaCheckError(); hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost); if (*isErrorH == true) { throw std::runtime_error("Error! program crashed after F_i_np__AND__d_ii_kernel!\n"); } thrust::device_vector<Real> a_ii(numAllMarkers); thrust::device_vector<Real> rho_np(numAllMarkers); thrust::fill(a_ii.begin(), a_ii.end(), 0.0); thrust::fill(rho_np.begin(), rho_np.end(), 0.0); thrust::fill(p_old.begin(), p_old.end(), 0.0); thrust::device_vector<Real3> summGradW(numAllMarkers); *isErrorH = false; hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice); hipLaunchKernelGGL(( Rho_np_AND_a_ii_AND_sum_m_GradW), dim3(numBlocks), dim3(numThreads), 0, 0, mR4CAST(sortedSphMarkersD->posRadD), mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(rho_np), R1CAST(a_ii), R1CAST(p_old), mR3CAST(V_np), mR3CAST(d_ii), mR3CAST(summGradW), U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), paramsH->dT, numAllMarkers, isErrorD); hipDeviceSynchronize(); cudaCheckError(); hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost); if (*isErrorH == true) { throw std::runtime_error("Error! program crashed after F_i_np__AND__d_ii_kernel!\n"); } thrust::device_vector<Real3> V_new(numAllMarkers); thrust::fill(V_new.begin(), V_new.end(), mR3(0.0)); thrust::device_vector<Real> a_ij; thrust::device_vector<Real> B_i(numAllMarkers); thrust::device_vector<uint> csrColIndA; thrust::device_vector<uint> numContacts(numAllMarkers); thrust::device_vector<unsigned long int> GlobalcsrColIndA; thrust::device_vector<Real> csrValA; double durationFormAXB; size_t end_fluid = numObjectsH->numGhostMarkers + numObjectsH->numHelperMarkers + numObjectsH->numFluidMarkers; size_t end_bndry = end_fluid + numObjectsH->numBoundaryMarkers; size_t end_rigid = end_bndry + numObjectsH->numRigidMarkers; size_t end_flex = end_rigid + numObjectsH->numFlexMarkers; int4 updatePortion = mI4((int)end_fluid, (int)end_bndry, (int)end_rigid, (int)end_flex); uint NNZ; if (mySolutionType == PPESolutionType::FORM_SPARSE_MATRIX) { thrust::fill(a_ij.begin(), a_ij.end(), 0.0); thrust::fill(B_i.begin(), B_i.end(), 0.0); // thrust::fill(summGradW.begin(), summGradW.end(), mR3(0.0)); thrust::fill(numContacts.begin(), numContacts.end(), 0.0); //------------------------------------------------------------------------ //------------- MatrixJacobi //------------------------------------------------------------------------ bool SPARSE_FLAG = true; double FormAXBClock = clock(); thrust::device_vector<Real> Residuals(numAllMarkers); thrust::fill(Residuals.begin(), Residuals.end(), 1.0); thrust::device_vector<Real> rho_p(numAllMarkers); thrust::fill(rho_p.begin(), rho_p.end(), 0.0); *isErrorH = false; hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice); hipLaunchKernelGGL(( CalcNumber_Contacts), dim3(numBlocks), dim3(numThreads), 0, 0, U1CAST(numContacts), mR4CAST(sortedSphMarkersD->posRadD), mR4CAST(sortedSphMarkersD->rhoPresMuD), U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), numAllMarkers, isErrorD); hipDeviceSynchronize(); cudaCheckError(); hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost); if (*isErrorH == true) { throw std::runtime_error("Error! 
program crashed after CalcNumber_Contacts!\n"); } uint MAX_CONTACT = thrust::reduce(numContacts.begin(), numContacts.end(), 0, thrust::maximum<Real>()); std::cout << "Max contact between SPH particles: " << MAX_CONTACT << std::endl; uint LastVal = numContacts[numAllMarkers - 1]; thrust::exclusive_scan(numContacts.begin(), numContacts.end(), numContacts.begin()); numContacts.push_back(LastVal + numContacts[numAllMarkers - 1]); NNZ = numContacts[numAllMarkers]; csrValA.resize(NNZ); csrColIndA.resize(NNZ); GlobalcsrColIndA.resize(NNZ); thrust::fill(csrValA.begin(), csrValA.end(), 0.0); thrust::fill(GlobalcsrColIndA.begin(), GlobalcsrColIndA.end(), 0.0); thrust::fill(csrColIndA.begin(), csrColIndA.end(), 0.0); hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice); std::cout << "updatePortion of BC: " << updatePortion.x << " " << updatePortion.y << " " << updatePortion.z << " " << updatePortion.w << "\n "; hipLaunchKernelGGL(( FormAXB), dim3(numBlocks), dim3(numThreads), 0, 0, R1CAST(csrValA), U1CAST(csrColIndA), LU1CAST(GlobalcsrColIndA), U1CAST(numContacts), R1CAST(a_ij), R1CAST(B_i), mR3CAST(d_ii), R1CAST(a_ii), mR3CAST(summGradW), mR4CAST(sortedSphMarkersD->posRadD), mR3CAST(sortedSphMarkersD->velMasD), mR4CAST(sortedSphMarkersD->rhoPresMuD), mR3CAST(V_new), R1CAST(p_old), mR3CAST(Normals), R1CAST(G_i), R1CAST(sumWij_inv), R1CAST(rho_np), mR4CAST(otherFsiBodiesD->q_fsiBodies_D), mR3CAST(fsiGeneralData->rigidSPH_MeshPos_LRF_D), mR3CAST(otherFsiBodiesD->posRigid_fsiBodies_D), mR4CAST(otherFsiBodiesD->velMassRigid_fsiBodies_D), mR3CAST(otherFsiBodiesD->omegaVelLRF_fsiBodies_D), mR3CAST(otherFsiBodiesD->accRigid_fsiBodies_D), mR3CAST(otherFsiBodiesD->omegaAccLRF_fsiBodies_D), U1CAST(fsiGeneralData->rigidIdentifierD), mR3CAST(pos_fsi_fea_D), mR3CAST(vel_fsi_fea_D), mR3CAST(acc_fsi_fea_D), U1CAST(fsiGeneralData->FlexIdentifierD), (int)numObjectsH->numFlexBodies1D, U2CAST(fsiGeneralData->CableElementsNodesD), U4CAST(fsiGeneralData->ShellElementsNodesD), updatePortion, U1CAST(markersProximityD->gridMarkerIndexD), U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), paramsH->dT, numAllMarkers, SPARSE_FLAG, isErrorD); hipDeviceSynchronize(); cudaCheckError(); hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost); if (*isErrorH == true) { throw std::runtime_error("Error! 
program crashed after F_i_np__AND__d_ii_kernel!\n"); } durationFormAXB = (clock() - FormAXBClock) / (double)CLOCKS_PER_SEC; } //------------------------------------------------------------------------ //------------- Iterative loop //------------------------------------------------------------------------ int Iteration = 0; Real MaxRes = 100; thrust::device_vector<Real> Residuals(numAllMarkers); thrust::fill(Residuals.begin(), Residuals.end(), 1.0); thrust::device_vector<Real3> dij_pj(numAllMarkers); thrust::fill(dij_pj.begin(), dij_pj.end(), mR3(0.0)); thrust::device_vector<Real3> F_p(numAllMarkers); thrust::fill(F_p.begin(), F_p.end(), mR3(0.0)); thrust::device_vector<Real> rho_p(numAllMarkers); thrust::fill(rho_p.begin(), rho_p.end(), 0.0); double LinearSystemClock = clock(); myLinearSolver->SetVerbose(paramsH->Verbose_monitoring); myLinearSolver->SetAbsRes(paramsH->LinearSolver_Abs_Tol); myLinearSolver->SetRelRes(paramsH->LinearSolver_Rel_Tol); myLinearSolver->SetIterationLimit(paramsH->LinearSolver_Max_Iter); if (paramsH->USE_LinearSolver) { if (paramsH->PPE_Solution_type != PPESolutionType::FORM_SPARSE_MATRIX) { printf( "You should paramsH->PPE_Solution_type == FORM_SPARSE_MATRIX in order to use the " "chrono_fsi linear " "solvers\n"); exit(0); } myLinearSolver->Solve((int)numAllMarkers, NNZ, R1CAST(csrValA), U1CAST(numContacts), U1CAST(csrColIndA), R1CAST(p_old), R1CAST(B_i)); cudaCheckError(); } else { while ((MaxRes > paramsH->LinearSolver_Abs_Tol || Iteration < 3) && Iteration < paramsH->LinearSolver_Max_Iter) { *isErrorH = false; hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice); hipLaunchKernelGGL(( Initialize_Variables), dim3(numBlocks), dim3(numThreads), 0, 0, mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(p_old), mR3CAST(sortedSphMarkersD->velMasD), mR3CAST(V_new), numAllMarkers, isErrorD); hipDeviceSynchronize(); cudaCheckError(); hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost); if (*isErrorH == true) { throw std::runtime_error("Error! program crashed after Initialize_Variables!\n"); } if (mySolutionType == PPESolutionType::MATRIX_FREE) { *isErrorH = false; hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice); hipLaunchKernelGGL(( Calc_dij_pj), dim3(numBlocks), dim3(numThreads), 0, 0, mR3CAST(dij_pj), mR3CAST(F_p), mR3CAST(d_ii), mR4CAST(sortedSphMarkersD->posRadD), mR3CAST(sortedSphMarkersD->velMasD), mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(p_old), U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), paramsH->dT, numAllMarkers, isErrorD); hipDeviceSynchronize(); cudaCheckError(); hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost); if (*isErrorH == true) { throw std::runtime_error("Error! 
program crashed after Calc_dij_pj!\n"); } *isErrorH = false; hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice); hipLaunchKernelGGL(( Calc_Pressure), dim3(numBlocks), dim3(numThreads), 0, 0, R1CAST(a_ii), mR3CAST(d_ii), mR3CAST(dij_pj), R1CAST(rho_np), R1CAST(rho_p), R1CAST(Residuals), mR3CAST(F_p), mR4CAST(sortedSphMarkersD->posRadD), mR3CAST(sortedSphMarkersD->velMasD), mR4CAST(sortedSphMarkersD->rhoPresMuD), mR4CAST(otherFsiBodiesD->q_fsiBodies_D), mR3CAST(fsiGeneralData->rigidSPH_MeshPos_LRF_D), mR3CAST(otherFsiBodiesD->posRigid_fsiBodies_D), mR4CAST(otherFsiBodiesD->velMassRigid_fsiBodies_D), mR3CAST(otherFsiBodiesD->omegaVelLRF_fsiBodies_D), mR3CAST(otherFsiBodiesD->accRigid_fsiBodies_D), mR3CAST(otherFsiBodiesD->omegaAccLRF_fsiBodies_D), U1CAST(fsiGeneralData->rigidIdentifierD), mR3CAST(pos_fsi_fea_D), mR3CAST(vel_fsi_fea_D), mR3CAST(acc_fsi_fea_D), U1CAST(fsiGeneralData->FlexIdentifierD), (int)numObjectsH->numFlexBodies1D, U2CAST(fsiGeneralData->CableElementsNodesD), U4CAST(fsiGeneralData->ShellElementsNodesD), updatePortion, U1CAST(markersProximityD->gridMarkerIndexD), R1CAST(p_old), mR3CAST(V_new), U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), paramsH->dT, numAllMarkers, isErrorD); hipDeviceSynchronize(); cudaCheckError(); hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost); if (*isErrorH == true) { throw std::runtime_error("Error! program crashed after Calc_Pressure!\n"); } } if (mySolutionType == PPESolutionType::FORM_SPARSE_MATRIX) { *isErrorH = false; hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice); hipLaunchKernelGGL(( Calc_Pressure_AXB_USING_CSR), dim3(numBlocks), dim3(numThreads), 0, 0, R1CAST(csrValA), R1CAST(a_ii), U1CAST(csrColIndA), U1CAST(numContacts), mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(sumWij_inv), mR3CAST(sortedSphMarkersD->velMasD), mR3CAST(V_new), R1CAST(p_old), R1CAST(B_i), R1CAST(Residuals), numAllMarkers, isErrorD); hipDeviceSynchronize(); cudaCheckError(); hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost); if (*isErrorH == true) { throw std::runtime_error("Error! program crashed after Iterative_pressure_update!\n"); } } *isErrorH = false; hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice); hipLaunchKernelGGL(( Update_AND_Calc_Res), dim3(numBlocks), dim3(numThreads), 0, 0, mR3CAST(sortedSphMarkersD->velMasD), mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(p_old), mR3CAST(V_new), R1CAST(rho_p), R1CAST(rho_np), R1CAST(Residuals), numAllMarkers, Iteration, paramsH->PPE_relaxation, false, isErrorD); hipDeviceSynchronize(); cudaCheckError(); hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost); if (*isErrorH == true) { throw std::runtime_error("Error! 
program crashed after Iterative_pressure_update!\n"); } Iteration++; thrust::device_vector<Real>::iterator iter = thrust::max_element(Residuals.begin(), Residuals.end()); auto position = iter - Residuals.begin(); MaxRes = *iter; // MaxRes = // thrust::reduce(Residuals.begin(), Residuals.end(), 0.0, thrust::plus<Real>()) / // numObjectsH->numAllMarkers; // Real PMAX = thrust::reduce(p_old.begin(), p_old.end(), 0.0, thrust::maximum<Real>()); // MaxRes = thrust::reduce(Residuals.begin(), Residuals.end(), 0.0, thrust::plus<Real>()) / // numObjectsH->numAllMarkers; // MaxRes = thrust::reduce(Residuals.begin(), Residuals.end(), 0.0, thrust::maximum<Real>()); // Real R_np = thrust::reduce(rho_np.begin(), rho_np.end(), 0.0, thrust::plus<Real>()) / // rho_np.size(); // Real R_p = thrust::reduce(rho_p.begin(), rho_p.end(), 0.0, thrust::plus<Real>()) / // rho_p.size(); // if (paramsH->Verbose_monitoring) printf("Iter= %d, Res= %f\n", Iteration, MaxRes); } } thrust::device_vector<Real>::iterator iter = thrust::min_element(p_old.begin(), p_old.end()); auto position = iter - p_old.begin(); Real shift_p = *iter; // Real shift_p = 0; // This must be run if linear solver is used if (paramsH->USE_LinearSolver || paramsH->ClampPressure) { printf("Shifting pressure values by %f\n", -shift_p); *isErrorH = false; hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice); hipLaunchKernelGGL(( FinalizePressure), dim3(numBlocks), dim3(numThreads), 0, 0, mR4CAST(sortedSphMarkersD->posRadD), mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(p_old), mR3CAST(F_p), U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), numAllMarkers, shift_p, isErrorD); hipDeviceSynchronize(); cudaCheckError(); hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost); if (*isErrorH == true) { throw std::runtime_error("Error! 
program crashed after FinalizePressure!\n"); } } double durationLinearSystem = (clock() - LinearSystemClock) / (double)CLOCKS_PER_SEC; double durationtotal_step_time = (clock() - total_step_timeClock) / (double)CLOCKS_PER_SEC; printf("---------------IISPH CLOCK-------------------\n"); printf(" Total: %f \n FormAXB: %f\n Linear System: %f \n", durationtotal_step_time, durationFormAXB, durationLinearSystem); if (!paramsH->USE_LinearSolver) printf(" Iter (Jacobi+SOR)# = %d, to Res= %.3e \n", Iteration, MaxRes); if (paramsH->USE_LinearSolver) if (myLinearSolver->GetSolverStatus()) { std::cout << " Solver converged to " << myLinearSolver->GetResidual() << " tolerance"; std::cout << " after " << myLinearSolver->GetNumIterations() << " iterations" << std::endl; } else { std::cout << "Failed to converge after " << myLinearSolver->GetIterationLimit() << " iterations"; std::cout << " (" << myLinearSolver->GetResidual() << " final residual)" << std::endl; } //------------------------------------------------------------------------ //------------------------------------------------------------------------ hipFree(isErrorD); free(isErrorH); } void ChFsiForceIISPH::ForceSPH(std::shared_ptr<SphMarkerDataD> otherSphMarkersD, std::shared_ptr<FsiBodiesDataD> otherFsiBodiesD, std::shared_ptr<FsiMeshDataD> otherFsiMeshD) { sphMarkersD = otherSphMarkersD; int numAllMarkers = (int)numObjectsH->numAllMarkers; int numHelperMarkers = (int)numObjectsH->numHelperMarkers; fsiCollisionSystem->ArrangeData(sphMarkersD); thrust::device_vector<Real3>::iterator iter = thrust::max_element(sortedSphMarkersD->velMasD.begin(), sortedSphMarkersD->velMasD.end(), compare_Real3_mag()); Real MaxVel = length(*iter); if (paramsH->Adaptive_time_stepping) { Real dt_CFL = paramsH->Co_number * paramsH->HSML / MaxVel; Real dt_nu = 0.25 * paramsH->HSML * paramsH->HSML / (paramsH->mu0 / paramsH->rho0); Real dt_body = 0.25 * std::sqrt(paramsH->HSML / length(paramsH->bodyForce3 + paramsH->gravity)); Real dt = std::fmin(dt_body, std::fmin(dt_CFL, dt_nu)); if (dt / paramsH->dT_Max > 0.7 && dt / paramsH->dT_Max < 1) paramsH->dT = paramsH->dT_Max * 0.5; else paramsH->dT = std::fmin(dt, paramsH->dT_Max); CopyParams_NumberOfObjects(paramsH, numObjectsH); printf(" time step=%.3e, dt_Max=%.3e, dt_CFL=%.3e (CFL=%.2g), dt_nu=%.3e, dt_body=%.3e\n", paramsH->dT, paramsH->dT_Max, dt_CFL, paramsH->Co_number, dt_nu, dt_body); } bool *isErrorH, *isErrorD, *isErrorD2; isErrorH = (bool*)malloc(sizeof(bool)); hipMalloc((void**)&isErrorD, sizeof(bool)); hipMalloc((void**)&isErrorD2, sizeof(bool)); *isErrorH = false; hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice); hipMemcpy(isErrorD2, isErrorH, sizeof(bool), hipMemcpyHostToDevice); uint numThreads, numBlocks; computeGridSize(numAllMarkers, 256, numBlocks, numThreads); printf("numBlocks: %d, numThreads: %d, numAllMarker:%d \n", numBlocks, numThreads, numAllMarkers); thrust::device_vector<Real> Color(numAllMarkers); thrust::fill(Color.begin(), Color.end(), 1.0e10); thrust::device_vector<Real> _sumWij_inv(numAllMarkers); thrust::fill(_sumWij_inv.begin(), _sumWij_inv.end(), 0.0); thrust::device_vector<Real> G_i(numAllMarkers * 9); thrust::fill(G_i.begin(), G_i.end(), 0); *isErrorH = false; hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice); thrust::device_vector<uint> Contact_i(numAllMarkers); thrust::fill(Contact_i.begin(), Contact_i.end(), 0); hipLaunchKernelGGL(( calcRho_kernel), dim3(numBlocks), dim3(numThreads), 0, 0, mR4CAST(sortedSphMarkersD->posRadD), 
mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(_sumWij_inv), U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), U1CAST(Contact_i), numAllMarkers, isErrorD); hipDeviceSynchronize(); cudaCheckError(); hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost); if (*isErrorH == true) { throw std::runtime_error("Error! program crashed after calcRho_kernel!\n"); } thrust::device_vector<Real3> Normals(numAllMarkers); hipLaunchKernelGGL(( calcNormalizedRho_kernel), dim3(numBlocks), dim3(numThreads), 0, 0, mR4CAST(sortedSphMarkersD->posRadD), mR3CAST(sortedSphMarkersD->velMasD), mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(_sumWij_inv), R1CAST(G_i), mR3CAST(Normals), R1CAST(Color), U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), numAllMarkers, isErrorD); hipDeviceSynchronize(); cudaCheckError(); hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost); if (*isErrorH == true) { throw std::runtime_error("Error! program crashed after calcNormalizedRho_kernel!\n"); } thrust::device_vector<Real> p_old(numAllMarkers, 0.0); calcPressureIISPH(otherFsiBodiesD, otherFsiMeshD->pos_fsi_fea_D, otherFsiMeshD->vel_fsi_fea_D, otherFsiMeshD->acc_fsi_fea_D, _sumWij_inv, p_old, Normals, G_i, Color); //------------------------------------------------------------------------ // thread per particle // std::cout << "dT in ForceSPH after calcPressure: " << paramsH->dT << "\n"; double CalcForcesClock = clock(); thrust::fill(vel_vis_Sorted_D.begin(), vel_vis_Sorted_D.end(), mR3(0.0)); thrust::fill(derivVelRhoD_Sorted_D.begin(), derivVelRhoD_Sorted_D.end(), mR4(0.0)); thrust::fill(vel_XSPH_Sorted_D.begin(), vel_XSPH_Sorted_D.end(), mR3(0.0)); thrust::device_vector<Real3> dr_shift(numAllMarkers); thrust::fill(dr_shift.begin(), dr_shift.end(), mR3(0.0)); thrust::device_vector<Real3> NEW_Vel(numAllMarkers, mR3(0.0)); hipLaunchKernelGGL(( CalcForces), dim3(numBlocks), dim3(numThreads), 0, 0, mR3CAST(NEW_Vel), mR4CAST(derivVelRhoD_Sorted_D), mR4CAST(sortedSphMarkersD->posRadD), mR3CAST(sortedSphMarkersD->velMasD), mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(_sumWij_inv), R1CAST(p_old), mR3CAST(dr_shift), U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), paramsH->dT, numAllMarkers, isErrorD); hipDeviceSynchronize(); cudaCheckError(); hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost); if (*isErrorH == true) { throw std::runtime_error("Error! program crashed in CalcForces!\n"); } double calcforce = (clock() - CalcForcesClock) / (double)CLOCKS_PER_SEC; printf(" Force Computation: %f \n", calcforce); double UpdateClock = clock(); sortedSphMarkersD->velMasD = NEW_Vel; hipLaunchKernelGGL(( UpdateDensity), dim3(numBlocks), dim3(numThreads), 0, 0, mR3CAST(vel_vis_Sorted_D), mR3CAST(vel_XSPH_Sorted_D), mR3CAST(sortedSphMarkersD->velMasD), mR4CAST(sortedSphMarkersD->posRadD), mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(_sumWij_inv), U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), numAllMarkers, isErrorD); hipDeviceSynchronize(); cudaCheckError(); hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost); if (*isErrorH == true) { throw std::runtime_error("Error! 
program crashed in UpdateDensity!\n");
    }

    // Copy the sorted arrays back to the original (unsorted) marker ordering.
    CopySortedToOriginal_NonInvasive_R3(fsiGeneralData->vel_XSPH_D, vel_XSPH_Sorted_D,
                                        markersProximityD->gridMarkerIndexD);
    CopySortedToOriginal_NonInvasive_R3(fsiGeneralData->vis_vel_SPH_D, vel_vis_Sorted_D,
                                        markersProximityD->gridMarkerIndexD);
    CopySortedToOriginal_NonInvasive_R3(sphMarkersD->velMasD, sortedSphMarkersD->velMasD,
                                        markersProximityD->gridMarkerIndexD);
    CopySortedToOriginal_NonInvasive_R4(sphMarkersD->posRadD, sortedSphMarkersD->posRadD,
                                        markersProximityD->gridMarkerIndexD);
    CopySortedToOriginal_NonInvasive_R4(sphMarkersD->rhoPresMuD, sortedSphMarkersD->rhoPresMuD,
                                        markersProximityD->gridMarkerIndexD);
    CopySortedToOriginal_NonInvasive_R4(fsiGeneralData->derivVelRhoD, derivVelRhoD_Sorted_D,
                                        markersProximityD->gridMarkerIndexD);

    printf(" Update information: %f \n", (clock() - UpdateClock) / (double)CLOCKS_PER_SEC);
    printf("----------------------------------------------\n");
}

}  // namespace fsi
}  // namespace chrono
986b4152c7b636da46bd93bbee5de26c76999d6d.cu
// ============================================================================= // PROJECT CHRONO - http://projectchrono.org // // Copyright (c) 2014 projectchrono.org // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file at the top level of the distribution and at // http://projectchrono.org/license-chrono.txt. // // ============================================================================= // Author: Milad Rakhsha // ============================================================================= #include <thrust/extrema.h> #include <thrust/sort.h> #include "chrono_fsi/physics/ChFsiForceIISPH.cuh" #include "chrono_fsi/physics/ChSphGeneral.cuh" #define RESOLUTION_LENGTH_MULT_IISPH 2.0 //========================================================================================================================================== namespace chrono { namespace fsi { // double precision atomic add function __device__ inline double datomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } ChFsiForceIISPH::ChFsiForceIISPH(std::shared_ptr<ChBce> otherBceWorker, std::shared_ptr<SphMarkerDataD> otherSortedSphMarkersD, std::shared_ptr<ProximityDataD> otherMarkersProximityD, std::shared_ptr<FsiGeneralData> otherFsiGeneralData, std::shared_ptr<SimParams> otherParamsH, std::shared_ptr<ChCounters> otherNumObjects, bool verb) : ChFsiForce(otherBceWorker, otherSortedSphMarkersD, otherMarkersProximityD, otherFsiGeneralData, otherParamsH, otherNumObjects, verb) {} ChFsiForceIISPH::~ChFsiForceIISPH() {} //-------------------------------------------------------------------------------------------------------------------------------- void ChFsiForceIISPH::Initialize() { ChFsiForce::Initialize(); cudaMemcpyToSymbolAsync(paramsD, paramsH.get(), sizeof(SimParams)); cudaMemcpyToSymbolAsync(numObjectsD, numObjectsH.get(), sizeof(ChCounters)); cudaMemcpyFromSymbol(paramsH.get(), paramsD, sizeof(SimParams)); cudaDeviceSynchronize(); } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void V_i_np__AND__d_ii_kernel(Real4* sortedPosRad, // input: sorted positions Real3* sortedVelMas, Real4* sortedRhoPreMu, Real3* d_ii, Real3* V_i_np, Real* sumWij_inv, Real* G_tensor, uint* cellStart, uint* cellEnd, Real delta_t, const size_t numAllMarkers, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers || sortedRhoPreMu[i_idx].w <= -2) { return; } // sortedRhoPreMu[i_idx].x = sortedRhoPreMu[i_idx].x / sumWij_inv[i_idx]; Real h_i = sortedPosRad[i_idx].w; Real m_i = h_i * h_i * h_i * paramsD.rho0; Real mu_0 = paramsD.mu0; Real epsilon = paramsD.epsMinMarkersDis; Real dT = delta_t; Real3 source_term = paramsD.gravity + paramsD.bodyForce3; Real RHO_0 = paramsD.rho0; if (sortedRhoPreMu[i_idx].x < EPSILON) { printf("density is %f,ref density= %f\n", sortedRhoPreMu[i_idx].x, RHO_0); } Real3 posi = mR3(sortedPosRad[i_idx]); Real3 Veli = sortedVelMas[i_idx]; Real Rhoi = sortedRhoPreMu[i_idx].x; Real3 My_d_ii = mR3(0); Real3 My_F_i_np = mR3(0); // get address in grid int3 gridPos = calcGridPos(posi); for (int z = -1; z <= 1; z++) { for 
(int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); // get start of bucket for this cell uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { // cell is not empty uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { Real3 posj = mR3(sortedPosRad[j]); Real3 rij = Distance(posi, posj); Real d = length(rij); if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2 || i_idx == j) continue; Real3 eij = rij / d; Real3 Velj = sortedVelMas[j]; Real Rhoj = sortedRhoPreMu[j].x; Real h_j = sortedPosRad[j].w; if (Rhoj == 0) { printf("Bug F_i_np__AND__d_ii_kernel i=%d j=%d, hi=%f, hj=%f\n", i_idx, j, h_i, h_j); } Real m_j = h_j * h_j * h_j * paramsD.rho0; Real h_ij = 0.5 * (h_j + h_i); Real3 grad_ij = GradWh(rij, h_ij); My_d_ii += m_j * (-(dT * dT) / (Rhoi * Rhoi)) * grad_ij; Real Rho_bar = (Rhoj + Rhoi) * 0.5; Real3 V_ij = (Veli - Velj); // Real nu = mu_0 * paramsD.HSML * 320 / Rho_bar; // Real3 muNumerator = nu * fmin(0.0, dot(rij, V_ij)) * grad_ij; Real3 muNumerator = 2 * mu_0 * dot(rij, grad_ij) * V_ij; Real muDenominator = (Rho_bar * Rho_bar) * (d * d + h_ij * h_ij * epsilon); // if ((sortedRhoPreMu[i_idx].w < 0 && sortedRhoPreMu[j].w < 0)) // if (sortedRhoPreMu[i_idx].w < 0 || (sortedRhoPreMu[i_idx].w >= 0 && // sortedRhoPreMu[j].w < 0)) My_F_i_np += m_j * muNumerator / muDenominator; Real Wd = W3h(d, h_ij); My_F_i_np -= paramsD.kappa / m_i * m_j * Wd * rij; } } } } } // if (!paramsD.Conservative_Form) // My_F_i_np = mu_0 * LaplacainVi; My_F_i_np *= m_i; My_F_i_np += m_i * source_term; d_ii[i_idx] = My_d_ii; V_i_np[i_idx] = (My_F_i_np * dT + Veli); // This does not contain m_0? } //-------------------------------------------------------------------------------------------------------------------------------- //-------------------------------------------------------------------------------------------------------------------------------- __global__ void Rho_np_AND_a_ii_AND_sum_m_GradW(Real4* sortedPosRad, Real4* sortedRhoPreMu, Real* rho_np, // Write Real* a_ii, // Write Real* p_old, // Write Real3* V_np, // Read Real3* d_ii, // Read Real3* sum_m_GradW, uint* cellStart, uint* cellEnd, Real delta_t, const size_t numAllMarkers, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers || sortedRhoPreMu[i_idx].w <= -2) { return; } Real h_i = sortedPosRad[i_idx].w; Real m_i = h_i * h_i * h_i * paramsD.rho0; Real3 posi = mR3(sortedPosRad[i_idx]); Real3 Veli_np = V_np[i_idx]; Real Rho_i = sortedRhoPreMu[i_idx].x; Real3 my_d_ii = d_ii[i_idx]; Real rho_temp = 0; Real my_a_ii = 0; Real3 My_sum_m_gradW = mR3(0); Real dT = delta_t; // get address in gridj int3 gridPos = calcGridPos(posi); // // examine neighbouring cells for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); // get start of bucket for this cell uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { // cell is not empty // iterate over particles in this cell uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { Real3 posj = mR3(sortedPosRad[j]); Real3 dist3 = Distance(posi, posj); Real d = length(dist3); if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2 || i_idx == j) continue; Real h_j = sortedPosRad[j].w; Real m_j = h_j * h_j * h_j * paramsD.rho0; Real h_ij = 
0.5 * (h_j + h_i); Real3 Velj_np = V_np[j]; Real3 grad_i_wij = GradWh(dist3, h_ij); rho_temp += m_j * dot((Veli_np - Velj_np), grad_i_wij); Real3 d_ji = m_i * (-(dT * dT) / (Rho_i * Rho_i)) * (-grad_i_wij); my_a_ii += m_j * dot((my_d_ii - d_ji), grad_i_wij); My_sum_m_gradW += m_j * grad_i_wij; } } } } } rho_np[i_idx] = dT * rho_temp + sortedRhoPreMu[i_idx].x; // Note: a_ii can become zero and when this can cause divide by 0 issues for free particles a_ii[i_idx] = abs(my_a_ii) > EPSILON ? my_a_ii : 1.0; sum_m_GradW[i_idx] = My_sum_m_gradW; p_old[i_idx] = sortedRhoPreMu[i_idx].y; // = 1000; // Note that this is outside of the for loop } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void Calc_dij_pj(Real3* dij_pj, // write Real3* F_p, // Write Real3* d_ii, // Read Real4* sortedPosRad, Real3* sortedVelMas, Real4* sortedRhoPreMu, Real* p_old, uint* cellStart, uint* cellEnd, Real delta_t, const size_t numAllMarkers, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers || sortedRhoPreMu[i_idx].w <= -2) { return; } Real h_i = sortedPosRad[i_idx].w; Real m_i = h_i * h_i * h_i * paramsD.rho0; Real3 my_F_p = mR3(0); Real p_i_old = p_old[i_idx]; Real3 pos_i = mR3(sortedPosRad[i_idx]); Real Rho_i = sortedRhoPreMu[i_idx].x; if (sortedRhoPreMu[i_idx].x < EPSILON) { printf("(Calc_dij_pj) My density is %f in Calc_dij_pj\n", sortedRhoPreMu[i_idx].x); } Real dT = delta_t; Real3 My_dij_pj = mR3(0); int3 gridPos = calcGridPos(pos_i); for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); // get start of bucket for this cell uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { // cell is not empty // iterate over particles in this cell uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { Real3 pos_j = mR3(sortedPosRad[j]); Real3 dist3 = Distance(pos_i, pos_j); Real d = length(dist3); ////CHECK THIS CONDITION!!! 
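// Pairs outside the kernel support (RESOLUTION_LENGTH_MULT * h_i), ghost/helper
// markers (type <= -2), and self-interaction are skipped. For the remaining neighbors
// the loop accumulates dij_pj = sum_j [ -dT^2 * m_j / rho_j^2 * gradW_ij ] * p_j_old
// and the pressure force F_p = -m_i * sum_j m_j * (p_i_old/rho_i^2 + p_j_old/rho_j^2) * gradW_ij.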
if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2 || i_idx == j) continue; Real h_j = sortedPosRad[j].w; Real m_j = h_j * h_j * h_j * paramsD.rho0; Real h_ij = 0.5 * (h_j + h_i); Real3 grad_i_wij = GradWh(dist3, h_ij); Real Rho_j = sortedRhoPreMu[j].x; Real p_j_old = p_old[j]; My_dij_pj += m_j * (-(dT * dT) / (Rho_j * Rho_j)) * grad_i_wij * p_j_old; my_F_p += m_j * ((p_i_old / (Rho_i * Rho_i)) + (p_j_old / (Rho_j * Rho_j))) * grad_i_wij; } } } } } dij_pj[i_idx] = My_dij_pj; F_p[i_idx] = -m_i * my_F_p; } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void CalcNumber_Contacts(uint* numContacts, Real4* sortedPosRad, Real4* sortedRhoPreMu, uint* cellStart, uint* cellEnd, const size_t numAllMarkers, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } if (sortedRhoPreMu[i_idx].w <= -2) { numContacts[i_idx] = 1; return; } Real h_i = sortedPosRad[i_idx].w; // Real m_i = h_i * h_i * h_i * paramsD.rho0; int myType = sortedRhoPreMu[i_idx].w; Real3 pos_i = mR3(sortedPosRad[i_idx]); uint numCol[400]; int counter = 1; numCol[0] = i_idx; // The first one is always the idx of the marker itself int3 gridPos = calcGridPos(pos_i); for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); // get start of bucket for this cell uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { // iterate over particles in this cell uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { Real3 pos_j = mR3(sortedPosRad[j]); Real3 dist3 = Distance(pos_i, pos_j); Real d = length(dist3); if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2 || i_idx == j) continue; bool AlreadyHave = false; for (uint findCol = 1; findCol <= counter; findCol++) { if (numCol[findCol] == j) { AlreadyHave = true; continue; } } // Room for improvment ... if (!AlreadyHave) { numCol[counter] = j; counter++; // Do not count BCE-BCE interactions... if (myType >= 0 && sortedRhoPreMu[j].w >= 0 && paramsD.bceType == BceVersion::ADAMI) counter--; } if (myType != -1) // For BCE no need to go deeper than this... 
continue; Real h_j = sortedPosRad[j].w; int3 gridPosJ = calcGridPos(pos_j); for (int zz = -1; zz <= 1; zz++) { for (int yy = -1; yy <= 1; yy++) { for (int xx = -1; xx <= 1; xx++) { int3 neighbourPosJ = gridPosJ + mI3(xx, yy, zz); uint gridHashJ = calcGridHash(neighbourPosJ); uint startIndexJ = cellStart[gridHashJ]; if (startIndexJ != 0xffffffff) { // cell is not empty uint endIndexJ = cellEnd[gridHashJ]; for (uint k = startIndexJ; k < endIndexJ; k++) { Real3 pos_k = mR3(sortedPosRad[k]); Real3 dist3jk = Distance(pos_j, pos_k); Real djk = length(dist3jk); if (djk > RESOLUTION_LENGTH_MULT * h_j || k == j || k == i_idx || sortedRhoPreMu[k].w <= -2) continue; bool AlreadyHave2 = false; for (uint findCol = 1; findCol <= counter; findCol++) { if (numCol[findCol] == k) { AlreadyHave2 = true; continue; } } if (!AlreadyHave2) { numCol[counter] = k; counter++; } } } } } } } } } } } numContacts[i_idx] = counter + 10; } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void Calc_summGradW(Real3* summGradW, // write Real4* sortedPosRad, Real4* sortedRhoPreMu, uint* cellStart, uint* cellEnd, const size_t numAllMarkers, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } if (sortedRhoPreMu[i_idx].w <= -2) { return; } Real h_i = sortedPosRad[i_idx].w; // Real m_i = h_i * h_i * h_i * paramsD.rho0; Real3 pos_i = mR3(sortedPosRad[i_idx]); Real3 My_summgradW = mR3(0); // Real dT = paramsD.dT; int3 gridPos = calcGridPos(pos_i); for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); // get start of bucket for this cell uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { // cell is not empty // iterate over particles in this cell uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { Real3 pos_j = mR3(sortedPosRad[j]); Real3 dist3 = Distance(pos_i, pos_j); Real d = length(dist3); if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2 || i_idx == j) continue; Real h_j = sortedPosRad[j].w; Real m_j = h_j * h_j * h_j * paramsD.rho0; Real h_ij = 0.5 * (h_j + h_i); Real3 grad_i_wij = GradWh(dist3, h_ij); My_summgradW += m_j * grad_i_wij; } } } } } summGradW[i_idx] = My_summgradW; } //-------------------------------------------------------------------------------------------------------------------------------- __device__ void Calc_BC_aij_Bi(const uint i_idx, Real* csrValA, uint* csrColIndA, unsigned long int* GlobalcsrColIndA, uint* numContacts, // The above 4 vectors are used for CSR form. 
Real* a_ii, // write Real* B_i, Real4* sortedPosRad, Real3* sortedVelMas, const Real4* sortedRhoPreMu, Real3* V_new, Real* p_old, Real3* Normals, Real* G_i, Real* sumWij_inv, Real4* qD, Real3* rigidSPH_MeshPos_LRF_D, Real3* posRigid_fsiBodies_D, Real4* velMassRigid_fsiBodies_D, Real3* omegaVelLRF_fsiBodies_D, Real3* accRigid_fsiBodies_D, Real3* omegaAccLRF_fsiBodies_D, uint* rigidIdentifierD, Real3* pos_fsi_fea_D, Real3* vel_fsi_fea_D, Real3* acc_fsi_fea_D, uint* FlexIdentifierD, const int numFlex1D, uint2* CableElementsNodesD, uint4* ShellElementsNodesD, int4 updatePortion, uint* gridMarkerIndexD, const uint* cellStart, const uint* cellEnd, const size_t numAllMarkers, bool IsSPARSE) { uint csrStartIdx = numContacts[i_idx] + 1; uint csrEndIdx = numContacts[i_idx + 1]; Real h_i = sortedPosRad[i_idx].w; // Real m_i = h_i * h_i * h_i * paramsD.rho0; Real3 my_normal = Normals[i_idx]; Real3 source_term = paramsD.gravity + paramsD.bodyForce3; // if (bceIndex >= numObjectsD.numRigidMarkers) { // return; // } // int Original_idx = gridMarkerIndexD[i_idx]; Real3 myAcc = mR3(0.0); Real3 V_prescribed = mR3(0.0); // if (!(sortedRhoPreMu[i_idx].w >= 0 && sortedRhoPreMu[i_idx].w <= 3)) // printf("type of marker is %f\n", sortedRhoPreMu[i_idx].w); BCE_Vel_Acc(i_idx, myAcc, V_prescribed, sortedPosRad, updatePortion, gridMarkerIndexD, qD, rigidSPH_MeshPos_LRF_D, posRigid_fsiBodies_D, velMassRigid_fsiBodies_D, omegaVelLRF_fsiBodies_D, accRigid_fsiBodies_D, omegaAccLRF_fsiBodies_D, rigidIdentifierD, pos_fsi_fea_D, vel_fsi_fea_D, acc_fsi_fea_D, FlexIdentifierD, numFlex1D, CableElementsNodesD, ShellElementsNodesD); for (int c = csrStartIdx; c < csrEndIdx; c++) { csrValA[c] = 0; csrColIndA[c] = i_idx; GlobalcsrColIndA[c] = i_idx + numAllMarkers * i_idx; } // if ((csrEndIdx - csrStartIdx) != uint(0)) { Real3 numeratorv = mR3(0); Real denumenator = 0; Real pRHS = 0; // Real Rho_i = sortedRhoPreMu[i_idx].x; Real3 pos_i = mR3(sortedPosRad[i_idx]); // get address in grid int3 gridPos = calcGridPos(pos_i); uint counter = 0; for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); // get start of bucket for this cell uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { // cell is not empty uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { Real3 pos_j = mR3(sortedPosRad[j]); Real3 dist3 = Distance(pos_i, pos_j); Real d = length(dist3); if (d > RESOLUTION_LENGTH_MULT * h_i || j == i_idx) continue; Real h_j = sortedPosRad[j].w; // Real m_j = h_j * h_j * h_j * paramsD.rho0; // Real rhoj = sortedRhoPreMu[j].x; Real h_ij = 0.5 * (h_j + h_i); Real Wd = W3h(d, h_ij); Real3 Vel_j = sortedVelMas[j]; if (paramsD.bceType != BceVersion::ADAMI) { if (sortedRhoPreMu[j].w == -1.0 || dot(my_normal, mR3(pos_i - pos_j)) > 0) { Real3 grad_i_wij = GradWh(dist3, h_ij); csrValA[csrStartIdx - 1] += dot(grad_i_wij, my_normal); csrValA[counter + csrStartIdx] = -dot(grad_i_wij, my_normal); csrColIndA[counter + csrStartIdx] = j; GlobalcsrColIndA[counter + csrStartIdx] = j + numAllMarkers * i_idx; counter++; if (sortedRhoPreMu[j].w != -1) continue; numeratorv += Vel_j * Wd; denumenator += Wd; } } else { if (sortedRhoPreMu[j].w != -1 || sortedRhoPreMu[j].w <= -2) continue; numeratorv += Vel_j * Wd; denumenator += Wd; pRHS += dot(source_term - myAcc, dist3) * sortedRhoPreMu[j].x * Wd; csrValA[counter + csrStartIdx] = -Wd; csrColIndA[counter + csrStartIdx] = j; 
GlobalcsrColIndA[counter + csrStartIdx] = j + numAllMarkers * i_idx; counter++; } } } } } } if (abs(denumenator) < EPSILON) { V_new[i_idx] = 2 * V_prescribed; B_i[i_idx] = 0; if (paramsD.bceType == BceVersion::ADAMI) { csrValA[csrStartIdx - 1] = a_ii[i_idx]; csrColIndA[csrStartIdx - 1] = i_idx; GlobalcsrColIndA[csrStartIdx - 1] = i_idx + numAllMarkers * i_idx; } } else { Real Scaling = a_ii[i_idx] / denumenator; V_new[i_idx] = 2 * V_prescribed - numeratorv / denumenator; if (paramsD.bceType == BceVersion::ADAMI) { B_i[i_idx] = pRHS; csrValA[csrStartIdx - 1] = denumenator; csrColIndA[csrStartIdx - 1] = i_idx; GlobalcsrColIndA[csrStartIdx - 1] = i_idx + numAllMarkers * i_idx; for (int i = csrStartIdx - 1; i < csrEndIdx; i++) csrValA[i] *= Scaling; B_i[i_idx] *= Scaling; } } if (paramsD.bceType != BceVersion::ADAMI) { Real Scaling = a_ii[i_idx]; if (abs(csrValA[csrStartIdx - 1]) > EPSILON) { Scaling = a_ii[i_idx]; // csrValA[csrStartIdx - 1]; for (int count = csrStartIdx - 1; count < csrEndIdx; count++) csrValA[count] *= Scaling; } else { clearRow(i_idx, csrStartIdx - 1, csrEndIdx, csrValA, B_i); for (int count = csrStartIdx - 1; count < csrEndIdx; count++) { int j = csrColIndA[counter]; Real3 pos_j = mR3(sortedPosRad[j]); Real3 dist3 = Distance(pos_i, pos_j); Real d = length(dist3); if (d > RESOLUTION_LENGTH_MULT * h_i || j == i_idx) { csrValA[count] = 0.0; continue; } Real h_j = sortedPosRad[j].w; Real h_ij = 0.5 * (h_j + h_i); Real Wd = W3h(d, h_ij); csrValA[count] = sumWij_inv[j] * Wd * Scaling; } csrValA[csrStartIdx - 1] -= 1.0 * Scaling; } B_i[i_idx] = 0.0 * Scaling; } sortedVelMas[i_idx] = V_new[i_idx]; } // namespace fsi //-------------------------------------------------------------------------------------------------------------------------------- //-------------------------------------------------------------------------------------------------------------------------------- __device__ void Calc_fluid_aij_Bi(const uint i_idx, Real* csrValA, uint* csrColIndA, unsigned long int* GlobalcsrColIndA, uint* numContacts, // The above 4 vectors are used for CSR form. 
Real* B_i, Real3* d_ii, // Read Real* a_ii, // Read Real* rho_np, // Read Real3* summGradW, Real4* sortedPosRad, Real4* sortedRhoPreMu, uint* cellStart, uint* cellEnd, Real delta_t, const int numAllMarkers, bool IsSPARSE) { Real3 pos_i = mR3(sortedPosRad[i_idx]); Real dT = delta_t; int counter = 0; // There is always one non-zero at each row- The marker itself B_i[i_idx] = paramsD.rho0 - rho_np[i_idx]; uint csrStartIdx = numContacts[i_idx] + 1; // Reserve the starting index for the A_ii uint csrEndIdx = numContacts[i_idx + 1]; Real h_i = sortedPosRad[i_idx].w; // Real m_i = h_i * h_i * h_i * paramsD.rho0; // for (int c = csrStartIdx; c < csrEndIdx; c++) { // csrValA[c] = a_ii[i_idx]; // csrColIndA[c] = i_idx; // GlobalcsrColIndA[c] = i_idx + numAllMarkers * i_idx; // } int3 gridPos = calcGridPos(pos_i); for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); // get start of bucket for this cell uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { // cell is not empty // iterate over particles in this cell uint endIndex = cellEnd[gridHash]; // Real Rho_i = sortedRhoPreMu[i_idx].x; for (uint j = startIndex; j < endIndex; j++) { Real3 pos_j = mR3(sortedPosRad[j]); Real3 dist3 = Distance(pos_i, pos_j); Real d = length(dist3); if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2 || i_idx == j) continue; Real h_j = sortedPosRad[j].w; Real h_ij = 0.5 * (h_j + h_i); Real3 grad_i_wij = GradWh(dist3, h_ij); Real Rho_j = sortedRhoPreMu[j].x; Real m_j = h_j * h_j * h_j * paramsD.rho0; Real3 d_it = m_j * (-(dT * dT) / (Rho_j * Rho_j)) * grad_i_wij; Real My_a_ij_1 = m_j * dot(d_it, summGradW[i_idx]); Real My_a_ij_2 = m_j * dot(d_ii[j], grad_i_wij); Real My_a_ij_12 = My_a_ij_1 - My_a_ij_2; bool DONE1 = false; for (uint findCol = csrStartIdx; findCol < csrEndIdx; findCol++) { if (csrColIndA[findCol] == j) { csrValA[findCol] += My_a_ij_12; csrColIndA[findCol] = j; GlobalcsrColIndA[findCol] = j + numAllMarkers * i_idx; DONE1 = true; continue; } } if (!DONE1) { csrValA[counter + csrStartIdx] += My_a_ij_12; csrColIndA[counter + csrStartIdx] = j; GlobalcsrColIndA[counter + csrStartIdx] = j + numAllMarkers * i_idx; counter++; } int3 gridPosJ = calcGridPos(pos_j); for (int zz = -1; zz <= 1; zz++) { for (int yy = -1; yy <= 1; yy++) { for (int xx = -1; xx <= 1; xx++) { int3 neighbourPosJ = gridPosJ + mI3(xx, yy, zz); uint gridHashJ = calcGridHash(neighbourPosJ); uint startIndexJ = cellStart[gridHashJ]; if (startIndexJ != 0xffffffff) { // cell is not empty uint endIndexJ = cellEnd[gridHashJ]; for (uint k = startIndexJ; k < endIndexJ; k++) { Real3 pos_k = mR3(sortedPosRad[k]); Real3 dist3jk = Distance(pos_j, pos_k); Real djk = length(dist3jk); if (djk > RESOLUTION_LENGTH_MULT_IISPH * h_j || k == j || k == i_idx || sortedRhoPreMu[k].w <= -2) continue; Real h_k = sortedPosRad[j].w; Real h_jk = 0.5 * (h_j + h_k); Real3 grad_j_wjk = GradWh(dist3jk, h_jk); Real m_k = cube(sortedPosRad[k].w) * paramsD.rho0; Real Rho_k = sortedRhoPreMu[k].x; Real3 d_jk = m_k * (-(dT * dT) / (Rho_k * Rho_k)) * grad_j_wjk; Real My_a_ij_3 = m_j * dot(d_jk, grad_i_wij); bool DONE2 = false; for (uint findCol = csrStartIdx; findCol < csrEndIdx; findCol++) { if (csrColIndA[findCol] == k) { csrValA[findCol] -= My_a_ij_3; csrColIndA[findCol] = k; GlobalcsrColIndA[findCol] = k + numAllMarkers * i_idx; DONE2 = true; continue; } } if (!DONE2) { csrValA[counter + csrStartIdx] -= My_a_ij_3; 
csrColIndA[counter + csrStartIdx] = k; GlobalcsrColIndA[counter + csrStartIdx] = k + numAllMarkers * i_idx; counter++; } } } } } } } } } } } for (int myIdx = csrStartIdx; myIdx < csrEndIdx; myIdx++) { if (csrColIndA[myIdx] == i_idx) csrValA[myIdx] = a_ii[i_idx]; } csrValA[csrStartIdx - 1] = a_ii[i_idx]; csrColIndA[csrStartIdx - 1] = i_idx; GlobalcsrColIndA[csrStartIdx - 1] = i_idx + numAllMarkers * i_idx; if (sortedRhoPreMu[i_idx].x < 0.999 * paramsD.rho0) { csrValA[csrStartIdx - 1] = a_ii[i_idx]; for (int myIdx = csrStartIdx; myIdx < csrEndIdx; myIdx++) { csrValA[myIdx] = 0.0; B_i[i_idx] = 0.0; } } Real RHS = B_i[i_idx]; B_i[i_idx] = RHS; // fminf(0.0, RHS); } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void FormAXB(Real* csrValA, uint* csrColIndA, unsigned long int* GlobalcsrColIndA, uint* numContacts, // The above 4 vectors are used for CSR form. Real* a_ij, // write Real* B_i, // write Real3* d_ii, // Read Real* a_ii, // Read Real3* summGradW, Real4* sortedPosRad, Real3* sortedVelMas, Real4* sortedRhoPreMu, Real3* V_new, Real* p_old, Real3* Normals, Real* G_i, Real* sumWij_inv, Real* rho_np, Real4* qD, Real3* rigidSPH_MeshPos_LRF_D, Real3* posRigid_fsiBodies_D, Real4* velMassRigid_fsiBodies_D, Real3* omegaVelLRF_fsiBodies_D, Real3* accRigid_fsiBodies_D, Real3* omegaAccLRF_fsiBodies_D, uint* rigidIdentifierD, Real3* pos_fsi_fea_D, Real3* vel_fsi_fea_D, Real3* acc_fsi_fea_D, uint* FlexIdentifierD, const int numFlex1D, uint2* CableElementsNodesD, uint4* ShellElementsNodesD, int4 updatePortion, uint* gridMarkerIndexD, uint* cellStart, uint* cellEnd, Real delta_t, const size_t numAllMarkers, bool IsSPARSE, volatile bool* isError) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } // Real m_0 = paramsD.markerMass; // Real RHO_0 = paramsD.rho0; // Real dT = paramsD.dT; // Real3 gravity = paramsD.gravity; int TYPE_OF_NARKER = sortedRhoPreMu[i_idx].w; if (TYPE_OF_NARKER <= -2) { B_i[i_idx] = 0; uint csrStartIdx = numContacts[i_idx]; // This needs to be check to see if it messes up the condition number of the matrix csrValA[csrStartIdx] = 1.0; csrColIndA[csrStartIdx] = i_idx; GlobalcsrColIndA[csrStartIdx] = i_idx + numAllMarkers * i_idx; } else if (TYPE_OF_NARKER == -1) { Calc_fluid_aij_Bi(i_idx, csrValA, csrColIndA, GlobalcsrColIndA, numContacts, B_i, d_ii, a_ii, rho_np, summGradW, sortedPosRad, sortedRhoPreMu, cellStart, cellEnd, delta_t, numAllMarkers, true); } else if (TYPE_OF_NARKER > -1) Calc_BC_aij_Bi(i_idx, csrValA, csrColIndA, GlobalcsrColIndA, numContacts, a_ii, B_i, sortedPosRad, sortedVelMas, sortedRhoPreMu, V_new, p_old, Normals, G_i, sumWij_inv, qD, rigidSPH_MeshPos_LRF_D, posRigid_fsiBodies_D, velMassRigid_fsiBodies_D, omegaVelLRF_fsiBodies_D, accRigid_fsiBodies_D, omegaAccLRF_fsiBodies_D, rigidIdentifierD, pos_fsi_fea_D, vel_fsi_fea_D, acc_fsi_fea_D, FlexIdentifierD, numFlex1D, CableElementsNodesD, ShellElementsNodesD, updatePortion, gridMarkerIndexD, cellStart, cellEnd, numAllMarkers, true); } //-------------------------------------------------------------------------------------------------------------------------------- //-------------------------------------------------------------------------------------------------------------------------------- __global__ void Calc_Pressure_AXB_USING_CSR(Real* csrValA, Real* a_ii, uint* csrColIndA, uint* numContacts, Real4* sortedRhoPreMu, Real* sumWij_inv, Real3* sortedVelMas, Real3* V_new, Real* 
p_old, Real* B_i, // Read Real* Residuals, const size_t numAllMarkers, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } if (sortedRhoPreMu[i_idx].w <= -2) { return; } // Real RHO_0 = paramsD.rho0; // bool ClampPressure = paramsD.ClampPressure; // Real Max_Pressure = paramsD.Max_Pressure; uint startIdx = numContacts[i_idx] + 1; // numContacts[i_idx] is the diagonal itself uint endIdx = numContacts[i_idx + 1]; Real aij_pj = 0; // Real error = aij_pj + sortedRhoPreMu[i_idx].y * csrValA[startIdx - 1] - B_i[i_idx]; for (int myIdx = startIdx; myIdx < endIdx; myIdx++) { if (csrColIndA[myIdx] != i_idx) aij_pj += csrValA[myIdx] * p_old[csrColIndA[myIdx]]; } Real RHS = B_i[i_idx]; Residuals[i_idx] = abs(RHS - aij_pj - p_old[i_idx] * csrValA[startIdx - 1]); sortedRhoPreMu[i_idx].y = (RHS - aij_pj) / csrValA[startIdx - 1]; // if (paramsD.ClampPressure && sortedRhoPreMu[i_idx].y < 0) // sortedRhoPreMu[i_idx].y = 0; if (!isfinite(aij_pj)) { printf("a_ij *p_j became Nan in Calc_Pressure_AXB_USING_CSR "); } } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void Calc_Pressure(Real* a_ii, // Read Real3* d_ii, // Read Real3* dij_pj, // Read Real* rho_np, // Read Real* rho_p, // Write Real* Residuals, Real3* F_p, Real4* sortedPosRad, Real3* sortedVelMas, Real4* sortedRhoPreMu, Real4* qD, Real3* rigidSPH_MeshPos_LRF_D, Real3* posRigid_fsiBodies_D, Real4* velMassRigid_fsiBodies_D, Real3* omegaVelLRF_fsiBodies_D, Real3* accRigid_fsiBodies_D, Real3* omegaAccLRF_fsiBodies_D, uint* rigidIdentifierD, Real3* pos_fsi_fea_D, Real3* vel_fsi_fea_D, Real3* acc_fsi_fea_D, uint* FlexIdentifierD, const int numFlex1D, uint2* CableElementsNodesD, uint4* ShellElementsNodesD, int4 updatePortion, uint* gridMarkerIndexD, Real* p_old, Real3* V_new, uint* cellStart, uint* cellEnd, Real delta_t, const size_t numAllMarkers, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } if (sortedRhoPreMu[i_idx].w <= -2) { return; } Real h_i = sortedPosRad[i_idx].w; Real m_i = h_i * h_i * h_i * paramsD.rho0; Real RHO_0 = paramsD.rho0; Real dT = delta_t; Real3 source_term = paramsD.gravity + paramsD.bodyForce3; if (sortedRhoPreMu[i_idx].x < EPSILON) { printf("(Calc_Pressure)My density is %f in Calc_Pressure\n", sortedRhoPreMu[i_idx].x); } int myType = sortedRhoPreMu[i_idx].w; Real Rho_i = sortedRhoPreMu[i_idx].x; Real p_i = p_old[i_idx]; Real3 pos_i = mR3(sortedPosRad[i_idx]); Real p_new = 0; Real my_rho_p = 0; Real3 F_i_p = F_p[i_idx]; if (myType == -1) { if (Rho_i < 0.999 * RHO_0) { p_new = 0; Residuals[i_idx] = 0; } else { Real3 my_dij_pj = dij_pj[i_idx]; Real sum_dij_pj = 0; // This is the first summation term in the expression for the pressure. Real sum_djj_pj = 0; // This is the second summation term in the expression for the pressure. Real sum_djk_pk = 0; // This is the last summation term in the expression for the pressure. 
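/*
 * Matrix-free IISPH pressure relaxation (the scheme behind this fluid branch; cf. Ihmsen et al.,
 * "Implicit Incompressible SPH"): the updated pressure is
 *   p_i = ( (rho0 - rho_adv_i) - sum_j m_j [ (sum_k d_ik p_k) - d_jj p_j - sum_{k != i} d_jk p_k ] · gradW_ij ) / a_ii
 * In the neighbor loop below, my_dij_pj supplies the precomputed sum_k d_ik p_k (from Calc_dij_pj),
 * d_ii[j] supplies d_jj, and dij_pj[j] - d_ji * p_i reconstructs sum_{k != i} d_jk p_k. Here
 * rho_adv_i corresponds to rho_np[i_idx], and the under-relaxation against p_old is applied
 * afterwards in Update_AND_Calc_Res.
 */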
int3 gridPosI = calcGridPos(pos_i); for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbourPosI = gridPosI + mI3(x, y, z); uint gridHashI = calcGridHash(neighbourPosI); // get start of bucket for this cell uint startIndexI = cellStart[gridHashI]; if (startIndexI != 0xffffffff) { uint endIndexI = cellEnd[gridHashI]; for (uint j = startIndexI; j < endIndexI; j++) { Real3 pos_j = mR3(sortedPosRad[j]); Real3 dist3ij = Distance(pos_i, pos_j); Real dij = length(dist3ij); if (dij > RESOLUTION_LENGTH_MULT * paramsD.HSML || i_idx == j || sortedRhoPreMu[j].w <= -2) continue; // Real Rho_j = sortedRhoPreMu[j].x; Real p_j_old = p_old[j]; Real h_j = sortedPosRad[j].w; Real m_j = h_j * h_j * h_j * paramsD.rho0; Real3 djj = d_ii[j]; Real3 F_j_p = F_p[j]; Real h_ij = 0.5 * (h_j + h_i); Real3 grad_i_wij = GradWh(dist3ij, h_ij); Real3 d_ji = m_i * (-(dT * dT) / (Rho_i * Rho_i)) * (-grad_i_wij); Real3 djk_pk = dij_pj[j] - d_ji * p_i; sum_dij_pj += m_j * dot(my_dij_pj, grad_i_wij); sum_djj_pj += m_j * dot(djj, grad_i_wij) * p_j_old; sum_djk_pk += m_j * dot(djk_pk, grad_i_wij); my_rho_p += (dT * dT) * m_j * dot((F_i_p / m_i - F_j_p / m_j), grad_i_wij); } } } } } // Real RHS = fminf(0.0, RHO_0 - rho_np[i_idx]); Real RHS = RHO_0 - rho_np[i_idx]; Real aij_pj = +sum_dij_pj - sum_djj_pj - sum_djk_pk; p_new = (RHS - aij_pj) / a_ii[i_idx]; Residuals[i_idx] = abs(RHS - aij_pj - p_old[i_idx] * a_ii[i_idx]); // sortedRhoPreMu[i_idx].x = aij_pj + p_new * a_ii[i_idx] + RHO_0 - RHS; } } else { // Do Adami BC Real3 myAcc = mR3(0); Real3 V_prescribed = mR3(0); BCE_Vel_Acc(i_idx, myAcc, V_prescribed, sortedPosRad, updatePortion, gridMarkerIndexD, qD, rigidSPH_MeshPos_LRF_D, posRigid_fsiBodies_D, velMassRigid_fsiBodies_D, omegaVelLRF_fsiBodies_D, accRigid_fsiBodies_D, omegaAccLRF_fsiBodies_D, rigidIdentifierD, pos_fsi_fea_D, vel_fsi_fea_D, acc_fsi_fea_D, FlexIdentifierD, numFlex1D, CableElementsNodesD, ShellElementsNodesD); Real3 numeratorv = mR3(0); Real denumenator = 0; Real numeratorp = 0; Real3 Vel_i; // get address in grid int3 gridPos = calcGridPos(pos_i); for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); // get start of bucket for this cell uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { // cell is not empty uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { Real3 pos_j = mR3(sortedPosRad[j]); Real3 dist3 = Distance(pos_i, pos_j); Real d = length(dist3); if (d > RESOLUTION_LENGTH_MULT * paramsD.HSML || sortedRhoPreMu[j].w != -1) continue; // OLD VELOCITY IS SHOULD BE OBDATED NOT THE NEW ONE!!!!! 
Real3 Vel_j = sortedVelMas[j]; Real p_j = p_old[j]; Real3 F_j_p = F_p[j]; Real h_j = sortedPosRad[j].w; Real m_j = h_j * h_j * h_j * paramsD.rho0; // Real rhoj = sortedRhoPreMu[j].x; Real h_ij = 0.5 * (h_j + h_i); Real Wd = W3h(d, h_ij); numeratorv += Vel_j * Wd; numeratorp += p_j * Wd + dot(source_term - myAcc, dist3) * sortedRhoPreMu[j].x * Wd; denumenator += Wd; Real3 TobeUsed = (F_i_p / m_i - F_j_p / m_j); my_rho_p += (dT * dT) * m_j * dot(TobeUsed, GradWh(dist3, h_ij)); if (isnan(numeratorp)) printf("Something is wrong here..., %f\n", numeratorp); } } } } } if (abs(denumenator) < EPSILON) { p_new = 0; Vel_i = 2 * V_prescribed; } else { Vel_i = 2 * V_prescribed - numeratorv / denumenator; p_new = numeratorp / denumenator; } Residuals[i_idx] = abs(numeratorp - denumenator * p_old[i_idx]) * a_ii[i_idx]; V_new[i_idx] = Vel_i; } // if (paramsD.ClampPressure && p_new < 0.0) // p_new = 0.0; rho_p[i_idx] = my_rho_p; sortedRhoPreMu[i_idx].y = p_new; } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void Update_AND_Calc_Res(Real3* sortedVelMas, Real4* sortedRhoPreMu, Real* p_old, Real3* V_new, Real* rho_p, Real* rho_np, Real* Residuals, const size_t numAllMarkers, const int Iteration, Real params_relaxation, bool IsSPARSE, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } if (sortedRhoPreMu[i_idx].w <= -2) { return; } // p_i = (1 - relax) * p_old_i + relax * p_i; sortedRhoPreMu[i_idx].y = (1 - params_relaxation) * p_old[i_idx] + params_relaxation * sortedRhoPreMu[i_idx].y; // if(!paramsD.USE_LinearSolver) // p_old[i_idx] = sortedRhoPreMu[i_idx].y; // if (paramsD.ClampPressure && sortedRhoPreMu[i_idx].y < 0) // sortedRhoPreMu[i_idx].y = 0; // Real AbsRes = abs(sortedRhoPreMu[i_idx].y - p_old[i_idx]); // Real Updated_rho = rho_np[i_idx] + rho_p[i_idx]; // Real rho_res = abs(1000 - sortedRhoPreMu[i_idx].x); // Hard-coded for now Real p_res = 0; // p_res = abs(sortedRhoPreMu[i_idx].y - p_old[i_idx]) / (abs(p_old[i_idx]) + 0.00001); p_res = abs(sortedRhoPreMu[i_idx].y - p_old[i_idx]); p_old[i_idx] = sortedRhoPreMu[i_idx].y; Residuals[i_idx] = p_res; } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void CalcForces(Real3* new_vel, // Write Real4* derivVelRhoD, Real4* sortedPosRad, // Read Real3* sortedVelMas, // Read Real4* sortedRhoPreMu, Real* sumWij_inv, Real* p_old, Real3* r_shift, uint* cellStart, uint* cellEnd, Real delta_t, size_t numAllMarkers, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } if (sortedRhoPreMu[i_idx].w <= -2) { sortedRhoPreMu[i_idx].x = 0; sortedRhoPreMu[i_idx].y = 0; sortedRhoPreMu[i_idx].z = 0; return; } // if (sortedRhoPreMu[i_idx].w > -1) { // return; // } Real mu_0 = paramsD.mu0; Real h_i = sortedPosRad[i_idx].w; Real m_i = h_i * h_i * h_i * paramsD.rho0; Real dT = delta_t; Real3 source_term = paramsD.gravity + paramsD.bodyForce3; Real epsilon = paramsD.epsMinMarkersDis; Real3 posi = mR3(sortedPosRad[i_idx]); Real3 Veli = sortedVelMas[i_idx]; Real p_i; // if (sortedRhoPreMu[i_idx].w == -1) p_i = sortedRhoPreMu[i_idx].y; // else // p_i = p_old[i_idx]; Real rho_i = sortedRhoPreMu[i_idx].x; Real3 F_i_mu = mR3(0); Real3 F_i_surface_tension = mR3(0); Real3 F_i_p = mR3(0); if ((sortedRhoPreMu[i_idx].x > 3 * paramsD.rho0 || sortedRhoPreMu[i_idx].x < 0) && 
sortedRhoPreMu[i_idx].w < 0) printf("too large/small density marker %d, type=%f\n", i_idx, sortedRhoPreMu[i_idx].w); Real r0 = 0; int Ni = 0; Real mi_bar = 0; Real3 inner_sum = mR3(0); int3 gridPos = calcGridPos(posi); // get address in grid for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); // get start of bucket for this cell uint startIndex = cellStart[gridHash]; uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { Real3 posj = mR3(sortedPosRad[j]); Real3 rij = Distance(posi, posj); Real d = length(rij); if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2 || i_idx == j) continue; Real3 eij = rij / d; Real h_j = sortedPosRad[j].w; Real m_j = h_j * h_j * h_j * paramsD.rho0; mi_bar += m_j; Ni++; r0 += d; inner_sum += m_j * rij / (d * d * d); Real h_ij = 0.5 * (h_j + h_i); Real Wd = m_j * W3h(d, h_ij); Real3 grad_ij = GradWh(rij, h_ij); Real3 Velj = sortedVelMas[j]; Real p_j = sortedRhoPreMu[j].y; Real rho_j = sortedRhoPreMu[j].x; Real3 V_ij = (Veli - Velj); // Only Consider (fluid-fluid + fluid-solid) or Solid-Fluid Interaction if (sortedRhoPreMu[i_idx].w < 0 || (sortedRhoPreMu[i_idx].w >= 0 && sortedRhoPreMu[j].w < 0)) F_i_p += -m_j * ((p_i / (rho_i * rho_i)) + (p_j / (rho_j * rho_j))) * grad_ij; Real Rho_bar = (rho_j + rho_i) * 0.5; // Real nu = mu_0 * paramsD.HSML * 320 / Rho_bar; // Real3 muNumerator = nu * fminf(0.0, dot(rij, V_ij)) * grad_ij; Real3 muNumerator = 2 * mu_0 * dot(rij, grad_ij) * V_ij; Real muDenominator = (Rho_bar * Rho_bar) * (d * d + paramsD.HSML * paramsD.HSML * epsilon); // Only Consider (fluid-fluid + fluid-solid) or Solid-Fluid Interaction if (sortedRhoPreMu[i_idx].w < 0 || (sortedRhoPreMu[i_idx].w >= 0 && sortedRhoPreMu[j].w < 0)) // if ((sortedRhoPreMu[i_idx].w < 0 && sortedRhoPreMu[j].w < 0)) F_i_mu += m_j * muNumerator / muDenominator; if (!isfinite(length(F_i_mu))) { printf("F_i_np in CalcForces returns Nan or Inf"); } } } } if (Ni != 0) { r0 /= Ni; mi_bar /= Ni; } if (mi_bar > EPSILON) r_shift[i_idx] = paramsD.beta_shifting * r0 * r0 * paramsD.v_Max * dT / mi_bar * inner_sum; // Forces are per unit mass at this point. 
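/*
 * Notes on the two quantities finalized below:
 *  - r_shift is the particle-shifting correction
 *      dr_i = beta_shifting * r0^2 * v_Max * dT / mi_bar * sum_j m_j r_ij / |r_ij|^3,
 *    built from the neighbor averages r0 (spacing) and mi_bar (mass) accumulated above; it is added to
 *    the new velocity as r_shift/dT to keep the particle distribution regular.
 *  - derivVelRhoD stores forces: the per-unit-mass pressure and viscous contributions are multiplied by
 *    m_i, and the gravity/body-force source term is added for fluid markers only (w == -1).
 */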
derivVelRhoD[i_idx] = mR4((F_i_p + F_i_mu) * m_i); // Add the source_term only to the fluid markers if (sortedRhoPreMu[i_idx].w == -1) { derivVelRhoD[i_idx] = derivVelRhoD[i_idx] + mR4(source_term) * m_i; } new_vel[i_idx] = Veli + dT * mR3(derivVelRhoD[i_idx]) / m_i + r_shift[i_idx] / dT; if (!isfinite(length(new_vel[i_idx])) || !isfinite(length(derivVelRhoD[i_idx])) || !isfinite(length(r_shift[i_idx]))) printf("%d= new_vel=%.2f,derivVelRhoD=%.2f,r_shift=%.2f, F_i_p=%f, F_i_mu=%f\n", i_idx, length(new_vel[i_idx]), length(derivVelRhoD[i_idx]), length(r_shift[i_idx]), length(F_i_p), length(F_i_mu)); } } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void FinalizePressure(Real4* sortedPosRad, // Read Real4* sortedRhoPreMu, Real* p_old, Real3* F_p, // Write uint* cellStart, uint* cellEnd, size_t numAllMarkers, Real p_shift, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } if (sortedRhoPreMu[i_idx].w <= -2) { return; } if (!(isfinite(sortedRhoPreMu[i_idx].x) && isfinite(sortedRhoPreMu[i_idx].y) && isfinite(sortedRhoPreMu[i_idx].z) && isfinite(sortedRhoPreMu[i_idx].w))) { printf("rhoPreMu is NAN: thrown from FinalizePressure ! %f,%f,%f\\n", sortedRhoPreMu[i_idx].x, sortedRhoPreMu[i_idx].y, sortedRhoPreMu[i_idx].z); sortedRhoPreMu[i_idx].y = 0.0; } // if (p_shift < 0) sortedRhoPreMu[i_idx].y = p_old[i_idx] + ((paramsD.ClampPressure) ? paramsD.BASEPRES : 0.0); //- p_shift; if (paramsD.ClampPressure && sortedRhoPreMu[i_idx].y < 0) sortedRhoPreMu[i_idx].y = 0; // if (sortedRhoPreMu[i_idx].y < 0) // sortedRhoPreMu[i_idx].y = (p_old[i_idx] > 0) ? p_old[i_idx] : 0.0; if (sortedRhoPreMu[i_idx].y > paramsD.Max_Pressure) sortedRhoPreMu[i_idx].y = paramsD.Max_Pressure; } //-------------------------------------------------------------------------------------------------------------------------------- void ChFsiForceIISPH::calcPressureIISPH(std::shared_ptr<FsiBodiesDataD> otherFsiBodiesD, thrust::device_vector<Real3> pos_fsi_fea_D, thrust::device_vector<Real3> vel_fsi_fea_D, thrust::device_vector<Real3> acc_fsi_fea_D, thrust::device_vector<Real> sumWij_inv, thrust::device_vector<Real>& p_old, thrust::device_vector<Real3> Normals, thrust::device_vector<Real> G_i, thrust::device_vector<Real>& Color) { // Real RES = paramsH->PPE_res; PPESolutionType mySolutionType = paramsH->PPE_Solution_type; std::cout << "time step in calcPressureIISPH " << paramsH->dT << std::endl; double total_step_timeClock = clock(); bool *isErrorH, *isErrorD; isErrorH = (bool*)malloc(sizeof(bool)); cudaMalloc((void**)&isErrorD, sizeof(bool)); *isErrorH = false; cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice); //------------------------------------------------------------------------ // thread per particle uint numThreads, numBlocks; size_t numAllMarkers = (int)numObjectsH->numAllMarkers; computeGridSize((uint)numAllMarkers, 256, numBlocks, numThreads); *isErrorH = false; cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice); thrust::device_vector<Real3> d_ii(numAllMarkers); thrust::device_vector<Real3> V_np(numAllMarkers); thrust::fill(d_ii.begin(), d_ii.end(), mR3(0.0)); thrust::fill(V_np.begin(), V_np.end(), mR3(0.0)); *isErrorH = false; cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice); V_i_np__AND__d_ii_kernel<<<numBlocks, numThreads>>>( mR4CAST(sortedSphMarkersD->posRadD), mR3CAST(sortedSphMarkersD->velMasD), 
mR4CAST(sortedSphMarkersD->rhoPresMuD), mR3CAST(d_ii), mR3CAST(V_np), R1CAST(sumWij_inv), R1CAST(G_i), U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), paramsH->dT, numAllMarkers, isErrorD); cudaDeviceSynchronize(); cudaCheckError(); cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost); if (*isErrorH == true) { throw std::runtime_error("Error! program crashed after F_i_np__AND__d_ii_kernel!\n"); } thrust::device_vector<Real> a_ii(numAllMarkers); thrust::device_vector<Real> rho_np(numAllMarkers); thrust::fill(a_ii.begin(), a_ii.end(), 0.0); thrust::fill(rho_np.begin(), rho_np.end(), 0.0); thrust::fill(p_old.begin(), p_old.end(), 0.0); thrust::device_vector<Real3> summGradW(numAllMarkers); *isErrorH = false; cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice); Rho_np_AND_a_ii_AND_sum_m_GradW<<<numBlocks, numThreads>>>( mR4CAST(sortedSphMarkersD->posRadD), mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(rho_np), R1CAST(a_ii), R1CAST(p_old), mR3CAST(V_np), mR3CAST(d_ii), mR3CAST(summGradW), U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), paramsH->dT, numAllMarkers, isErrorD); cudaDeviceSynchronize(); cudaCheckError(); cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost); if (*isErrorH == true) { throw std::runtime_error("Error! program crashed after F_i_np__AND__d_ii_kernel!\n"); } thrust::device_vector<Real3> V_new(numAllMarkers); thrust::fill(V_new.begin(), V_new.end(), mR3(0.0)); thrust::device_vector<Real> a_ij; thrust::device_vector<Real> B_i(numAllMarkers); thrust::device_vector<uint> csrColIndA; thrust::device_vector<uint> numContacts(numAllMarkers); thrust::device_vector<unsigned long int> GlobalcsrColIndA; thrust::device_vector<Real> csrValA; double durationFormAXB; size_t end_fluid = numObjectsH->numGhostMarkers + numObjectsH->numHelperMarkers + numObjectsH->numFluidMarkers; size_t end_bndry = end_fluid + numObjectsH->numBoundaryMarkers; size_t end_rigid = end_bndry + numObjectsH->numRigidMarkers; size_t end_flex = end_rigid + numObjectsH->numFlexMarkers; int4 updatePortion = mI4((int)end_fluid, (int)end_bndry, (int)end_rigid, (int)end_flex); uint NNZ; if (mySolutionType == PPESolutionType::FORM_SPARSE_MATRIX) { thrust::fill(a_ij.begin(), a_ij.end(), 0.0); thrust::fill(B_i.begin(), B_i.end(), 0.0); // thrust::fill(summGradW.begin(), summGradW.end(), mR3(0.0)); thrust::fill(numContacts.begin(), numContacts.end(), 0.0); //------------------------------------------------------------------------ //------------- MatrixJacobi //------------------------------------------------------------------------ bool SPARSE_FLAG = true; double FormAXBClock = clock(); thrust::device_vector<Real> Residuals(numAllMarkers); thrust::fill(Residuals.begin(), Residuals.end(), 1.0); thrust::device_vector<Real> rho_p(numAllMarkers); thrust::fill(rho_p.begin(), rho_p.end(), 0.0); *isErrorH = false; cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice); CalcNumber_Contacts<<<numBlocks, numThreads>>>( U1CAST(numContacts), mR4CAST(sortedSphMarkersD->posRadD), mR4CAST(sortedSphMarkersD->rhoPresMuD), U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), numAllMarkers, isErrorD); cudaDeviceSynchronize(); cudaCheckError(); cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost); if (*isErrorH == true) { throw std::runtime_error("Error! 
program crashed after CalcNumber_Contacts!\n"); } uint MAX_CONTACT = thrust::reduce(numContacts.begin(), numContacts.end(), 0, thrust::maximum<Real>()); std::cout << "Max contact between SPH particles: " << MAX_CONTACT << std::endl; uint LastVal = numContacts[numAllMarkers - 1]; thrust::exclusive_scan(numContacts.begin(), numContacts.end(), numContacts.begin()); numContacts.push_back(LastVal + numContacts[numAllMarkers - 1]); NNZ = numContacts[numAllMarkers]; csrValA.resize(NNZ); csrColIndA.resize(NNZ); GlobalcsrColIndA.resize(NNZ); thrust::fill(csrValA.begin(), csrValA.end(), 0.0); thrust::fill(GlobalcsrColIndA.begin(), GlobalcsrColIndA.end(), 0.0); thrust::fill(csrColIndA.begin(), csrColIndA.end(), 0.0); cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice); std::cout << "updatePortion of BC: " << updatePortion.x << " " << updatePortion.y << " " << updatePortion.z << " " << updatePortion.w << "\n "; FormAXB<<<numBlocks, numThreads>>>( R1CAST(csrValA), U1CAST(csrColIndA), LU1CAST(GlobalcsrColIndA), U1CAST(numContacts), R1CAST(a_ij), R1CAST(B_i), mR3CAST(d_ii), R1CAST(a_ii), mR3CAST(summGradW), mR4CAST(sortedSphMarkersD->posRadD), mR3CAST(sortedSphMarkersD->velMasD), mR4CAST(sortedSphMarkersD->rhoPresMuD), mR3CAST(V_new), R1CAST(p_old), mR3CAST(Normals), R1CAST(G_i), R1CAST(sumWij_inv), R1CAST(rho_np), mR4CAST(otherFsiBodiesD->q_fsiBodies_D), mR3CAST(fsiGeneralData->rigidSPH_MeshPos_LRF_D), mR3CAST(otherFsiBodiesD->posRigid_fsiBodies_D), mR4CAST(otherFsiBodiesD->velMassRigid_fsiBodies_D), mR3CAST(otherFsiBodiesD->omegaVelLRF_fsiBodies_D), mR3CAST(otherFsiBodiesD->accRigid_fsiBodies_D), mR3CAST(otherFsiBodiesD->omegaAccLRF_fsiBodies_D), U1CAST(fsiGeneralData->rigidIdentifierD), mR3CAST(pos_fsi_fea_D), mR3CAST(vel_fsi_fea_D), mR3CAST(acc_fsi_fea_D), U1CAST(fsiGeneralData->FlexIdentifierD), (int)numObjectsH->numFlexBodies1D, U2CAST(fsiGeneralData->CableElementsNodesD), U4CAST(fsiGeneralData->ShellElementsNodesD), updatePortion, U1CAST(markersProximityD->gridMarkerIndexD), U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), paramsH->dT, numAllMarkers, SPARSE_FLAG, isErrorD); cudaDeviceSynchronize(); cudaCheckError(); cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost); if (*isErrorH == true) { throw std::runtime_error("Error! 
program crashed after F_i_np__AND__d_ii_kernel!\n"); } durationFormAXB = (clock() - FormAXBClock) / (double)CLOCKS_PER_SEC; } //------------------------------------------------------------------------ //------------- Iterative loop //------------------------------------------------------------------------ int Iteration = 0; Real MaxRes = 100; thrust::device_vector<Real> Residuals(numAllMarkers); thrust::fill(Residuals.begin(), Residuals.end(), 1.0); thrust::device_vector<Real3> dij_pj(numAllMarkers); thrust::fill(dij_pj.begin(), dij_pj.end(), mR3(0.0)); thrust::device_vector<Real3> F_p(numAllMarkers); thrust::fill(F_p.begin(), F_p.end(), mR3(0.0)); thrust::device_vector<Real> rho_p(numAllMarkers); thrust::fill(rho_p.begin(), rho_p.end(), 0.0); double LinearSystemClock = clock(); myLinearSolver->SetVerbose(paramsH->Verbose_monitoring); myLinearSolver->SetAbsRes(paramsH->LinearSolver_Abs_Tol); myLinearSolver->SetRelRes(paramsH->LinearSolver_Rel_Tol); myLinearSolver->SetIterationLimit(paramsH->LinearSolver_Max_Iter); if (paramsH->USE_LinearSolver) { if (paramsH->PPE_Solution_type != PPESolutionType::FORM_SPARSE_MATRIX) { printf( "You should paramsH->PPE_Solution_type == FORM_SPARSE_MATRIX in order to use the " "chrono_fsi linear " "solvers\n"); exit(0); } myLinearSolver->Solve((int)numAllMarkers, NNZ, R1CAST(csrValA), U1CAST(numContacts), U1CAST(csrColIndA), R1CAST(p_old), R1CAST(B_i)); cudaCheckError(); } else { while ((MaxRes > paramsH->LinearSolver_Abs_Tol || Iteration < 3) && Iteration < paramsH->LinearSolver_Max_Iter) { *isErrorH = false; cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice); Initialize_Variables<<<numBlocks, numThreads>>>(mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(p_old), mR3CAST(sortedSphMarkersD->velMasD), mR3CAST(V_new), numAllMarkers, isErrorD); cudaDeviceSynchronize(); cudaCheckError(); cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost); if (*isErrorH == true) { throw std::runtime_error("Error! program crashed after Initialize_Variables!\n"); } if (mySolutionType == PPESolutionType::MATRIX_FREE) { *isErrorH = false; cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice); Calc_dij_pj<<<numBlocks, numThreads>>>( mR3CAST(dij_pj), mR3CAST(F_p), mR3CAST(d_ii), mR4CAST(sortedSphMarkersD->posRadD), mR3CAST(sortedSphMarkersD->velMasD), mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(p_old), U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), paramsH->dT, numAllMarkers, isErrorD); cudaDeviceSynchronize(); cudaCheckError(); cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost); if (*isErrorH == true) { throw std::runtime_error("Error! 
program crashed after Calc_dij_pj!\n"); } *isErrorH = false; cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice); Calc_Pressure<<<numBlocks, numThreads>>>( R1CAST(a_ii), mR3CAST(d_ii), mR3CAST(dij_pj), R1CAST(rho_np), R1CAST(rho_p), R1CAST(Residuals), mR3CAST(F_p), mR4CAST(sortedSphMarkersD->posRadD), mR3CAST(sortedSphMarkersD->velMasD), mR4CAST(sortedSphMarkersD->rhoPresMuD), mR4CAST(otherFsiBodiesD->q_fsiBodies_D), mR3CAST(fsiGeneralData->rigidSPH_MeshPos_LRF_D), mR3CAST(otherFsiBodiesD->posRigid_fsiBodies_D), mR4CAST(otherFsiBodiesD->velMassRigid_fsiBodies_D), mR3CAST(otherFsiBodiesD->omegaVelLRF_fsiBodies_D), mR3CAST(otherFsiBodiesD->accRigid_fsiBodies_D), mR3CAST(otherFsiBodiesD->omegaAccLRF_fsiBodies_D), U1CAST(fsiGeneralData->rigidIdentifierD), mR3CAST(pos_fsi_fea_D), mR3CAST(vel_fsi_fea_D), mR3CAST(acc_fsi_fea_D), U1CAST(fsiGeneralData->FlexIdentifierD), (int)numObjectsH->numFlexBodies1D, U2CAST(fsiGeneralData->CableElementsNodesD), U4CAST(fsiGeneralData->ShellElementsNodesD), updatePortion, U1CAST(markersProximityD->gridMarkerIndexD), R1CAST(p_old), mR3CAST(V_new), U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), paramsH->dT, numAllMarkers, isErrorD); cudaDeviceSynchronize(); cudaCheckError(); cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost); if (*isErrorH == true) { throw std::runtime_error("Error! program crashed after Calc_Pressure!\n"); } } if (mySolutionType == PPESolutionType::FORM_SPARSE_MATRIX) { *isErrorH = false; cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice); Calc_Pressure_AXB_USING_CSR<<<numBlocks, numThreads>>>( R1CAST(csrValA), R1CAST(a_ii), U1CAST(csrColIndA), U1CAST(numContacts), mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(sumWij_inv), mR3CAST(sortedSphMarkersD->velMasD), mR3CAST(V_new), R1CAST(p_old), R1CAST(B_i), R1CAST(Residuals), numAllMarkers, isErrorD); cudaDeviceSynchronize(); cudaCheckError(); cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost); if (*isErrorH == true) { throw std::runtime_error("Error! program crashed after Iterative_pressure_update!\n"); } } *isErrorH = false; cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice); Update_AND_Calc_Res<<<numBlocks, numThreads>>>( mR3CAST(sortedSphMarkersD->velMasD), mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(p_old), mR3CAST(V_new), R1CAST(rho_p), R1CAST(rho_np), R1CAST(Residuals), numAllMarkers, Iteration, paramsH->PPE_relaxation, false, isErrorD); cudaDeviceSynchronize(); cudaCheckError(); cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost); if (*isErrorH == true) { throw std::runtime_error("Error! 
program crashed after Iterative_pressure_update!\n"); } Iteration++; thrust::device_vector<Real>::iterator iter = thrust::max_element(Residuals.begin(), Residuals.end()); auto position = iter - Residuals.begin(); MaxRes = *iter; // MaxRes = // thrust::reduce(Residuals.begin(), Residuals.end(), 0.0, thrust::plus<Real>()) / // numObjectsH->numAllMarkers; // Real PMAX = thrust::reduce(p_old.begin(), p_old.end(), 0.0, thrust::maximum<Real>()); // MaxRes = thrust::reduce(Residuals.begin(), Residuals.end(), 0.0, thrust::plus<Real>()) / // numObjectsH->numAllMarkers; // MaxRes = thrust::reduce(Residuals.begin(), Residuals.end(), 0.0, thrust::maximum<Real>()); // Real R_np = thrust::reduce(rho_np.begin(), rho_np.end(), 0.0, thrust::plus<Real>()) / // rho_np.size(); // Real R_p = thrust::reduce(rho_p.begin(), rho_p.end(), 0.0, thrust::plus<Real>()) / // rho_p.size(); // if (paramsH->Verbose_monitoring) printf("Iter= %d, Res= %f\n", Iteration, MaxRes); } } thrust::device_vector<Real>::iterator iter = thrust::min_element(p_old.begin(), p_old.end()); auto position = iter - p_old.begin(); Real shift_p = *iter; // Real shift_p = 0; // This must be run if linear solver is used if (paramsH->USE_LinearSolver || paramsH->ClampPressure) { printf("Shifting pressure values by %f\n", -shift_p); *isErrorH = false; cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice); FinalizePressure<<<numBlocks, numThreads>>>( mR4CAST(sortedSphMarkersD->posRadD), mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(p_old), mR3CAST(F_p), U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), numAllMarkers, shift_p, isErrorD); cudaDeviceSynchronize(); cudaCheckError(); cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost); if (*isErrorH == true) { throw std::runtime_error("Error! 
program crashed after FinalizePressure!\n"); } } double durationLinearSystem = (clock() - LinearSystemClock) / (double)CLOCKS_PER_SEC; double durationtotal_step_time = (clock() - total_step_timeClock) / (double)CLOCKS_PER_SEC; printf("---------------IISPH CLOCK-------------------\n"); printf(" Total: %f \n FormAXB: %f\n Linear System: %f \n", durationtotal_step_time, durationFormAXB, durationLinearSystem); if (!paramsH->USE_LinearSolver) printf(" Iter (Jacobi+SOR)# = %d, to Res= %.3e \n", Iteration, MaxRes); if (paramsH->USE_LinearSolver) if (myLinearSolver->GetSolverStatus()) { std::cout << " Solver converged to " << myLinearSolver->GetResidual() << " tolerance"; std::cout << " after " << myLinearSolver->GetNumIterations() << " iterations" << std::endl; } else { std::cout << "Failed to converge after " << myLinearSolver->GetIterationLimit() << " iterations"; std::cout << " (" << myLinearSolver->GetResidual() << " final residual)" << std::endl; } //------------------------------------------------------------------------ //------------------------------------------------------------------------ cudaFree(isErrorD); free(isErrorH); } void ChFsiForceIISPH::ForceSPH(std::shared_ptr<SphMarkerDataD> otherSphMarkersD, std::shared_ptr<FsiBodiesDataD> otherFsiBodiesD, std::shared_ptr<FsiMeshDataD> otherFsiMeshD) { sphMarkersD = otherSphMarkersD; int numAllMarkers = (int)numObjectsH->numAllMarkers; int numHelperMarkers = (int)numObjectsH->numHelperMarkers; fsiCollisionSystem->ArrangeData(sphMarkersD); thrust::device_vector<Real3>::iterator iter = thrust::max_element(sortedSphMarkersD->velMasD.begin(), sortedSphMarkersD->velMasD.end(), compare_Real3_mag()); Real MaxVel = length(*iter); if (paramsH->Adaptive_time_stepping) { Real dt_CFL = paramsH->Co_number * paramsH->HSML / MaxVel; Real dt_nu = 0.25 * paramsH->HSML * paramsH->HSML / (paramsH->mu0 / paramsH->rho0); Real dt_body = 0.25 * std::sqrt(paramsH->HSML / length(paramsH->bodyForce3 + paramsH->gravity)); Real dt = std::fmin(dt_body, std::fmin(dt_CFL, dt_nu)); if (dt / paramsH->dT_Max > 0.7 && dt / paramsH->dT_Max < 1) paramsH->dT = paramsH->dT_Max * 0.5; else paramsH->dT = std::fmin(dt, paramsH->dT_Max); CopyParams_NumberOfObjects(paramsH, numObjectsH); printf(" time step=%.3e, dt_Max=%.3e, dt_CFL=%.3e (CFL=%.2g), dt_nu=%.3e, dt_body=%.3e\n", paramsH->dT, paramsH->dT_Max, dt_CFL, paramsH->Co_number, dt_nu, dt_body); } bool *isErrorH, *isErrorD, *isErrorD2; isErrorH = (bool*)malloc(sizeof(bool)); cudaMalloc((void**)&isErrorD, sizeof(bool)); cudaMalloc((void**)&isErrorD2, sizeof(bool)); *isErrorH = false; cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice); cudaMemcpy(isErrorD2, isErrorH, sizeof(bool), cudaMemcpyHostToDevice); uint numThreads, numBlocks; computeGridSize(numAllMarkers, 256, numBlocks, numThreads); printf("numBlocks: %d, numThreads: %d, numAllMarker:%d \n", numBlocks, numThreads, numAllMarkers); thrust::device_vector<Real> Color(numAllMarkers); thrust::fill(Color.begin(), Color.end(), 1.0e10); thrust::device_vector<Real> _sumWij_inv(numAllMarkers); thrust::fill(_sumWij_inv.begin(), _sumWij_inv.end(), 0.0); thrust::device_vector<Real> G_i(numAllMarkers * 9); thrust::fill(G_i.begin(), G_i.end(), 0); *isErrorH = false; cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice); thrust::device_vector<uint> Contact_i(numAllMarkers); thrust::fill(Contact_i.begin(), Contact_i.end(), 0); calcRho_kernel<<<numBlocks, numThreads>>>( mR4CAST(sortedSphMarkersD->posRadD), mR4CAST(sortedSphMarkersD->rhoPresMuD), 
R1CAST(_sumWij_inv), U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), U1CAST(Contact_i), numAllMarkers, isErrorD); cudaDeviceSynchronize(); cudaCheckError(); cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost); if (*isErrorH == true) { throw std::runtime_error("Error! program crashed after calcRho_kernel!\n"); } thrust::device_vector<Real3> Normals(numAllMarkers); calcNormalizedRho_kernel<<<numBlocks, numThreads>>>( mR4CAST(sortedSphMarkersD->posRadD), mR3CAST(sortedSphMarkersD->velMasD), mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(_sumWij_inv), R1CAST(G_i), mR3CAST(Normals), R1CAST(Color), U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), numAllMarkers, isErrorD); cudaDeviceSynchronize(); cudaCheckError(); cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost); if (*isErrorH == true) { throw std::runtime_error("Error! program crashed after calcNormalizedRho_kernel!\n"); } thrust::device_vector<Real> p_old(numAllMarkers, 0.0); calcPressureIISPH(otherFsiBodiesD, otherFsiMeshD->pos_fsi_fea_D, otherFsiMeshD->vel_fsi_fea_D, otherFsiMeshD->acc_fsi_fea_D, _sumWij_inv, p_old, Normals, G_i, Color); //------------------------------------------------------------------------ // thread per particle // std::cout << "dT in ForceSPH after calcPressure: " << paramsH->dT << "\n"; double CalcForcesClock = clock(); thrust::fill(vel_vis_Sorted_D.begin(), vel_vis_Sorted_D.end(), mR3(0.0)); thrust::fill(derivVelRhoD_Sorted_D.begin(), derivVelRhoD_Sorted_D.end(), mR4(0.0)); thrust::fill(vel_XSPH_Sorted_D.begin(), vel_XSPH_Sorted_D.end(), mR3(0.0)); thrust::device_vector<Real3> dr_shift(numAllMarkers); thrust::fill(dr_shift.begin(), dr_shift.end(), mR3(0.0)); thrust::device_vector<Real3> NEW_Vel(numAllMarkers, mR3(0.0)); CalcForces<<<numBlocks, numThreads>>>(mR3CAST(NEW_Vel), mR4CAST(derivVelRhoD_Sorted_D), mR4CAST(sortedSphMarkersD->posRadD), mR3CAST(sortedSphMarkersD->velMasD), mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(_sumWij_inv), R1CAST(p_old), mR3CAST(dr_shift), U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), paramsH->dT, numAllMarkers, isErrorD); cudaDeviceSynchronize(); cudaCheckError(); cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost); if (*isErrorH == true) { throw std::runtime_error("Error! program crashed in CalcForces!\n"); } double calcforce = (clock() - CalcForcesClock) / (double)CLOCKS_PER_SEC; printf(" Force Computation: %f \n", calcforce); double UpdateClock = clock(); sortedSphMarkersD->velMasD = NEW_Vel; UpdateDensity<<<numBlocks, numThreads>>>( mR3CAST(vel_vis_Sorted_D), mR3CAST(vel_XSPH_Sorted_D), mR3CAST(sortedSphMarkersD->velMasD), mR4CAST(sortedSphMarkersD->posRadD), mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(_sumWij_inv), U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), numAllMarkers, isErrorD); cudaDeviceSynchronize(); cudaCheckError(); cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost); if (*isErrorH == true) { throw std::runtime_error("Error! 
program crashed in UpdateDensity!\n"); } CopySortedToOriginal_NonInvasive_R3(fsiGeneralData->vel_XSPH_D, vel_XSPH_Sorted_D, markersProximityD->gridMarkerIndexD); CopySortedToOriginal_NonInvasive_R3(fsiGeneralData->vis_vel_SPH_D, vel_vis_Sorted_D, markersProximityD->gridMarkerIndexD); CopySortedToOriginal_NonInvasive_R3(sphMarkersD->velMasD, sortedSphMarkersD->velMasD, markersProximityD->gridMarkerIndexD); CopySortedToOriginal_NonInvasive_R4(sphMarkersD->posRadD, sortedSphMarkersD->posRadD, markersProximityD->gridMarkerIndexD); CopySortedToOriginal_NonInvasive_R4(sphMarkersD->rhoPresMuD, sortedSphMarkersD->rhoPresMuD, markersProximityD->gridMarkerIndexD); CopySortedToOriginal_NonInvasive_R4(fsiGeneralData->derivVelRhoD, derivVelRhoD_Sorted_D, markersProximityD->gridMarkerIndexD); printf(" Update information: %f \n", (clock() - UpdateClock) / (double)CLOCKS_PER_SEC); printf("----------------------------------------------\n"); } } // namespace fsi } // namespace chrono
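/*
 * Overview of the ForceSPH() update sequence implemented above:
 *   1. fsiCollisionSystem->ArrangeData() sorts the SPH markers into the uniform grid (cellStart/cellEnd);
 *      with Adaptive_time_stepping, dT is limited by the CFL, viscous, and body-force criteria.
 *   2. calcRho_kernel / calcNormalizedRho_kernel build sumWij_inv, the kernel-correction matrix G_i,
 *      and the surface normals used by the boundary rows of the pressure system.
 *   3. calcPressureIISPH() assembles the pressure Poisson equation (optionally in CSR form via
 *      CalcNumber_Contacts + FormAXB) and solves it with Jacobi/SOR iterations or the chrono_fsi
 *      linear solver; FinalizePressure() then applies the pressure clamping/shifting.
 *   4. CalcForces computes pressure and viscous forces plus the shifting vector, UpdateDensity refreshes
 *      the density and the XSPH/visualization velocities, and CopySortedToOriginal_NonInvasive_* scatter
 *      the sorted arrays back to the original marker ordering.
 */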
3f4a928aff4c50dec796371c694d347a22e50b6c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* Vector addition: C = A + B. * * This sample is a very basic sample that implements element by element * vector addition. It is the same as the sample illustrating Chapter 3 * of the programming guide with some additions like error checking. * */ #include "../../matrix/constantes_gpu.h" #include "stdio.h" //#define OFFSET 0.00 #define OFFSET 0.15 #define signc(_X) ((_X<0.0)?0:1) // 1 => NEGATIF #define fabsc(_X) ((_X<0.0)?-_X:_X) // #define invc(S,_X) ((S==1)?_X:-_X) // SI 1=> ALORS CHANGEMENT DE SIGNE __global__ void __launch_bounds__(128, 2) LDPC_Sched_Stage_1_MS(float var_nodes[_N], float var_mesgs[_M], unsigned int PosNoeudsVariable[_M], unsigned int loops ) { int i = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x; int ii = blockDim.x * blockDim.y * gridDim.x; // A VERIFIER !!!! __shared__ unsigned int iTable[DEG_1]; float tab_vContr[DEG_1]; // // ON UTILISE UNE PETITE ASTUCE AFIN D'ACCELERER LA SIMULATION DU DECODEUR // loops -= 1; { float *p_msg1w = var_mesgs + i; // POINTEUR MESG_C_2_V (pour l'criture) unsigned int *p_indice_nod1 = PosNoeudsVariable; for (int z = 0; z <DEG_1_COMPUTATIONS; z++) { int sign_du_check = 0; float min1 = 1000000.0; float min2 = 1000000.0; __syncthreads(); if( threadIdx.x < DEG_1){ iTable[threadIdx.x] = p_indice_nod1[threadIdx.x]; } __syncthreads(); p_indice_nod1 += DEG_1; #pragma unroll for (int j = 0; j<DEG_1; j++) { int iAddr = iTable[j] * ii + i; // Ieme INDEX (NODE INDICE) tab_vContr[j] = var_nodes[ iAddr ]; // CALCUL DE LA Ieme CONTRIBUTION min2 = fminf(min2, fmaxf(fabsf(tab_vContr[j]), min1)); min1 = fminf(min1, fabsf(tab_vContr[j])); sign_du_check = sign_du_check ^ signc(tab_vContr[j]); } float cste_1 = min2; float cste_2 = min1; #pragma unroll for (int j = 0; j < DEG_1; j++) { float tab_esultat; if (fabsf(tab_vContr[j]) == min1) { tab_esultat = cste_1; } else { tab_esultat = cste_2; } int sign_msg = sign_du_check ^ signc(tab_vContr[j]); float msg_sortant= invc(sign_msg, tab_esultat); *p_msg1w = msg_sortant; p_msg1w += ii; var_nodes[ iTable[j] * ii + i ] = tab_vContr[j] + msg_sortant; } } #if NB_DEGRES > 1 for (int z = 0; z <DEG_2_COMPUTATIONS; z++) { int sign_du_check = 0; float min1 = 1000000.0; float min2 = 1000000.0; __syncthreads(); if( threadIdx.x < DEG_2){ iTable[threadIdx.x] = p_indice_nod1[threadIdx.x]; } __syncthreads(); p_indice_nod1 += DEG_2; #pragma unroll for (int j = 0; j<DEG_2; j++) { int iAddr = iTable[j] * ii + i; // Ieme INDEX (NODE INDICE) tab_vContr[j] = var_nodes[ iAddr ]; // CALCUL DE LA Ieme CONTRIBUTION min2 = fminf(min2, fmaxf(fabsf(tab_vContr[j]), min1)); min1 = fminf(min1, fabsf(tab_vContr[j])); sign_du_check = sign_du_check ^ signc(tab_vContr[j]); } float cste_1 = min2; float cste_2 = min1; #pragma unroll for (int j = 0; j < DEG_2; j++) { float tab_esultat; if (fabsf(tab_vContr[j]) == min1) { tab_esultat = cste_1; } else { tab_esultat = cste_2; } int sign_msg = sign_du_check ^ signc(tab_vContr[j]); //!sign_bit float msg_sortant = invc(sign_msg, tab_esultat); *p_msg1w = msg_sortant; p_msg1w += ii; var_nodes[ iTable[j] * ii 
+ i ] = tab_vContr[j] + msg_sortant; } } #endif } while( loops-- ){ float *p_msg1r = var_mesgs + i; float *p_msg1w = var_mesgs + i; const unsigned int *p_indice_nod1 = PosNoeudsVariable; for (int z = 0; z <DEG_1_COMPUTATIONS; z++) { int sign_du_check = 0; float min1 = 1000000.0; float min2 = 1000000.0; __syncthreads(); if( threadIdx.x < DEG_1){ iTable[threadIdx.x] = p_indice_nod1[threadIdx.x]; } __syncthreads(); p_indice_nod1 += DEG_1; // // ON PREFETCH LES DONNEES (VN) // #pragma unroll for (int j = 0; j<DEG_1; j++) { int iAddr = iTable[j] * ii + i; tab_vContr[j] = var_nodes[ iAddr ]; } // // ON PREFETCH LES DONNEES (MSG) // float prefetch_msg[DEG_1]; #pragma unroll for (int j = 0; j<DEG_1; j++) { prefetch_msg[j] = (*p_msg1r); p_msg1r += ii; } // // ON CALCULE LES CONTRIBUTIONS // #pragma unroll for (int j = 0; j<DEG_1; j++) { tab_vContr[j] = tab_vContr[j] - prefetch_msg[j]; } // // ON CALCULE LA VALEUR DE CN // #pragma unroll for (int j = 0; j<DEG_1; j++) { min2 = fminf(min2, fmaxf(fabsf(tab_vContr[j]), min1)); min1 = fminf(min1, fabsf(tab_vContr[j])); sign_du_check = sign_du_check ^ signc(tab_vContr[j]); } float cste_1 = min2; float cste_2 = min1; float output_msg[DEG_1]; #pragma unroll for (int j = 0; j < DEG_1; j++) { float tab_esultat; if (fabsf(tab_vContr[j]) == min1) { tab_esultat = cste_1; } else { tab_esultat = cste_2; } int sign_msg = sign_du_check ^ signc(tab_vContr[j]); output_msg[j] = invc(sign_msg, tab_esultat); } // // ON TRANSMET LES VALEURS DE VN/MSG // #pragma unroll for (int j = 0; j < DEG_1; j++) { int iAddr = iTable[j] * ii + i; var_nodes[ iAddr ] = tab_vContr[j] + output_msg[j]; *p_msg1w = output_msg[j]; p_msg1w += ii; } } #if NB_DEGRES > 1 for (int z = 0; z <DEG_2_COMPUTATIONS; z++) { int sign_du_check = 0; float min1 = 1000000.0; float min2 = 1000000.0; __syncthreads(); if( threadIdx.x < DEG_2){ iTable[threadIdx.x] = p_indice_nod1[threadIdx.x]; } __syncthreads(); p_indice_nod1 += DEG_2; #pragma unroll for (int j = 0; j<DEG_2; j++) { int iAddr = iTable[j] * ii + i; // Ieme INDEX (NODE INDICE) tab_vContr[j] = var_nodes[ iAddr ] - (*p_msg1r); // CALCUL DE LA Ieme CONTRIBUTION p_msg1r += ii; min2 = fminf(min2, fmaxf(fabsf(tab_vContr[j]), min1)); min1 = fminf(min1, fabsf(tab_vContr[j])); sign_du_check = sign_du_check ^ signc(tab_vContr[j]); } float cste_1 = min2; float cste_2 = min1; #pragma unroll for (int j = 0; j < DEG_2; j++) { float tab_esultat; if (fabsf(tab_vContr[j]) == min1) { tab_esultat = cste_1; } else { tab_esultat = cste_2; } int sign_msg = sign_du_check ^ signc(tab_vContr[j]); //!sign_bit float msg_sortant = invc(sign_msg, tab_esultat); *p_msg1w = msg_sortant; p_msg1w += ii; var_nodes[ iTable[j] * ii + i ] = tab_vContr[j] + msg_sortant; } } #endif } }
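/*
 * Check-node update notes: the kernel uses the classic two-minimum Min-Sum trick. For each parity row,
 * min1/min2 track the smallest and second-smallest |LLR| while sign_du_check XOR-accumulates the sign
 * bits; each outgoing message then takes magnitude min2 when the edge itself carries min1 (self-exclusion)
 * and min1 otherwise, with its sign restored through invc(). iTable stages the DEG_1/DEG_2 column indices
 * in shared memory, and data use stride ii = blockDim.x*blockDim.y*gridDim.x so that each thread decodes
 * one interleaved frame with coalesced accesses.
 *
 * Minimal host-side launch sketch (illustrative only; the frame count, block geometry, and the sizes
 * _N/_M come from constantes_gpu.h and the host code, which are not part of this file — d_var_nodes,
 * d_var_mesgs, d_PosNoeudsVariable, nb_frames, and nb_iters are hypothetical names):
 *
 *   dim3 block(128);                         // matches __launch_bounds__(128, 2)
 *   dim3 grid(nb_frames / block.x);          // assumes one frame per thread, 1-D grid
 *   hipLaunchKernelGGL(LDPC_Sched_Stage_1_MS, grid, block, 0, 0,
 *                      d_var_nodes, d_var_mesgs, d_PosNoeudsVariable, nb_iters);
 */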
3f4a928aff4c50dec796371c694d347a22e50b6c.cu
/* * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* Vector addition: C = A + B. * * This sample is a very basic sample that implements element by element * vector addition. It is the same as the sample illustrating Chapter 3 * of the programming guide with some additions like error checking. * */ #include "../../matrix/constantes_gpu.h" #include "stdio.h" //#define OFFSET 0.00 #define OFFSET 0.15 #define signc(_X) ((_X<0.0)?0:1) // 1 => NEGATIF #define fabsc(_X) ((_X<0.0)?-_X:_X) // #define invc(S,_X) ((S==1)?_X:-_X) // SI 1=> ALORS CHANGEMENT DE SIGNE __global__ void __launch_bounds__(128, 2) LDPC_Sched_Stage_1_MS(float var_nodes[_N], float var_mesgs[_M], unsigned int PosNoeudsVariable[_M], unsigned int loops ) { int i = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x; int ii = blockDim.x * blockDim.y * gridDim.x; // A VERIFIER !!!! __shared__ unsigned int iTable[DEG_1]; float tab_vContr[DEG_1]; // // ON UTILISE UNE PETITE ASTUCE AFIN D'ACCELERER LA SIMULATION DU DECODEUR // loops -= 1; { float *p_msg1w = var_mesgs + i; // POINTEUR MESG_C_2_V (pour l'�criture) unsigned int *p_indice_nod1 = PosNoeudsVariable; for (int z = 0; z <DEG_1_COMPUTATIONS; z++) { int sign_du_check = 0; float min1 = 1000000.0; float min2 = 1000000.0; __syncthreads(); if( threadIdx.x < DEG_1){ iTable[threadIdx.x] = p_indice_nod1[threadIdx.x]; } __syncthreads(); p_indice_nod1 += DEG_1; #pragma unroll for (int j = 0; j<DEG_1; j++) { int iAddr = iTable[j] * ii + i; // Ieme INDEX (NODE INDICE) tab_vContr[j] = var_nodes[ iAddr ]; // CALCUL DE LA Ieme CONTRIBUTION min2 = fminf(min2, fmaxf(fabsf(tab_vContr[j]), min1)); min1 = fminf(min1, fabsf(tab_vContr[j])); sign_du_check = sign_du_check ^ signc(tab_vContr[j]); } float cste_1 = min2; float cste_2 = min1; #pragma unroll for (int j = 0; j < DEG_1; j++) { float tab_esultat; if (fabsf(tab_vContr[j]) == min1) { tab_esultat = cste_1; } else { tab_esultat = cste_2; } int sign_msg = sign_du_check ^ signc(tab_vContr[j]); float msg_sortant= invc(sign_msg, tab_esultat); *p_msg1w = msg_sortant; p_msg1w += ii; var_nodes[ iTable[j] * ii + i ] = tab_vContr[j] + msg_sortant; } } #if NB_DEGRES > 1 for (int z = 0; z <DEG_2_COMPUTATIONS; z++) { int sign_du_check = 0; float min1 = 1000000.0; float min2 = 1000000.0; __syncthreads(); if( threadIdx.x < DEG_2){ iTable[threadIdx.x] = p_indice_nod1[threadIdx.x]; } __syncthreads(); p_indice_nod1 += DEG_2; #pragma unroll for (int j = 0; j<DEG_2; j++) { int iAddr = iTable[j] * ii + i; // Ieme INDEX (NODE INDICE) tab_vContr[j] = var_nodes[ iAddr ]; // CALCUL DE LA Ieme CONTRIBUTION min2 = fminf(min2, fmaxf(fabsf(tab_vContr[j]), min1)); min1 = fminf(min1, fabsf(tab_vContr[j])); sign_du_check = sign_du_check ^ signc(tab_vContr[j]); } float cste_1 = min2; float cste_2 = min1; #pragma unroll for (int j = 0; j < DEG_2; j++) { float tab_esultat; if (fabsf(tab_vContr[j]) == min1) { tab_esultat = cste_1; } else { tab_esultat = cste_2; } int sign_msg = sign_du_check ^ signc(tab_vContr[j]); //!sign_bit float msg_sortant = invc(sign_msg, tab_esultat); *p_msg1w = msg_sortant; p_msg1w += ii; var_nodes[ iTable[j] * ii + i ] = tab_vContr[j] + msg_sortant; } } #endif } while( loops-- ){ float *p_msg1r = 
var_mesgs + i; float *p_msg1w = var_mesgs + i; const unsigned int *p_indice_nod1 = PosNoeudsVariable; for (int z = 0; z <DEG_1_COMPUTATIONS; z++) { int sign_du_check = 0; float min1 = 1000000.0; float min2 = 1000000.0; __syncthreads(); if( threadIdx.x < DEG_1){ iTable[threadIdx.x] = p_indice_nod1[threadIdx.x]; } __syncthreads(); p_indice_nod1 += DEG_1; // // ON PREFETCH LES DONNEES (VN) // #pragma unroll for (int j = 0; j<DEG_1; j++) { int iAddr = iTable[j] * ii + i; tab_vContr[j] = var_nodes[ iAddr ]; } // // ON PREFETCH LES DONNEES (MSG) // float prefetch_msg[DEG_1]; #pragma unroll for (int j = 0; j<DEG_1; j++) { prefetch_msg[j] = (*p_msg1r); p_msg1r += ii; } // // ON CALCULE LES CONTRIBUTIONS // #pragma unroll for (int j = 0; j<DEG_1; j++) { tab_vContr[j] = tab_vContr[j] - prefetch_msg[j]; } // // ON CALCULE LA VALEUR DE CN // #pragma unroll for (int j = 0; j<DEG_1; j++) { min2 = fminf(min2, fmaxf(fabsf(tab_vContr[j]), min1)); min1 = fminf(min1, fabsf(tab_vContr[j])); sign_du_check = sign_du_check ^ signc(tab_vContr[j]); } float cste_1 = min2; float cste_2 = min1; float output_msg[DEG_1]; #pragma unroll for (int j = 0; j < DEG_1; j++) { float tab_esultat; if (fabsf(tab_vContr[j]) == min1) { tab_esultat = cste_1; } else { tab_esultat = cste_2; } int sign_msg = sign_du_check ^ signc(tab_vContr[j]); output_msg[j] = invc(sign_msg, tab_esultat); } // // ON TRANSMET LES VALEURS DE VN/MSG // #pragma unroll for (int j = 0; j < DEG_1; j++) { int iAddr = iTable[j] * ii + i; var_nodes[ iAddr ] = tab_vContr[j] + output_msg[j]; *p_msg1w = output_msg[j]; p_msg1w += ii; } } #if NB_DEGRES > 1 for (int z = 0; z <DEG_2_COMPUTATIONS; z++) { int sign_du_check = 0; float min1 = 1000000.0; float min2 = 1000000.0; __syncthreads(); if( threadIdx.x < DEG_2){ iTable[threadIdx.x] = p_indice_nod1[threadIdx.x]; } __syncthreads(); p_indice_nod1 += DEG_2; #pragma unroll for (int j = 0; j<DEG_2; j++) { int iAddr = iTable[j] * ii + i; // Ieme INDEX (NODE INDICE) tab_vContr[j] = var_nodes[ iAddr ] - (*p_msg1r); // CALCUL DE LA Ieme CONTRIBUTION p_msg1r += ii; min2 = fminf(min2, fmaxf(fabsf(tab_vContr[j]), min1)); min1 = fminf(min1, fabsf(tab_vContr[j])); sign_du_check = sign_du_check ^ signc(tab_vContr[j]); } float cste_1 = min2; float cste_2 = min1; #pragma unroll for (int j = 0; j < DEG_2; j++) { float tab_esultat; if (fabsf(tab_vContr[j]) == min1) { tab_esultat = cste_1; } else { tab_esultat = cste_2; } int sign_msg = sign_du_check ^ signc(tab_vContr[j]); //!sign_bit float msg_sortant = invc(sign_msg, tab_esultat); *p_msg1w = msg_sortant; p_msg1w += ii; var_nodes[ iTable[j] * ii + i ] = tab_vContr[j] + msg_sortant; } } #endif } }
7be92ae588b43c520a482bf9e208bc3f63a23845.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* This work is part of the Core Imaging Library developed by Visual Analytics and Imaging System Group of the Science Technology Facilities Council, STFC Copyright 2017 Daniil Kazantsev Copyright 2017 Srikanth Nagella, Edoardo Pasca Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "LLT_ROF_GPU_core.h" #include "shared.h" /* CUDA implementation of Lysaker, Lundervold and Tai (LLT) model [1] combined with Rudin-Osher-Fatemi [2] TV regularisation penalty. * * This penalty can deliver visually pleasant piecewise-smooth recovery if regularisation parameters are selected well. * The rule of thumb for selection is to start with lambdaLLT = 0 (just the ROF-TV model) and then proceed to increase * lambdaLLT starting with smaller values. * * Input Parameters: * 1. U0 - original noise image/volume * 2. lambdaROF - ROF-related regularisation parameter * 3. lambdaLLT - LLT-related regularisation parameter * 4. tau - time-marching step * 5. iter - iterations number (for both models) * * Output: * Filtered/regularised image * * References: * [1] Lysaker, M., Lundervold, A. and Tai, X.C., 2003. Noise removal using fourth-order partial differential equation with applications to medical magnetic resonance images in space and time. IEEE Transactions on image processing, 12(12), pp.1579-1590. * [2] Rudin, Osher, Fatemi, "Nonlinear Total Variation based noise removal algorithms" */ #define BLKXSIZE 8 #define BLKYSIZE 8 #define BLKZSIZE 8 #define BLKXSIZE2D 16 #define BLKYSIZE2D 16 #define EPS_LLT 0.01 #define EPS_ROF 1.0e-12 #define idivup(a, b) ( ((a)%(b) != 0) ? (a)/(b)+1 : (a)/(b) ) #define MAX(x, y) (((x) > (y)) ? (x) : (y)) #define MIN(x, y) (((x) < (y)) ? 
(x) : (y)) __host__ __device__ int signLLT (float x) { return (x > 0) - (x < 0); } /*************************************************************************/ /**********************LLT-related functions *****************************/ /*************************************************************************/ __global__ void der2D_LLT_kernel(float *U, float *D1, float *D2, int dimX, int dimY) { int i_p, i_m, j_m, j_p; float dxx, dyy, denom_xx, denom_yy; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int index = i + dimX*j; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY)) { /* symmetric boundary conditions (Neuman) */ i_p = i + 1; if (i_p == dimX) i_p = i - 1; i_m = i - 1; if (i_m < 0) i_m = i + 1; j_p = j + 1; if (j_p == dimY) j_p = j - 1; j_m = j - 1; if (j_m < 0) j_m = j + 1; dxx = U[j*dimX+i_p] - 2.0f*U[index] + U[j*dimX+i_m]; dyy = U[j_p*dimX+i] - 2.0f*U[index] + U[j_m*dimX+i]; denom_xx = abs(dxx) + EPS_LLT; denom_yy = abs(dyy) + EPS_LLT; D1[index] = dxx / denom_xx; D2[index] = dyy / denom_yy; } } __global__ void der3D_LLT_kernel(float* U, float *D1, float *D2, float *D3, int dimX, int dimY, int dimZ) { int i_p, i_m, j_m, j_p, k_p, k_m; float dxx, dyy, dzz, denom_xx, denom_yy, denom_zz; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY) && (k >= 0) && (k < dimZ)) { /* symmetric boundary conditions (Neuman) */ i_p = i + 1; if (i_p == dimX) i_p = i - 1; i_m = i - 1; if (i_m < 0) i_m = i + 1; j_p = j + 1; if (j_p == dimY) j_p = j - 1; j_m = j - 1; if (j_m < 0) j_m = j + 1; k_p = k + 1; if (k_p == dimZ) k_p = k - 1; k_m = k - 1; if (k_m < 0) k_m = k + 1; int index = (dimX*dimY)*k + j*dimX+i; dxx = U[(dimX*dimY)*k + j*dimX+i_p] - 2.0f*U[index] + U[(dimX*dimY)*k + j*dimX+i_m]; dyy = U[(dimX*dimY)*k + j_p*dimX+i] - 2.0f*U[index] + U[(dimX*dimY)*k + j_m*dimX+i]; dzz = U[(dimX*dimY)*k_p + j*dimX+i] - 2.0f*U[index] + U[(dimX*dimY)*k_m + j*dimX+i]; denom_xx = abs(dxx) + EPS_LLT; denom_yy = abs(dyy) + EPS_LLT; denom_zz = abs(dzz) + EPS_LLT; D1[index] = dxx / denom_xx; D2[index] = dyy / denom_yy; D3[index] = dzz / denom_zz; } } /*************************************************************************/ /**********************ROF-related functions *****************************/ /*************************************************************************/ /* first-order differences 1 */ __global__ void D1_func2D_ROF_kernel(float* Input, float* D1, int N, int M) { int i1, j1, i2; float NOMx_1,NOMy_1,NOMy_0,denom1,denom2,T1; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int index = i + N*j; if ((i >= 0) && (i < N) && (j >= 0) && (j < M)) { /* boundary conditions (Neumann reflections) */ i1 = i + 1; if (i1 >= N) i1 = i-1; i2 = i - 1; if (i2 < 0) i2 = i+1; j1 = j + 1; if (j1 >= M) j1 = j-1; /* Forward-backward differences */ NOMx_1 = Input[j1*N + i] - Input[index]; /* x+ */ NOMy_1 = Input[j*N + i1] - Input[index]; /* y+ */ NOMy_0 = Input[index] - Input[j*N + i2]; /* y- */ denom1 = NOMx_1*NOMx_1; denom2 = 0.5f*(signLLT((float)NOMy_1) + signLLT((float)NOMy_0))*(MIN(abs((float)NOMy_1),abs((float)NOMy_0))); denom2 = denom2*denom2; T1 = sqrt(denom1 + denom2 + EPS_ROF); D1[index] = NOMx_1/T1; } } /* differences 2 */ __global__ void D2_func2D_ROF_kernel(float* Input, float* D2, int N, int M) { int i1, j1, j2; float NOMx_1,NOMy_1,NOMx_0,denom1,denom2,T2; int i = blockDim.x * 
blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int index = i + N*j; if ((i >= 0) && (i < (N)) && (j >= 0) && (j < (M))) { /* boundary conditions (Neumann reflections) */ i1 = i + 1; if (i1 >= N) i1 = i-1; j1 = j + 1; if (j1 >= M) j1 = j-1; j2 = j - 1; if (j2 < 0) j2 = j+1; /* Forward-backward differences */ NOMx_1 = Input[j1*N + i] - Input[index]; /* x+ */ NOMy_1 = Input[j*N + i1] - Input[index]; /* y+ */ NOMx_0 = Input[index] - Input[j2*N + i]; /* x- */ denom1 = NOMy_1*NOMy_1; denom2 = 0.5f*(signLLT((float)NOMx_1) + signLLT((float)NOMx_0))*(MIN(abs((float)NOMx_1),abs((float)NOMx_0))); denom2 = denom2*denom2; T2 = sqrt(denom1 + denom2 + EPS_ROF); D2[index] = NOMy_1/T2; } } /* differences 1 */ __global__ void D1_func3D_ROF_kernel(float* Input, float* D1, int dimX, int dimY, int dimZ) { float NOMx_1, NOMy_1, NOMy_0, NOMz_1, NOMz_0, denom1, denom2,denom3, T1; int i1,i2,k1,j1,j2,k2; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (dimX*dimY)*k + j*dimX+i; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY) && (k >= 0) && (k < dimZ)) { /* symmetric boundary conditions (Neuman) */ i1 = i + 1; if (i1 >= dimX) i1 = i-1; i2 = i - 1; if (i2 < 0) i2 = i+1; j1 = j + 1; if (j1 >= dimY) j1 = j-1; j2 = j - 1; if (j2 < 0) j2 = j+1; k1 = k + 1; if (k1 >= dimZ) k1 = k-1; k2 = k - 1; if (k2 < 0) k2 = k+1; /* Forward-backward differences */ NOMx_1 = Input[(dimX*dimY)*k + j1*dimX + i] - Input[index]; /* x+ */ NOMy_1 = Input[(dimX*dimY)*k + j*dimX + i1] - Input[index]; /* y+ */ NOMy_0 = Input[index] - Input[(dimX*dimY)*k + j*dimX + i2]; /* y- */ NOMz_1 = Input[(dimX*dimY)*k1 + j*dimX + i] - Input[index]; /* z+ */ NOMz_0 = Input[index] - Input[(dimX*dimY)*k2 + j*dimX + i]; /* z- */ denom1 = NOMx_1*NOMx_1; denom2 = 0.5*(signLLT(NOMy_1) + signLLT(NOMy_0))*(MIN(abs(NOMy_1),abs(NOMy_0))); denom2 = denom2*denom2; denom3 = 0.5*(signLLT(NOMz_1) + signLLT(NOMz_0))*(MIN(abs(NOMz_1),abs(NOMz_0))); denom3 = denom3*denom3; T1 = sqrt(denom1 + denom2 + denom3 + EPS_ROF); D1[index] = NOMx_1/T1; } } /* differences 2 */ __global__ void D2_func3D_ROF_kernel(float* Input, float* D2, int dimX, int dimY, int dimZ) { float NOMx_1, NOMy_1, NOMx_0, NOMz_1, NOMz_0, denom1, denom2, denom3, T2; int i1,i2,k1,j1,j2,k2; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (dimX*dimY)*k + j*dimX+i; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY) && (k >= 0) && (k < dimZ)) { /* symmetric boundary conditions (Neuman) */ i1 = i + 1; if (i1 >= dimX) i1 = i-1; i2 = i - 1; if (i2 < 0) i2 = i+1; j1 = j + 1; if (j1 >= dimY) j1 = j-1; j2 = j - 1; if (j2 < 0) j2 = j+1; k1 = k + 1; if (k1 >= dimZ) k1 = k-1; k2 = k - 1; if (k2 < 0) k2 = k+1; /* Forward-backward differences */ NOMx_1 = Input[(dimX*dimY)*k + (j1)*dimX + i] - Input[index]; /* x+ */ NOMy_1 = Input[(dimX*dimY)*k + (j)*dimX + i1] - Input[index]; /* y+ */ NOMx_0 = Input[index] - Input[(dimX*dimY)*k + (j2)*dimX + i]; /* x- */ NOMz_1 = Input[(dimX*dimY)*k1 + j*dimX + i] - Input[index]; /* z+ */ NOMz_0 = Input[index] - Input[(dimX*dimY)*k2 + (j)*dimX + i]; /* z- */ denom1 = NOMy_1*NOMy_1; denom2 = 0.5*(signLLT(NOMx_1) + signLLT(NOMx_0))*(MIN(abs(NOMx_1),abs(NOMx_0))); denom2 = denom2*denom2; denom3 = 0.5*(signLLT(NOMz_1) + signLLT(NOMz_0))*(MIN(abs(NOMz_1),abs(NOMz_0))); denom3 = denom3*denom3; T2 = sqrt(denom1 + denom2 + denom3 + EPS_ROF); D2[index] = NOMy_1/T2; } 
} /* differences 3 */ __global__ void D3_func3D_ROF_kernel(float* Input, float* D3, int dimX, int dimY, int dimZ) { float NOMx_1, NOMy_1, NOMx_0, NOMy_0, NOMz_1, denom1, denom2, denom3, T3; int i1,i2,k1,j1,j2,k2; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (dimX*dimY)*k + j*dimX+i; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY) && (k >= 0) && (k < dimZ)) { i1 = i + 1; if (i1 >= dimX) i1 = i-1; i2 = i - 1; if (i2 < 0) i2 = i+1; j1 = j + 1; if (j1 >= dimY) j1 = j-1; j2 = j - 1; if (j2 < 0) j2 = j+1; k1 = k + 1; if (k1 >= dimZ) k1 = k-1; k2 = k - 1; if (k2 < 0) k2 = k+1; /* Forward-backward differences */ NOMx_1 = Input[(dimX*dimY)*k + (j1)*dimX + i] - Input[index]; /* x+ */ NOMy_1 = Input[(dimX*dimY)*k + (j)*dimX + i1] - Input[index]; /* y+ */ NOMy_0 = Input[index] - Input[(dimX*dimY)*k + (j)*dimX + i2]; /* y- */ NOMx_0 = Input[index] - Input[(dimX*dimY)*k + (j2)*dimX + i]; /* x- */ NOMz_1 = Input[(dimX*dimY)*k1 + j*dimX + i] - Input[index]; /* z+ */ denom1 = NOMz_1*NOMz_1; denom2 = 0.5*(signLLT(NOMx_1) + signLLT(NOMx_0))*(MIN(abs(NOMx_1),abs(NOMx_0))); denom2 = denom2*denom2; denom3 = 0.5*(signLLT(NOMy_1) + signLLT(NOMy_0))*(MIN(abs(NOMy_1),abs(NOMy_0))); denom3 = denom3*denom3; T3 = sqrt(denom1 + denom2 + denom3 + EPS_ROF); D3[index] = NOMz_1/T3; } } /*************************************************************************/ /**********************ROF-LLT-related functions *************************/ /*************************************************************************/ __global__ void Update2D_LLT_ROF_kernel(float *U0, float *U, float *D1_LLT, float *D2_LLT, float *D1_ROF, float *D2_ROF, float lambdaROF, float lambdaLLT, float tau, int dimX, int dimY) { int i_p, i_m, j_m, j_p; float div, laplc, dxx, dyy, dv1, dv2; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int index = i + dimX*j; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY)) { /* symmetric boundary conditions (Neuman) */ i_p = i + 1; if (i_p == dimX) i_p = i - 1; i_m = i - 1; if (i_m < 0) i_m = i + 1; j_p = j + 1; if (j_p == dimY) j_p = j - 1; j_m = j - 1; if (j_m < 0) j_m = j + 1; index = j*dimX+i; /*LLT-related part*/ dxx = D1_LLT[j*dimX+i_p] - 2.0f*D1_LLT[index] + D1_LLT[j*dimX+i_m]; dyy = D2_LLT[j_p*dimX+i] - 2.0f*D2_LLT[index] + D2_LLT[j_m*dimX+i]; laplc = dxx + dyy; /*build Laplacian*/ /*ROF-related part*/ dv1 = D1_ROF[index] - D1_ROF[j_m*dimX + i]; dv2 = D2_ROF[index] - D2_ROF[j*dimX + i_m]; div = dv1 + dv2; /*build Divirgent*/ /*combine all into one cost function to minimise */ U[index] += tau*(2.0f*lambdaROF*(div) - lambdaLLT*(laplc) - (U[index] - U0[index])); } } __global__ void Update3D_LLT_ROF_kernel(float *U0, float *U, float *D1_LLT, float *D2_LLT, float *D3_LLT, float *D1_ROF, float *D2_ROF, float *D3_ROF, float lambdaROF, float lambdaLLT, float tau, int dimX, int dimY, int dimZ) { int i_p, i_m, j_m, j_p, k_p, k_m; float div, laplc, dxx, dyy, dzz, dv1, dv2, dv3; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY) && (k >= 0) && (k < dimZ)) { /* symmetric boundary conditions (Neuman) */ i_p = i + 1; if (i_p == dimX) i_p = i - 1; i_m = i - 1; if (i_m < 0) i_m = i + 1; j_p = j + 1; if (j_p == dimY) j_p = j - 1; j_m = j - 1; if (j_m < 0) j_m = j + 1; k_p = k + 1; if (k_p == dimZ) k_p = k - 1; k_m = k - 1; if (k_m 
< 0) k_m = k + 1; int index = (dimX*dimY)*k + j*dimX+i; /*LLT-related part*/ dxx = D1_LLT[(dimX*dimY)*k + j*dimX+i_p] - 2.0f*D1_LLT[index] + D1_LLT[(dimX*dimY)*k + j*dimX+i_m]; dyy = D2_LLT[(dimX*dimY)*k + j_p*dimX+i] - 2.0f*D2_LLT[index] + D2_LLT[(dimX*dimY)*k + j_m*dimX+i]; dzz = D3_LLT[(dimX*dimY)*k_p + j*dimX+i] - 2.0f*D3_LLT[index] + D3_LLT[(dimX*dimY)*k_m + j*dimX+i]; laplc = dxx + dyy + dzz; /*build Laplacian*/ /*ROF-related part*/ dv1 = D1_ROF[index] - D1_ROF[(dimX*dimY)*k + j_m*dimX+i]; dv2 = D2_ROF[index] - D2_ROF[(dimX*dimY)*k + j*dimX+i_m]; dv3 = D3_ROF[index] - D3_ROF[(dimX*dimY)*k_m + j*dimX+i]; div = dv1 + dv2 + dv3; /*build Divirgent*/ /*combine all into one cost function to minimise */ U[index] += tau*(2.0f*lambdaROF*(div) - lambdaLLT*(laplc) - (U[index] - U0[index])); } } /*******************************************************************/ /************************ HOST FUNCTION ****************************/ /*******************************************************************/ extern "C" int LLT_ROF_GPU_main(float *Input, float *Output, float lambdaROF, float lambdaLLT, int iterationsNumb, float tau, int N, int M, int Z) { // set up device int dev = 0; int DimTotal; DimTotal = N*M*Z; CHECK(hipSetDevice(dev)); float *d_input, *d_update; float *D1_LLT=NULL, *D2_LLT=NULL, *D3_LLT=NULL, *D1_ROF=NULL, *D2_ROF=NULL, *D3_ROF=NULL; if (Z == 0) {Z = 1;} CHECK(hipMalloc((void**)&d_input,DimTotal*sizeof(float))); CHECK(hipMalloc((void**)&d_update,DimTotal*sizeof(float))); CHECK(hipMalloc((void**)&D1_LLT,DimTotal*sizeof(float))); CHECK(hipMalloc((void**)&D2_LLT,DimTotal*sizeof(float))); CHECK(hipMalloc((void**)&D3_LLT,DimTotal*sizeof(float))); CHECK(hipMalloc((void**)&D1_ROF,DimTotal*sizeof(float))); CHECK(hipMalloc((void**)&D2_ROF,DimTotal*sizeof(float))); CHECK(hipMalloc((void**)&D3_ROF,DimTotal*sizeof(float))); CHECK(hipMemcpy(d_input,Input,DimTotal*sizeof(float),hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_update,Input,DimTotal*sizeof(float),hipMemcpyHostToDevice)); if (Z == 1) { // TV - 2D case dim3 dimBlock(BLKXSIZE2D,BLKYSIZE2D); dim3 dimGrid(idivup(N,BLKXSIZE2D), idivup(M,BLKYSIZE2D)); for(int n=0; n < iterationsNumb; n++) { /****************ROF******************/ /* calculate first-order differences */ hipLaunchKernelGGL(( D1_func2D_ROF_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, D1_ROF, N, M); CHECK(hipDeviceSynchronize()); hipLaunchKernelGGL(( D2_func2D_ROF_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, D2_ROF, N, M); CHECK(hipDeviceSynchronize()); /****************LLT******************/ /* estimate second-order derrivatives */ hipLaunchKernelGGL(( der2D_LLT_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, D1_LLT, D2_LLT, N, M); /* Joint update for ROF and LLT models */ hipLaunchKernelGGL(( Update2D_LLT_ROF_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_input, d_update, D1_LLT, D2_LLT, D1_ROF, D2_ROF, lambdaROF, lambdaLLT, tau, N, M); CHECK(hipDeviceSynchronize()); } } else { // 3D case dim3 dimBlock(BLKXSIZE,BLKYSIZE,BLKZSIZE); dim3 dimGrid(idivup(N,BLKXSIZE), idivup(M,BLKYSIZE),idivup(Z,BLKXSIZE)); for(int n=0; n < iterationsNumb; n++) { /****************ROF******************/ /* calculate first-order differences */ hipLaunchKernelGGL(( D1_func3D_ROF_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, D1_ROF, N, M, Z); CHECK(hipDeviceSynchronize()); hipLaunchKernelGGL(( D2_func3D_ROF_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, D2_ROF, N, M, Z); CHECK(hipDeviceSynchronize()); hipLaunchKernelGGL(( D3_func3D_ROF_kernel), 
dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, D3_ROF, N, M, Z); CHECK(hipDeviceSynchronize()); /****************LLT******************/ /* estimate second-order derrivatives */ hipLaunchKernelGGL(( der3D_LLT_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, D1_LLT, D2_LLT, D3_LLT, N, M, Z); /* Joint update for ROF and LLT models */ hipLaunchKernelGGL(( Update3D_LLT_ROF_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_input, d_update, D1_LLT, D2_LLT, D3_LLT, D1_ROF, D2_ROF, D3_ROF, lambdaROF, lambdaLLT, tau, N, M, Z); CHECK(hipDeviceSynchronize()); } } CHECK(hipMemcpy(Output,d_update,DimTotal*sizeof(float),hipMemcpyDeviceToHost)); CHECK(hipFree(d_input)); CHECK(hipFree(d_update)); CHECK(hipFree(D1_LLT)); CHECK(hipFree(D2_LLT)); CHECK(hipFree(D3_LLT)); CHECK(hipFree(D1_ROF)); CHECK(hipFree(D2_ROF)); CHECK(hipFree(D3_ROF)); return 0; }
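Editor's note: the header comment in the file above lists the parameters of LLT_ROF_GPU_main, but no caller is shown. Below is a minimal, hypothetical 2D usage sketch; the function name denoise_2d and the regularisation values are arbitrary placeholders, not recommendations from the authors. One practical detail worth knowing: DimTotal is computed from Z before the internal "if (Z == 0) Z = 1;" fix-up, so a 2D image should be passed with Z = 1 rather than Z = 0.

// Hypothetical 2D caller for LLT_ROF_GPU_main (parameter values are placeholders).
#include <vector>

extern "C" int LLT_ROF_GPU_main(float *Input, float *Output, float lambdaROF,
                                float lambdaLLT, int iterationsNumb, float tau,
                                int N, int M, int Z);

int denoise_2d(std::vector<float> &noisy, std::vector<float> &restored, int N, int M)
{
    restored.resize(noisy.size());   // noisy is expected to hold N*M samples
    // Z = 1 selects the 2D branch; passing Z = 0 would make DimTotal = N*M*0 = 0
    // and allocate zero-sized buffers.
    return LLT_ROF_GPU_main(noisy.data(), restored.data(),
                            0.04f,   /* lambdaROF  */
                            0.01f,   /* lambdaLLT  */
                            300,     /* iterations */
                            0.0025f, /* tau        */
                            N, M, 1  /* Z          */);
}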
7be92ae588b43c520a482bf9e208bc3f63a23845.cu
/* This work is part of the Core Imaging Library developed by Visual Analytics and Imaging System Group of the Science Technology Facilities Council, STFC Copyright 2017 Daniil Kazantsev Copyright 2017 Srikanth Nagella, Edoardo Pasca Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "LLT_ROF_GPU_core.h" #include "shared.h" /* CUDA implementation of Lysaker, Lundervold and Tai (LLT) model [1] combined with Rudin-Osher-Fatemi [2] TV regularisation penalty. * * This penalty can deliver visually pleasant piecewise-smooth recovery if regularisation parameters are selected well. * The rule of thumb for selection is to start with lambdaLLT = 0 (just the ROF-TV model) and then proceed to increase * lambdaLLT starting with smaller values. * * Input Parameters: * 1. U0 - original noise image/volume * 2. lambdaROF - ROF-related regularisation parameter * 3. lambdaLLT - LLT-related regularisation parameter * 4. tau - time-marching step * 5. iter - iterations number (for both models) * * Output: * Filtered/regularised image * * References: * [1] Lysaker, M., Lundervold, A. and Tai, X.C., 2003. Noise removal using fourth-order partial differential equation with applications to medical magnetic resonance images in space and time. IEEE Transactions on image processing, 12(12), pp.1579-1590. * [2] Rudin, Osher, Fatemi, "Nonlinear Total Variation based noise removal algorithms" */ #define BLKXSIZE 8 #define BLKYSIZE 8 #define BLKZSIZE 8 #define BLKXSIZE2D 16 #define BLKYSIZE2D 16 #define EPS_LLT 0.01 #define EPS_ROF 1.0e-12 #define idivup(a, b) ( ((a)%(b) != 0) ? (a)/(b)+1 : (a)/(b) ) #define MAX(x, y) (((x) > (y)) ? (x) : (y)) #define MIN(x, y) (((x) < (y)) ? 
(x) : (y)) __host__ __device__ int signLLT (float x) { return (x > 0) - (x < 0); } /*************************************************************************/ /**********************LLT-related functions *****************************/ /*************************************************************************/ __global__ void der2D_LLT_kernel(float *U, float *D1, float *D2, int dimX, int dimY) { int i_p, i_m, j_m, j_p; float dxx, dyy, denom_xx, denom_yy; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int index = i + dimX*j; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY)) { /* symmetric boundary conditions (Neuman) */ i_p = i + 1; if (i_p == dimX) i_p = i - 1; i_m = i - 1; if (i_m < 0) i_m = i + 1; j_p = j + 1; if (j_p == dimY) j_p = j - 1; j_m = j - 1; if (j_m < 0) j_m = j + 1; dxx = U[j*dimX+i_p] - 2.0f*U[index] + U[j*dimX+i_m]; dyy = U[j_p*dimX+i] - 2.0f*U[index] + U[j_m*dimX+i]; denom_xx = abs(dxx) + EPS_LLT; denom_yy = abs(dyy) + EPS_LLT; D1[index] = dxx / denom_xx; D2[index] = dyy / denom_yy; } } __global__ void der3D_LLT_kernel(float* U, float *D1, float *D2, float *D3, int dimX, int dimY, int dimZ) { int i_p, i_m, j_m, j_p, k_p, k_m; float dxx, dyy, dzz, denom_xx, denom_yy, denom_zz; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY) && (k >= 0) && (k < dimZ)) { /* symmetric boundary conditions (Neuman) */ i_p = i + 1; if (i_p == dimX) i_p = i - 1; i_m = i - 1; if (i_m < 0) i_m = i + 1; j_p = j + 1; if (j_p == dimY) j_p = j - 1; j_m = j - 1; if (j_m < 0) j_m = j + 1; k_p = k + 1; if (k_p == dimZ) k_p = k - 1; k_m = k - 1; if (k_m < 0) k_m = k + 1; int index = (dimX*dimY)*k + j*dimX+i; dxx = U[(dimX*dimY)*k + j*dimX+i_p] - 2.0f*U[index] + U[(dimX*dimY)*k + j*dimX+i_m]; dyy = U[(dimX*dimY)*k + j_p*dimX+i] - 2.0f*U[index] + U[(dimX*dimY)*k + j_m*dimX+i]; dzz = U[(dimX*dimY)*k_p + j*dimX+i] - 2.0f*U[index] + U[(dimX*dimY)*k_m + j*dimX+i]; denom_xx = abs(dxx) + EPS_LLT; denom_yy = abs(dyy) + EPS_LLT; denom_zz = abs(dzz) + EPS_LLT; D1[index] = dxx / denom_xx; D2[index] = dyy / denom_yy; D3[index] = dzz / denom_zz; } } /*************************************************************************/ /**********************ROF-related functions *****************************/ /*************************************************************************/ /* first-order differences 1 */ __global__ void D1_func2D_ROF_kernel(float* Input, float* D1, int N, int M) { int i1, j1, i2; float NOMx_1,NOMy_1,NOMy_0,denom1,denom2,T1; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int index = i + N*j; if ((i >= 0) && (i < N) && (j >= 0) && (j < M)) { /* boundary conditions (Neumann reflections) */ i1 = i + 1; if (i1 >= N) i1 = i-1; i2 = i - 1; if (i2 < 0) i2 = i+1; j1 = j + 1; if (j1 >= M) j1 = j-1; /* Forward-backward differences */ NOMx_1 = Input[j1*N + i] - Input[index]; /* x+ */ NOMy_1 = Input[j*N + i1] - Input[index]; /* y+ */ NOMy_0 = Input[index] - Input[j*N + i2]; /* y- */ denom1 = NOMx_1*NOMx_1; denom2 = 0.5f*(signLLT((float)NOMy_1) + signLLT((float)NOMy_0))*(MIN(abs((float)NOMy_1),abs((float)NOMy_0))); denom2 = denom2*denom2; T1 = sqrt(denom1 + denom2 + EPS_ROF); D1[index] = NOMx_1/T1; } } /* differences 2 */ __global__ void D2_func2D_ROF_kernel(float* Input, float* D2, int N, int M) { int i1, j1, j2; float NOMx_1,NOMy_1,NOMx_0,denom1,denom2,T2; int i = blockDim.x * 
blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int index = i + N*j; if ((i >= 0) && (i < (N)) && (j >= 0) && (j < (M))) { /* boundary conditions (Neumann reflections) */ i1 = i + 1; if (i1 >= N) i1 = i-1; j1 = j + 1; if (j1 >= M) j1 = j-1; j2 = j - 1; if (j2 < 0) j2 = j+1; /* Forward-backward differences */ NOMx_1 = Input[j1*N + i] - Input[index]; /* x+ */ NOMy_1 = Input[j*N + i1] - Input[index]; /* y+ */ NOMx_0 = Input[index] - Input[j2*N + i]; /* x- */ denom1 = NOMy_1*NOMy_1; denom2 = 0.5f*(signLLT((float)NOMx_1) + signLLT((float)NOMx_0))*(MIN(abs((float)NOMx_1),abs((float)NOMx_0))); denom2 = denom2*denom2; T2 = sqrt(denom1 + denom2 + EPS_ROF); D2[index] = NOMy_1/T2; } } /* differences 1 */ __global__ void D1_func3D_ROF_kernel(float* Input, float* D1, int dimX, int dimY, int dimZ) { float NOMx_1, NOMy_1, NOMy_0, NOMz_1, NOMz_0, denom1, denom2,denom3, T1; int i1,i2,k1,j1,j2,k2; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (dimX*dimY)*k + j*dimX+i; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY) && (k >= 0) && (k < dimZ)) { /* symmetric boundary conditions (Neuman) */ i1 = i + 1; if (i1 >= dimX) i1 = i-1; i2 = i - 1; if (i2 < 0) i2 = i+1; j1 = j + 1; if (j1 >= dimY) j1 = j-1; j2 = j - 1; if (j2 < 0) j2 = j+1; k1 = k + 1; if (k1 >= dimZ) k1 = k-1; k2 = k - 1; if (k2 < 0) k2 = k+1; /* Forward-backward differences */ NOMx_1 = Input[(dimX*dimY)*k + j1*dimX + i] - Input[index]; /* x+ */ NOMy_1 = Input[(dimX*dimY)*k + j*dimX + i1] - Input[index]; /* y+ */ NOMy_0 = Input[index] - Input[(dimX*dimY)*k + j*dimX + i2]; /* y- */ NOMz_1 = Input[(dimX*dimY)*k1 + j*dimX + i] - Input[index]; /* z+ */ NOMz_0 = Input[index] - Input[(dimX*dimY)*k2 + j*dimX + i]; /* z- */ denom1 = NOMx_1*NOMx_1; denom2 = 0.5*(signLLT(NOMy_1) + signLLT(NOMy_0))*(MIN(abs(NOMy_1),abs(NOMy_0))); denom2 = denom2*denom2; denom3 = 0.5*(signLLT(NOMz_1) + signLLT(NOMz_0))*(MIN(abs(NOMz_1),abs(NOMz_0))); denom3 = denom3*denom3; T1 = sqrt(denom1 + denom2 + denom3 + EPS_ROF); D1[index] = NOMx_1/T1; } } /* differences 2 */ __global__ void D2_func3D_ROF_kernel(float* Input, float* D2, int dimX, int dimY, int dimZ) { float NOMx_1, NOMy_1, NOMx_0, NOMz_1, NOMz_0, denom1, denom2, denom3, T2; int i1,i2,k1,j1,j2,k2; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (dimX*dimY)*k + j*dimX+i; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY) && (k >= 0) && (k < dimZ)) { /* symmetric boundary conditions (Neuman) */ i1 = i + 1; if (i1 >= dimX) i1 = i-1; i2 = i - 1; if (i2 < 0) i2 = i+1; j1 = j + 1; if (j1 >= dimY) j1 = j-1; j2 = j - 1; if (j2 < 0) j2 = j+1; k1 = k + 1; if (k1 >= dimZ) k1 = k-1; k2 = k - 1; if (k2 < 0) k2 = k+1; /* Forward-backward differences */ NOMx_1 = Input[(dimX*dimY)*k + (j1)*dimX + i] - Input[index]; /* x+ */ NOMy_1 = Input[(dimX*dimY)*k + (j)*dimX + i1] - Input[index]; /* y+ */ NOMx_0 = Input[index] - Input[(dimX*dimY)*k + (j2)*dimX + i]; /* x- */ NOMz_1 = Input[(dimX*dimY)*k1 + j*dimX + i] - Input[index]; /* z+ */ NOMz_0 = Input[index] - Input[(dimX*dimY)*k2 + (j)*dimX + i]; /* z- */ denom1 = NOMy_1*NOMy_1; denom2 = 0.5*(signLLT(NOMx_1) + signLLT(NOMx_0))*(MIN(abs(NOMx_1),abs(NOMx_0))); denom2 = denom2*denom2; denom3 = 0.5*(signLLT(NOMz_1) + signLLT(NOMz_0))*(MIN(abs(NOMz_1),abs(NOMz_0))); denom3 = denom3*denom3; T2 = sqrt(denom1 + denom2 + denom3 + EPS_ROF); D2[index] = NOMy_1/T2; } 
} /* differences 3 */ __global__ void D3_func3D_ROF_kernel(float* Input, float* D3, int dimX, int dimY, int dimZ) { float NOMx_1, NOMy_1, NOMx_0, NOMy_0, NOMz_1, denom1, denom2, denom3, T3; int i1,i2,k1,j1,j2,k2; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (dimX*dimY)*k + j*dimX+i; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY) && (k >= 0) && (k < dimZ)) { i1 = i + 1; if (i1 >= dimX) i1 = i-1; i2 = i - 1; if (i2 < 0) i2 = i+1; j1 = j + 1; if (j1 >= dimY) j1 = j-1; j2 = j - 1; if (j2 < 0) j2 = j+1; k1 = k + 1; if (k1 >= dimZ) k1 = k-1; k2 = k - 1; if (k2 < 0) k2 = k+1; /* Forward-backward differences */ NOMx_1 = Input[(dimX*dimY)*k + (j1)*dimX + i] - Input[index]; /* x+ */ NOMy_1 = Input[(dimX*dimY)*k + (j)*dimX + i1] - Input[index]; /* y+ */ NOMy_0 = Input[index] - Input[(dimX*dimY)*k + (j)*dimX + i2]; /* y- */ NOMx_0 = Input[index] - Input[(dimX*dimY)*k + (j2)*dimX + i]; /* x- */ NOMz_1 = Input[(dimX*dimY)*k1 + j*dimX + i] - Input[index]; /* z+ */ denom1 = NOMz_1*NOMz_1; denom2 = 0.5*(signLLT(NOMx_1) + signLLT(NOMx_0))*(MIN(abs(NOMx_1),abs(NOMx_0))); denom2 = denom2*denom2; denom3 = 0.5*(signLLT(NOMy_1) + signLLT(NOMy_0))*(MIN(abs(NOMy_1),abs(NOMy_0))); denom3 = denom3*denom3; T3 = sqrt(denom1 + denom2 + denom3 + EPS_ROF); D3[index] = NOMz_1/T3; } } /*************************************************************************/ /**********************ROF-LLT-related functions *************************/ /*************************************************************************/ __global__ void Update2D_LLT_ROF_kernel(float *U0, float *U, float *D1_LLT, float *D2_LLT, float *D1_ROF, float *D2_ROF, float lambdaROF, float lambdaLLT, float tau, int dimX, int dimY) { int i_p, i_m, j_m, j_p; float div, laplc, dxx, dyy, dv1, dv2; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int index = i + dimX*j; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY)) { /* symmetric boundary conditions (Neuman) */ i_p = i + 1; if (i_p == dimX) i_p = i - 1; i_m = i - 1; if (i_m < 0) i_m = i + 1; j_p = j + 1; if (j_p == dimY) j_p = j - 1; j_m = j - 1; if (j_m < 0) j_m = j + 1; index = j*dimX+i; /*LLT-related part*/ dxx = D1_LLT[j*dimX+i_p] - 2.0f*D1_LLT[index] + D1_LLT[j*dimX+i_m]; dyy = D2_LLT[j_p*dimX+i] - 2.0f*D2_LLT[index] + D2_LLT[j_m*dimX+i]; laplc = dxx + dyy; /*build Laplacian*/ /*ROF-related part*/ dv1 = D1_ROF[index] - D1_ROF[j_m*dimX + i]; dv2 = D2_ROF[index] - D2_ROF[j*dimX + i_m]; div = dv1 + dv2; /*build Divirgent*/ /*combine all into one cost function to minimise */ U[index] += tau*(2.0f*lambdaROF*(div) - lambdaLLT*(laplc) - (U[index] - U0[index])); } } __global__ void Update3D_LLT_ROF_kernel(float *U0, float *U, float *D1_LLT, float *D2_LLT, float *D3_LLT, float *D1_ROF, float *D2_ROF, float *D3_ROF, float lambdaROF, float lambdaLLT, float tau, int dimX, int dimY, int dimZ) { int i_p, i_m, j_m, j_p, k_p, k_m; float div, laplc, dxx, dyy, dzz, dv1, dv2, dv3; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY) && (k >= 0) && (k < dimZ)) { /* symmetric boundary conditions (Neuman) */ i_p = i + 1; if (i_p == dimX) i_p = i - 1; i_m = i - 1; if (i_m < 0) i_m = i + 1; j_p = j + 1; if (j_p == dimY) j_p = j - 1; j_m = j - 1; if (j_m < 0) j_m = j + 1; k_p = k + 1; if (k_p == dimZ) k_p = k - 1; k_m = k - 1; if (k_m 
< 0) k_m = k + 1; int index = (dimX*dimY)*k + j*dimX+i; /*LLT-related part*/ dxx = D1_LLT[(dimX*dimY)*k + j*dimX+i_p] - 2.0f*D1_LLT[index] + D1_LLT[(dimX*dimY)*k + j*dimX+i_m]; dyy = D2_LLT[(dimX*dimY)*k + j_p*dimX+i] - 2.0f*D2_LLT[index] + D2_LLT[(dimX*dimY)*k + j_m*dimX+i]; dzz = D3_LLT[(dimX*dimY)*k_p + j*dimX+i] - 2.0f*D3_LLT[index] + D3_LLT[(dimX*dimY)*k_m + j*dimX+i]; laplc = dxx + dyy + dzz; /*build Laplacian*/ /*ROF-related part*/ dv1 = D1_ROF[index] - D1_ROF[(dimX*dimY)*k + j_m*dimX+i]; dv2 = D2_ROF[index] - D2_ROF[(dimX*dimY)*k + j*dimX+i_m]; dv3 = D3_ROF[index] - D3_ROF[(dimX*dimY)*k_m + j*dimX+i]; div = dv1 + dv2 + dv3; /*build Divirgent*/ /*combine all into one cost function to minimise */ U[index] += tau*(2.0f*lambdaROF*(div) - lambdaLLT*(laplc) - (U[index] - U0[index])); } } /*******************************************************************/ /************************ HOST FUNCTION ****************************/ /*******************************************************************/ extern "C" int LLT_ROF_GPU_main(float *Input, float *Output, float lambdaROF, float lambdaLLT, int iterationsNumb, float tau, int N, int M, int Z) { // set up device int dev = 0; int DimTotal; DimTotal = N*M*Z; CHECK(cudaSetDevice(dev)); float *d_input, *d_update; float *D1_LLT=NULL, *D2_LLT=NULL, *D3_LLT=NULL, *D1_ROF=NULL, *D2_ROF=NULL, *D3_ROF=NULL; if (Z == 0) {Z = 1;} CHECK(cudaMalloc((void**)&d_input,DimTotal*sizeof(float))); CHECK(cudaMalloc((void**)&d_update,DimTotal*sizeof(float))); CHECK(cudaMalloc((void**)&D1_LLT,DimTotal*sizeof(float))); CHECK(cudaMalloc((void**)&D2_LLT,DimTotal*sizeof(float))); CHECK(cudaMalloc((void**)&D3_LLT,DimTotal*sizeof(float))); CHECK(cudaMalloc((void**)&D1_ROF,DimTotal*sizeof(float))); CHECK(cudaMalloc((void**)&D2_ROF,DimTotal*sizeof(float))); CHECK(cudaMalloc((void**)&D3_ROF,DimTotal*sizeof(float))); CHECK(cudaMemcpy(d_input,Input,DimTotal*sizeof(float),cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_update,Input,DimTotal*sizeof(float),cudaMemcpyHostToDevice)); if (Z == 1) { // TV - 2D case dim3 dimBlock(BLKXSIZE2D,BLKYSIZE2D); dim3 dimGrid(idivup(N,BLKXSIZE2D), idivup(M,BLKYSIZE2D)); for(int n=0; n < iterationsNumb; n++) { /****************ROF******************/ /* calculate first-order differences */ D1_func2D_ROF_kernel<<<dimGrid,dimBlock>>>(d_update, D1_ROF, N, M); CHECK(cudaDeviceSynchronize()); D2_func2D_ROF_kernel<<<dimGrid,dimBlock>>>(d_update, D2_ROF, N, M); CHECK(cudaDeviceSynchronize()); /****************LLT******************/ /* estimate second-order derrivatives */ der2D_LLT_kernel<<<dimGrid,dimBlock>>>(d_update, D1_LLT, D2_LLT, N, M); /* Joint update for ROF and LLT models */ Update2D_LLT_ROF_kernel<<<dimGrid,dimBlock>>>(d_input, d_update, D1_LLT, D2_LLT, D1_ROF, D2_ROF, lambdaROF, lambdaLLT, tau, N, M); CHECK(cudaDeviceSynchronize()); } } else { // 3D case dim3 dimBlock(BLKXSIZE,BLKYSIZE,BLKZSIZE); dim3 dimGrid(idivup(N,BLKXSIZE), idivup(M,BLKYSIZE),idivup(Z,BLKXSIZE)); for(int n=0; n < iterationsNumb; n++) { /****************ROF******************/ /* calculate first-order differences */ D1_func3D_ROF_kernel<<<dimGrid,dimBlock>>>(d_update, D1_ROF, N, M, Z); CHECK(cudaDeviceSynchronize()); D2_func3D_ROF_kernel<<<dimGrid,dimBlock>>>(d_update, D2_ROF, N, M, Z); CHECK(cudaDeviceSynchronize()); D3_func3D_ROF_kernel<<<dimGrid,dimBlock>>>(d_update, D3_ROF, N, M, Z); CHECK(cudaDeviceSynchronize()); /****************LLT******************/ /* estimate second-order derrivatives */ der3D_LLT_kernel<<<dimGrid,dimBlock>>>(d_update, D1_LLT, D2_LLT, D3_LLT, N, M, 
Z); /* Joint update for ROF and LLT models */ Update3D_LLT_ROF_kernel<<<dimGrid,dimBlock>>>(d_input, d_update, D1_LLT, D2_LLT, D3_LLT, D1_ROF, D2_ROF, D3_ROF, lambdaROF, lambdaLLT, tau, N, M, Z); CHECK(cudaDeviceSynchronize()); } } CHECK(cudaMemcpy(Output,d_update,DimTotal*sizeof(float),cudaMemcpyDeviceToHost)); CHECK(cudaFree(d_input)); CHECK(cudaFree(d_update)); CHECK(cudaFree(D1_LLT)); CHECK(cudaFree(D2_LLT)); CHECK(cudaFree(D3_LLT)); CHECK(cudaFree(D1_ROF)); CHECK(cudaFree(D2_ROF)); CHECK(cudaFree(D3_ROF)); return 0; }
fd5ea73f6f620c2fafdc6879169daf42563f1bf6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "counting.h" #include <cstdio> #include <cassert> #include <thrust/scan.h> #include <thrust/transform.h> #include <thrust/functional.h> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #define max(a, b)((a)>(b)?(a):(b)) __device__ __host__ int CeilDiv(int a, int b) { return (a-1)/b + 1; } __device__ __host__ int CeilAlign(int a, int b) { return CeilDiv(a, b) * b; } struct convert : public thrust::unary_function<char, int> { __host__ __device__ int operator()(char c) { if (c == '\n') return 0; return 1; } }; __device__ int Lowbit(int x) { return x&(-x); } __global__ void SimpleAlgo(const char* text, int *pos, int text_size) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < text_size) { if (text[i] == '\n') { pos[i] = 0; int j = i + 1; int counter = 1; while(text[j] != '\n' && j < text_size){ pos[j] = counter; counter ++; j ++; } } else if (i == 0 && text[i] != '\n') { int j = 0; int counter = 1; while (text[j] != '\n' && i < text_size) { pos[j] = counter; counter ++; j ++; } } } } __global__ void Algo(const char *text,int *pos, int text_size) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int tid = threadIdx.x; __shared__ int data[512]; if (i < text_size) { if (text[i] == '\n') data[tid] = 0; else data[tid] = 1; __syncthreads(); if (data[tid] != 0) { for (int j = 0; j < 9; j ++) { int look = data[tid]; if ((tid - look) < 0)break; data[tid] = data[tid] + data[tid - look]; __syncthreads(); } } __syncthreads(); pos[i] = data[tid]; } } __global__ void Final(int *pos, int text_size) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < text_size) { if (pos[i] != 0 ) { int look = pos[i]; if (i - look >= 0) atomicAdd(&pos[i], pos[i - look]); } } } __global__ void BITAlgo(const char* text, int *pos, int text_size){ const int tid = threadIdx.x; const int bid = blockIdx.x; __shared__ int data[512]; const int i = bid * blockDim.x + tid; /* if current thread is out of index, we won't do it */ if (i < text_size) { /* Initialize BIT array */ pos[i] = 0; /* Initialize original sequence */ if (text[i] == '\n') { data[tid] = i + 1; } else { data[tid] = 0; } __syncthreads(); /* Construct Max BIT */ //if(data[i] != -1) { // Set recurrent index int j = tid + 1; int val = data[tid]; while(j <= 512) { atomicMax(&data[j - 1], val); j += Lowbit(j); __syncthreads(); } /* Compute interval max */ j = tid + 1; int ans = 0; while (j >= 1) { ans = max(data[j-1], ans); j -= Lowbit(j); __syncthreads(); } pos[i] = ans; } } __global__ void SumBIT(int *pos, int text_size){ const int i = blockIdx.x * blockDim.x + threadIdx.x; const int bid = blockIdx.x; if (i < text_size) { if (bid != 0){ int j = (bid - 1) * blockDim.x + 511; atomicMax(&pos[i], pos[j]); } } } __global__ void ConvertBIT(int *pos, int text_size){ const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < text_size)pos[i] = i + 1 - pos[i]; } void CountPosition1(const char *text, int *pos, int text_size) { thrust::equal_to<int> binary_pred; thrust::plus<int> binary_op; thrust::device_ptr<const char> dev_ptr1(text); thrust::device_ptr<int> dev_ptr2(pos); /* Replace space to 0, others to 1 */ thrust::transform(thrust::device, dev_ptr1, dev_ptr1 + text_size, dev_ptr2, convert()); /* Segment prefix sum all position */ thrust::inclusive_scan_by_key(thrust::device, dev_ptr2, dev_ptr2 + text_size, dev_ptr2, dev_ptr2, binary_pred, binary_op); } void CountPosition2(const char *text, int *pos, int text_size) { //int 
*temp; //hipMalloc(&temp, sizeof(int) * text_size); int blocks = (text_size + 511)/512; hipLaunchKernelGGL(( Algo), dim3(blocks), dim3(512), 0, 0, text, pos, text_size); hipLaunchKernelGGL(( Final), dim3(blocks), dim3(512), 0, 0, pos, text_size); }
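Editor's note: both CountPosition variants compute, for every character, its 1-based offset from the most recent newline (0 on the newline itself). A small hypothetical harness like the one below makes that contract concrete; the sample string and expected output are only an illustration, and it assumes counting.h declares the two functions as used in this file.

// Illustrative host-side check of CountPosition1 (editor-added, not part of the project).
#include <cstdio>
#include <cstring>
#include <cuda_runtime.h>
#include "counting.h"

int main()
{
    const char h_text[] = "abc\nde\nf";   // '\n' marks word boundaries
    const int n = (int)strlen(h_text);

    char *d_text; int *d_pos;
    cudaMalloc(&d_text, n);
    cudaMalloc(&d_pos, n * sizeof(int));
    cudaMemcpy(d_text, h_text, n, cudaMemcpyHostToDevice);

    CountPosition1(d_text, d_pos, n);     // expected pos: 1 2 3 0 1 2 0 1

    int h_pos[sizeof(h_text)];
    cudaMemcpy(h_pos, d_pos, n * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; ++i) printf("%d ", h_pos[i]);
    printf("\n");

    cudaFree(d_text); cudaFree(d_pos);
    return 0;
}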
fd5ea73f6f620c2fafdc6879169daf42563f1bf6.cu
#include "counting.h" #include <cstdio> #include <cassert> #include <thrust/scan.h> #include <thrust/transform.h> #include <thrust/functional.h> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #define max(a, b)((a)>(b)?(a):(b)) __device__ __host__ int CeilDiv(int a, int b) { return (a-1)/b + 1; } __device__ __host__ int CeilAlign(int a, int b) { return CeilDiv(a, b) * b; } struct convert : public thrust::unary_function<char, int> { __host__ __device__ int operator()(char c) { if (c == '\n') return 0; return 1; } }; __device__ int Lowbit(int x) { return x&(-x); } __global__ void SimpleAlgo(const char* text, int *pos, int text_size) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < text_size) { if (text[i] == '\n') { pos[i] = 0; int j = i + 1; int counter = 1; while(text[j] != '\n' && j < text_size){ pos[j] = counter; counter ++; j ++; } } else if (i == 0 && text[i] != '\n') { int j = 0; int counter = 1; while (text[j] != '\n' && i < text_size) { pos[j] = counter; counter ++; j ++; } } } } __global__ void Algo(const char *text,int *pos, int text_size) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int tid = threadIdx.x; __shared__ int data[512]; if (i < text_size) { if (text[i] == '\n') data[tid] = 0; else data[tid] = 1; __syncthreads(); if (data[tid] != 0) { for (int j = 0; j < 9; j ++) { int look = data[tid]; if ((tid - look) < 0)break; data[tid] = data[tid] + data[tid - look]; __syncthreads(); } } __syncthreads(); pos[i] = data[tid]; } } __global__ void Final(int *pos, int text_size) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < text_size) { if (pos[i] != 0 ) { int look = pos[i]; if (i - look >= 0) atomicAdd(&pos[i], pos[i - look]); } } } __global__ void BITAlgo(const char* text, int *pos, int text_size){ const int tid = threadIdx.x; const int bid = blockIdx.x; __shared__ int data[512]; const int i = bid * blockDim.x + tid; /* if current thread is out of index, we won't do it */ if (i < text_size) { /* Initialize BIT array */ pos[i] = 0; /* Initialize original sequence */ if (text[i] == '\n') { data[tid] = i + 1; } else { data[tid] = 0; } __syncthreads(); /* Construct Max BIT */ //if(data[i] != -1) { // Set recurrent index int j = tid + 1; int val = data[tid]; while(j <= 512) { atomicMax(&data[j - 1], val); j += Lowbit(j); __syncthreads(); } /* Compute interval max */ j = tid + 1; int ans = 0; while (j >= 1) { ans = max(data[j-1], ans); j -= Lowbit(j); __syncthreads(); } pos[i] = ans; } } __global__ void SumBIT(int *pos, int text_size){ const int i = blockIdx.x * blockDim.x + threadIdx.x; const int bid = blockIdx.x; if (i < text_size) { if (bid != 0){ int j = (bid - 1) * blockDim.x + 511; atomicMax(&pos[i], pos[j]); } } } __global__ void ConvertBIT(int *pos, int text_size){ const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < text_size)pos[i] = i + 1 - pos[i]; } void CountPosition1(const char *text, int *pos, int text_size) { thrust::equal_to<int> binary_pred; thrust::plus<int> binary_op; thrust::device_ptr<const char> dev_ptr1(text); thrust::device_ptr<int> dev_ptr2(pos); /* Replace space to 0, others to 1 */ thrust::transform(thrust::device, dev_ptr1, dev_ptr1 + text_size, dev_ptr2, convert()); /* Segment prefix sum all position */ thrust::inclusive_scan_by_key(thrust::device, dev_ptr2, dev_ptr2 + text_size, dev_ptr2, dev_ptr2, binary_pred, binary_op); } void CountPosition2(const char *text, int *pos, int text_size) { //int *temp; //cudaMalloc(&temp, sizeof(int) * text_size); int blocks = (text_size + 511)/512; 
Algo<<<blocks, 512>>>(text, pos, text_size); Final<<<blocks, 512>>>(pos, text_size); }
1568a3ac985c8face7d891c415ddc7f9ecbffd00.hip
// !!! This is a file automatically generated by hipify!!!
#include "THHUNN.h"
#include "TH/THHalf.h"
#include "THHHalfAutoNumerics.cuh"
#include <THH/THHApply.cuh>

template <typename T>
struct ELUupdateOutput_functor
{
  const T negcoef_;
  const T poscoef_;
  const T negiptcoef_;

  ELUupdateOutput_functor(T negcoef, T poscoef, T negiptcoef)
    : negcoef_(negcoef)
    , poscoef_(poscoef)
    , negiptcoef_(negiptcoef)
  {}

  __device__ void operator()(T *output, const T *input) const
  {
    *output = *input <= 0 ? (exp(*input * negiptcoef_) - 1) * negcoef_ : *input * poscoef_;
  }
};

// in-place variant
template <typename T>
struct ELUupdateOutputIP_functor
{
  const T negcoef_;
  const T poscoef_;
  const T negiptcoef_;

  ELUupdateOutputIP_functor(T negcoef, T poscoef, T negiptcoef)
    : negcoef_(negcoef)
    , poscoef_(poscoef)
    , negiptcoef_(negiptcoef)
  {}

  __device__ void operator()(T *x) const
  {
    *x = *x <= 0 ? (exp(*x * negiptcoef_) - 1) * negcoef_ : *x * poscoef_;
  }
};

template <typename T>
struct ELUupdateGradInput_functor
{
  const T negcoef_;
  const T poscoef_;
  const T negiptcoef_;

  ELUupdateGradInput_functor(T negcoef, T poscoef, T negiptcoef)
    : negcoef_(negcoef)
    , poscoef_(poscoef)
    , negiptcoef_(negiptcoef)
  {}

  __device__ void operator()(T *gradInput, const T *output, const T *gradOutput) const
  {
    *gradInput = (*output) <= 0 ? (*gradOutput * negiptcoef_ * (*output + negcoef_)) : (*gradOutput * poscoef_);
  }
};

#include "generic/ELU.cu"
#include "THHGenerateFloatTypes.h"
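Editor's note: a scalar reference of the two update rules above is handy when checking the functors; the sketch below is editor-added and not part of THCUNN. It also spells out why the backward pass can be written in terms of the saved output: for x <= 0 the forward value is y = negcoef*(exp(negiptcoef*x) - 1), so dy/dx = negcoef*negiptcoef*exp(negiptcoef*x) = negiptcoef*(y + negcoef).

// Scalar reference of the ELU update rules (illustrative, not part of the library).
#include <cmath>

float elu_forward(float x, float negcoef, float poscoef, float negiptcoef)
{
    // matches ELUupdateOutput_functor / ELUupdateOutputIP_functor
    return x <= 0.f ? (std::exp(x * negiptcoef) - 1.f) * negcoef : x * poscoef;
}

float elu_backward(float grad_out, float out, float negcoef, float poscoef, float negiptcoef)
{
    // matches ELUupdateGradInput_functor: on the negative branch the gradient is
    // expressed through the saved output, since negiptcoef * (out + negcoef)
    // equals negcoef * negiptcoef * exp(negiptcoef * x).
    return out <= 0.f ? grad_out * negiptcoef * (out + negcoef)
                      : grad_out * poscoef;
}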
1568a3ac985c8face7d891c415ddc7f9ecbffd00.cu
#include "THCUNN.h" #include "TH/THHalf.h" #include "THCHalfAutoNumerics.cuh" #include <THC/THCApply.cuh> template <typename T> struct ELUupdateOutput_functor { const T negcoef_; const T poscoef_; const T negiptcoef_; ELUupdateOutput_functor(T negcoef, T poscoef, T negiptcoef) : negcoef_(negcoef) , poscoef_(poscoef) , negiptcoef_(negiptcoef) {} __device__ void operator()(T *output, const T *input) const { *output = *input <= 0 ? (exp(*input * negiptcoef_) - 1) * negcoef_ : *input * poscoef_; } }; // in-place variant template <typename T> struct ELUupdateOutputIP_functor { const T negcoef_; const T poscoef_; const T negiptcoef_; ELUupdateOutputIP_functor(T negcoef, T poscoef, T negiptcoef) : negcoef_(negcoef) , poscoef_(poscoef) , negiptcoef_(negiptcoef) {} __device__ void operator()(T *x) const { *x = *x <= 0 ? (exp(*x * negiptcoef_) - 1) * negcoef_ : *x * poscoef_; } }; template <typename T> struct ELUupdateGradInput_functor { const T negcoef_; const T poscoef_; const T negiptcoef_; ELUupdateGradInput_functor(T negcoef, T poscoef, T negiptcoef) : negcoef_(negcoef) , poscoef_(poscoef) , negiptcoef_(negiptcoef) {} __device__ void operator()(T *gradInput, const T *output, const T *gradOutput) const { *gradInput = (*output) <= 0 ? (*gradOutput * negiptcoef_ * (*output + negcoef_)) : (*gradOutput * poscoef_); } }; #include "generic/ELU.cu" #include "THCGenerateFloatTypes.h"
ecd217e28dfe5518cb6b5847fb08164f92d0da58.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<sys/time.h> #define MIMAX 129 #define MJMAX 65 #define MKMAX 65 #define NN 200 #define THREAD_NUM 127 /*static float p[MIMAX][MJMAX][MKMAX]; static float a[MIMAX][MJMAX][MKMAX][4]; static float b[MIMAX][MJMAX][MKMAX][3]; static float c[MIMAX][MJMAX][MKMAX][3]; static float bnd[MIMAX][MJMAX][MKMAX]; static float work1[MIMAX][MJMAX][MKMAX]; static float work2[MIMAX][MJMAX][MKMAX];*/ static int imax, jmax, kmax; static float omega; /*void initial_matrix(){ int i, j, k; for(i=0; i<imax; i++) for(j=0; j<jmax; j++) for(k=0; k<kmax; k++){ a[i][j][k][0] = 0.0; a[i][j][k][1] = 0.0; a[i][j][k][2] = 0.0; a[i][j][k][3] = 0.0; b[i][j][k][0] = 0.0; b[i][j][k][1] = 0.0; b[i][j][k][2] = 0.0; c[i][j][k][0] = 0.0; c[i][j][k][0]=0.0; c[i][j][k][1]=0.0; c[i][j][k][2]=0.0; p[i][j][k]=0.0; work1[i][j][k]=0.0; bnd[i][j][k]=0.0; } for(i=0 ; i<imax ; ++i) for(j=0 ; j<jmax ; ++j) for(k=0 ; k<kmax ; ++k){ a[i][j][k][0]=1.0; a[i][j][k][1]=1.0; a[i][j][k][2]=1.0; a[i][j][k][3]=1.0/6.0; b[i][j][k][0]=0.0; b[i][j][k][1]=0.0; b[i][j][k][2]=0.0; c[i][j][k][0]=1.0; c[i][j][k][1]=1.0; c[i][j][k][2]=1.0; p[i][j][k]=(float)(k*k)/(float)((kmax-1)*(kmax-1)); work1[i][j][k]=0.0; bnd[i][j][k]=1.0; } }*/ double second(){ struct timeval tm; double t; static int base_sec = 0, base_usec = 0; gettimeofday(&tm, NULL); if(base_sec == 0 && base_usec == 0){ base_sec = tm.tv_sec; base_usec = tm.tv_usec; t = 0.0; } else{ t = (double)(tm.tv_sec-base_sec) + ((double)(tm.tv_usec-base_usec))/1.0e6; } return t; } __global__ void jacobi(float *a0, float *a1, float *a2, float *a3, float *b0, float *b1, float *b2, float *c0, float *c1, float *c2, float *p, float *wrk1, float *wrk2, float *bnd, int nn, int imax, int jmax, int kmax, float omega, float *gosa){ int i, j, k, n; float s0, ss, temp; const int tid = threadIdx.x; const int size = (imax-1)/(imax-1); for(n=0;n<nn;++n){ temp=0.0; for(i=tid*size ; i<(tid+1)*size ; ++i) for(j=1 ; j<jmax-1 ; ++j) for(k=1 ; k<kmax-1 ; ++k){ s0 = a0[i*jmax*kmax+j*kmax+k] * p[(i+1)*jmax*kmax+j*kmax+k] + a1[i*jmax*kmax+j*kmax+k] * p[i*jmax*kmax+(j+1)*kmax+k] + a2[i*jmax*kmax+j*kmax+k] * p[i*jmax*kmax+j*kmax+(k+1)] + b0[i*jmax*kmax+j*kmax+k] * ( p[(i+1)*jmax*kmax+(j+1)*kmax+k] - p[(i+1)*jmax*kmax+(j-1)*kmax+k] - p[(i-1)*jmax*kmax+(j+1)*kmax+k] + p[(i-1)*jmax*kmax+(j-1)*kmax+k] ) + b1[i*jmax*kmax+j*kmax+k] * ( p[i*jmax*kmax+(j+1)*kmax+(k+1)] - p[i*jmax*kmax+(j-1)*kmax+(k+1)] - p[i*jmax*kmax+(j+1)*kmax+(k-1)] + p[i*jmax*kmax+(j-1)*kmax+(k-1)] ) + b2[i*jmax*kmax+j*kmax+k] * ( p[(i+1)*jmax*kmax+j*kmax+(k+1)] - p[(i-1)*jmax*kmax+j*kmax+(k+1)] - p[(i+1)*jmax*kmax+j*kmax+(k-1)] + p[(i-1)*jmax*kmax+j*kmax+(k-1)] ) + c0[i*jmax*kmax+j*kmax+k] * p[(i-1)*jmax*kmax+j*kmax+k] + c1[i*jmax*kmax+j*kmax+k] * p[i*jmax*kmax+(j-1)*kmax+k] + c2[i*jmax*kmax+j*kmax+k] * p[i*jmax*kmax+j*kmax+(k-1)] + wrk1[i*jmax*kmax+j*kmax+k]; ss = ( s0 * a3[i*jmax*kmax+j*kmax+k] - p[i*jmax*kmax+j*kmax+k] ) * bnd[i*jmax*kmax+j*kmax+k]; temp = temp + ss*ss; wrk2[i*jmax*kmax+j*kmax+k] = p[i*jmax*kmax+j*kmax+k] + omega * ss; } for(i=tid*size ; i<(tid+1)*size ; ++i) for(j=1 ; j<jmax-1 ; ++j) for(k=1 ; k<kmax-1 ; ++k) p[i*jmax*kmax+j*kmax+k] = wrk2[i*jmax*kmax+j*kmax+k]; } /* end n loop */ gosa[tid] = temp; } int main(){ int i, j, k; float final_gosa; double cpu0, cpu1, nflop, xmflops2, score; float gosa[THREAD_NUM]; /************************************/ float *p; float *a0, *a1, *a2, *a3; float *b0, *b1, *b2; float *c0, *c1, *c2; float *bnd; float 
*wrk1, *wrk2; /************************************/ imax = MIMAX-1; jmax = MJMAX-1; kmax = MKMAX-1; //int N_IJK = MIMAX*MJMAX*MKMAX; int N_IJK = imax*jmax*kmax; /************************************/ float *dev_p; float *dev_a0, *dev_a1, *dev_a2, *dev_a3; float *dev_b0, *dev_b1, *dev_b2; float *dev_c0, *dev_c1, *dev_c2; float *dev_bnd; float *dev_wrk1, *dev_wrk2; float *dev_gosa; /************************************/ omega = 0.8; //initial_maxtrix(); /******allocate mem on CPU***********/ a0 = (float*)malloc(sizeof(float)*N_IJK); a1 = (float*)malloc(sizeof(float)*N_IJK); a2 = (float*)malloc(sizeof(float)*N_IJK); a3 = (float*)malloc(sizeof(float)*N_IJK); b0 = (float*)malloc(sizeof(float)*N_IJK); b1 = (float*)malloc(sizeof(float)*N_IJK); b2 = (float*)malloc(sizeof(float)*N_IJK); c0 = (float*)malloc(sizeof(float)*N_IJK); c1 = (float*)malloc(sizeof(float)*N_IJK); c2 = (float*)malloc(sizeof(float)*N_IJK); p = (float*)malloc(sizeof(float)*N_IJK); wrk1 = (float*)malloc(sizeof(float)*N_IJK); wrk2 = (float*)malloc(sizeof(float)*N_IJK); bnd = (float*)malloc(sizeof(float)*N_IJK); //gosa = (float*)malloc(sizeof(float)); /************************************/ /******allocate mem on GPU***********/ hipMalloc((void**)&dev_a0, N_IJK*sizeof(float)); hipMalloc((void**)&dev_a1, N_IJK*sizeof(float)); hipMalloc((void**)&dev_a2, N_IJK*sizeof(float)); hipMalloc((void**)&dev_a3, N_IJK*sizeof(float)); hipMalloc((void**)&dev_b0, N_IJK*sizeof(float)); hipMalloc((void**)&dev_b1, N_IJK*sizeof(float)); hipMalloc((void**)&dev_b2, N_IJK*sizeof(float)); hipMalloc((void**)&dev_c0, N_IJK*sizeof(float)); hipMalloc((void**)&dev_c1, N_IJK*sizeof(float)); hipMalloc((void**)&dev_c2, N_IJK*sizeof(float)); hipMalloc((void**)&dev_p, N_IJK*sizeof(float)); hipMalloc((void**)&dev_bnd, N_IJK*sizeof(float)); hipMalloc((void**)&dev_wrk1, N_IJK*sizeof(float)); hipMalloc((void**)&dev_wrk2, N_IJK*sizeof(float)); hipMalloc((void**)&dev_gosa, sizeof(float)*THREAD_NUM); /************************************/ /*****Initialize*********************/ //int i,j,k; for(i=0 ; i<imax ; ++i) for(j=0 ; j<jmax ; ++j) for(k=0 ; k<kmax ; ++k){ a0[i*jmax*kmax+j*kmax+k]=0.0; a1[i*jmax*kmax+j*kmax+k]=0.0; a2[i*jmax*kmax+j*kmax+k]=0.0; a3[i*jmax*kmax+j*kmax+k]=0.0; b0[i*jmax*kmax+j*kmax+k]=0.0; b1[i*jmax*kmax+j*kmax+k]=0.0; b2[i*jmax*kmax+j*kmax+k]=0.0; c0[i*jmax*kmax+j*kmax+k]=0.0; c1[i*jmax*kmax+j*kmax+k]=0.0; c2[i*jmax*kmax+j*kmax+k]=0.0; p[i*jmax*kmax+j*kmax+k]=0.0; wrk1[i*jmax*kmax+j*kmax+k]=0.0; bnd[i*jmax*kmax+j*kmax+k]=0.0; } for(i=0 ; i<imax ; ++i) for(j=0 ; j<jmax ; ++j) for(k=0 ; k<kmax ; ++k){ a0[i*jmax*kmax+j*kmax+k]=1.0; a1[i*jmax*kmax+j*kmax+k]=1.0; a2[i*jmax*kmax+j*kmax+k]=1.0; a3[i*jmax*kmax+j*kmax+k]=1.0/6.0; b0[i*jmax*kmax+j*kmax+k]=0.0; b1[i*jmax*kmax+j*kmax+k]=0.0; b2[i*jmax*kmax+j*kmax+k]=0.0; c0[i*jmax*kmax+j*kmax+k]=1.0; c1[i*jmax*kmax+j*kmax+k]=1.0; c2[i*jmax*kmax+j*kmax+k]=1.0; p[i*jmax*kmax+j*kmax+k]=(float)(k*k)/(float)((kmax-1)*(kmax-1)); wrk1[i*jmax*kmax+j*kmax+k]=0.0; bnd[i*jmax*kmax+j*kmax+k]=1.0; } /************************************/ /*****copy array to device mem*******/ hipMemcpy(dev_a0, a0, N_IJK*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_a1, a1, N_IJK*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_a2, a2, N_IJK*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_a3, a3, N_IJK*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_b0, b0, N_IJK*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_b1, b1, N_IJK*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_b2, b2, N_IJK*sizeof(float), 
hipMemcpyHostToDevice); hipMemcpy(dev_c0, c0, N_IJK*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_c1, c1, N_IJK*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_c2, c2, N_IJK*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_wrk1, wrk1, N_IJK*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_wrk2, wrk2, N_IJK*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_bnd, bnd, N_IJK*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_p, p, N_IJK*sizeof(float), hipMemcpyHostToDevice); //hipMemcpy(dev_gosa, gosa, sizeof(float), hipMemcpyHostToDevice); /************************************/ printf("mimax = %d mjmax = %d mkmax = %d\n", MIMAX, MJMAX, MKMAX); printf("imax = %d jmax = %d kmax = %d\n", imax, jmax, kmax); cpu0 = second(); /**measuring**/ hipLaunchKernelGGL(( jacobi), dim3(1),dim3(THREAD_NUM), 0, 0, dev_a0, dev_a1, dev_a2, dev_a3, dev_b0, dev_b1, dev_b2, dev_c0, dev_c1, dev_c2, dev_p, dev_wrk1, dev_wrk2, dev_bnd, NN, imax, jmax, kmax, omega, dev_gosa); hipDeviceSynchronize(); cpu1 = second(); hipMemcpy(&gosa, dev_gosa, sizeof(float)*THREAD_NUM, hipMemcpyDeviceToHost); /******Free mem on the GPU**********/ hipFree(dev_a0); hipFree(dev_a1); hipFree(dev_a2); hipFree(dev_a3); hipFree(dev_b0); hipFree(dev_b1); hipFree(dev_b2); hipFree(dev_c0); hipFree(dev_c1); hipFree(dev_c2); hipFree(dev_p); hipFree(dev_wrk1); hipFree(dev_wrk2); hipFree(dev_bnd); hipFree(dev_gosa); /************************************/ /********Final sum of gosa***********/ for(int gosa_index=0; gosa_index<THREAD_NUM; gosa_index++){ final_gosa += gosa[gosa_index]; //printf("Gosa%d: %e \n", gosa_index, gosa[gosa_index]); } /************************************/ nflop = (kmax-2)*(jmax-2)*(imax-2)*34; if(cpu1 != 0.0) xmflops2 = nflop/cpu1*1.0e-6*(float)NN; score = xmflops2/32.27; printf("gpu: %f sec.\n", cpu1); printf("Loop executed for %d times\n", NN); printf("Gosa: %e \n", final_gosa); //printf("MFLOPS measured: %f\n", xmflops2); //printf("Score: %f\n", score); return(0); } /*thread_num = 127*/ /*tid = threadIdx.x+1 size = (imax-1)/(imax-1)*/ /*for 1~200 for i=tid*size; i<(tid+1)*size; i++ for j for k */ /*hipMalloc(dev_gosa, sizeof(float)*thread_num)*/ /*sum*/ /* http://www2.kimicat.com/%E6%94%B9%E8%89%AF%E7%AC%AC%E4%B8%80%E5%80%8B%E7%A8%8B%E5%BC%8F http://steve90370.blogspot.tw/2010/11/cuda_20.html http://www2.kimicat.com/ */
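Editor's note: the jacobi kernel and the initialisation loops in the file above all address the arrays with the same flattened index in place of the commented-out 3-D arrays. The small helper below is not part of the original benchmark; it only documents that mapping.

// Editor's sketch: row-major flattening used throughout jacobi().
__host__ __device__ inline int idx3(int i, int j, int k, int jmax, int kmax)
{
    return i * jmax * kmax + j * kmax + k;   // p[i][j][k]  <->  p[idx3(i, j, k, jmax, kmax)]
}
// e.g.  p[(i+1)*jmax*kmax + j*kmax + k]  is  p[idx3(i+1, j, k, jmax, kmax)]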
ecd217e28dfe5518cb6b5847fb08164f92d0da58.cu
#include<stdio.h>
#include<sys/time.h>

#define MIMAX 129
#define MJMAX 65
#define MKMAX 65
#define NN 200
#define THREAD_NUM 127

/*static float p[MIMAX][MJMAX][MKMAX];
static float a[MIMAX][MJMAX][MKMAX][4];
static float b[MIMAX][MJMAX][MKMAX][3];
static float c[MIMAX][MJMAX][MKMAX][3];
static float bnd[MIMAX][MJMAX][MKMAX];
static float work1[MIMAX][MJMAX][MKMAX];
static float work2[MIMAX][MJMAX][MKMAX];*/

static int imax, jmax, kmax;
static float omega;

/*void initial_matrix(){
  int i, j, k;
  for(i=0; i<imax; i++)
    for(j=0; j<jmax; j++)
      for(k=0; k<kmax; k++){
        a[i][j][k][0]=0.0; a[i][j][k][1]=0.0; a[i][j][k][2]=0.0; a[i][j][k][3]=0.0;
        b[i][j][k][0]=0.0; b[i][j][k][1]=0.0; b[i][j][k][2]=0.0;
        c[i][j][k][0]=0.0; c[i][j][k][1]=0.0; c[i][j][k][2]=0.0;
        p[i][j][k]=0.0; work1[i][j][k]=0.0; bnd[i][j][k]=0.0;
      }
  for(i=0 ; i<imax ; ++i)
    for(j=0 ; j<jmax ; ++j)
      for(k=0 ; k<kmax ; ++k){
        a[i][j][k][0]=1.0; a[i][j][k][1]=1.0; a[i][j][k][2]=1.0; a[i][j][k][3]=1.0/6.0;
        b[i][j][k][0]=0.0; b[i][j][k][1]=0.0; b[i][j][k][2]=0.0;
        c[i][j][k][0]=1.0; c[i][j][k][1]=1.0; c[i][j][k][2]=1.0;
        p[i][j][k]=(float)(k*k)/(float)((kmax-1)*(kmax-1));
        work1[i][j][k]=0.0; bnd[i][j][k]=1.0;
      }
}*/

double second(){
  struct timeval tm;
  double t;
  static int base_sec = 0, base_usec = 0;

  gettimeofday(&tm, NULL);
  if(base_sec == 0 && base_usec == 0){
    base_sec = tm.tv_sec;
    base_usec = tm.tv_usec;
    t = 0.0;
  }
  else{
    t = (double)(tm.tv_sec-base_sec) + ((double)(tm.tv_usec-base_usec))/1.0e6;
  }
  return t;
}

__global__ void jacobi(float *a0, float *a1, float *a2, float *a3,
                       float *b0, float *b1, float *b2,
                       float *c0, float *c1, float *c2,
                       float *p, float *wrk1, float *wrk2, float *bnd,
                       int nn, int imax, int jmax, int kmax,
                       float omega, float *gosa){
  int i, j, k, n;
  float s0, ss, temp;
  const int tid = threadIdx.x;
  /* Each thread owns one i-plane: (imax-1)/(imax-1) == 1 plane per thread.
     Note that thread 0 starts at i = 0 and therefore reads p[i-1][...][...],
     which is out of range; the reference Himeno loop runs over 1 <= i <= imax-2. */
  const int size = (imax-1)/(imax-1);

  for(n=0 ; n<nn ; ++n){
    temp = 0.0;

    for(i=tid*size ; i<(tid+1)*size ; ++i)
      for(j=1 ; j<jmax-1 ; ++j)
        for(k=1 ; k<kmax-1 ; ++k){
          s0 = a0[i*jmax*kmax+j*kmax+k] * p[(i+1)*jmax*kmax+j*kmax+k]
             + a1[i*jmax*kmax+j*kmax+k] * p[i*jmax*kmax+(j+1)*kmax+k]
             + a2[i*jmax*kmax+j*kmax+k] * p[i*jmax*kmax+j*kmax+(k+1)]
             + b0[i*jmax*kmax+j*kmax+k] * ( p[(i+1)*jmax*kmax+(j+1)*kmax+k] - p[(i+1)*jmax*kmax+(j-1)*kmax+k]
                                          - p[(i-1)*jmax*kmax+(j+1)*kmax+k] + p[(i-1)*jmax*kmax+(j-1)*kmax+k] )
             + b1[i*jmax*kmax+j*kmax+k] * ( p[i*jmax*kmax+(j+1)*kmax+(k+1)] - p[i*jmax*kmax+(j-1)*kmax+(k+1)]
                                          - p[i*jmax*kmax+(j+1)*kmax+(k-1)] + p[i*jmax*kmax+(j-1)*kmax+(k-1)] )
             + b2[i*jmax*kmax+j*kmax+k] * ( p[(i+1)*jmax*kmax+j*kmax+(k+1)] - p[(i-1)*jmax*kmax+j*kmax+(k+1)]
                                          - p[(i+1)*jmax*kmax+j*kmax+(k-1)] + p[(i-1)*jmax*kmax+j*kmax+(k-1)] )
             + c0[i*jmax*kmax+j*kmax+k] * p[(i-1)*jmax*kmax+j*kmax+k]
             + c1[i*jmax*kmax+j*kmax+k] * p[i*jmax*kmax+(j-1)*kmax+k]
             + c2[i*jmax*kmax+j*kmax+k] * p[i*jmax*kmax+j*kmax+(k-1)]
             + wrk1[i*jmax*kmax+j*kmax+k];

          ss = ( s0 * a3[i*jmax*kmax+j*kmax+k] - p[i*jmax*kmax+j*kmax+k] ) * bnd[i*jmax*kmax+j*kmax+k];
          temp = temp + ss*ss;
          wrk2[i*jmax*kmax+j*kmax+k] = p[i*jmax*kmax+j*kmax+k] + omega * ss;
        }

    /* Neighbouring i-planes are owned by other threads of this block, so the block
       must synchronise before and after p is overwritten to avoid intra-block races. */
    __syncthreads();

    for(i=tid*size ; i<(tid+1)*size ; ++i)
      for(j=1 ; j<jmax-1 ; ++j)
        for(k=1 ; k<kmax-1 ; ++k)
          p[i*jmax*kmax+j*kmax+k] = wrk2[i*jmax*kmax+j*kmax+k];

    __syncthreads();
  } /* end n loop */

  gosa[tid] = temp;
}

int main(){
  int i, j, k;
  float final_gosa = 0.0;  /* must be zero-initialised before the final sum */
  double cpu0, cpu1, nflop, xmflops2, score;
  float gosa[THREAD_NUM];

  /************************************/
  float *p;
  float *a0, *a1, *a2, *a3;
  float *b0, *b1, *b2;
  float *c0, *c1, *c2;
  float *bnd;
  float *wrk1, *wrk2;
  /************************************/

  imax = MIMAX-1;
  jmax = MJMAX-1;
  kmax = MKMAX-1;
  //int N_IJK = MIMAX*MJMAX*MKMAX;
  int N_IJK = imax*jmax*kmax;

  /************************************/
  float *dev_p;
  float *dev_a0, *dev_a1, *dev_a2, *dev_a3;
  float *dev_b0, *dev_b1, *dev_b2;
  float *dev_c0, *dev_c1, *dev_c2;
  float *dev_bnd;
  float *dev_wrk1, *dev_wrk2;
  float *dev_gosa;
  /************************************/

  omega = 0.8;
  //initial_matrix();

  /******allocate mem on CPU***********/
  a0 = (float*)malloc(sizeof(float)*N_IJK);
  a1 = (float*)malloc(sizeof(float)*N_IJK);
  a2 = (float*)malloc(sizeof(float)*N_IJK);
  a3 = (float*)malloc(sizeof(float)*N_IJK);
  b0 = (float*)malloc(sizeof(float)*N_IJK);
  b1 = (float*)malloc(sizeof(float)*N_IJK);
  b2 = (float*)malloc(sizeof(float)*N_IJK);
  c0 = (float*)malloc(sizeof(float)*N_IJK);
  c1 = (float*)malloc(sizeof(float)*N_IJK);
  c2 = (float*)malloc(sizeof(float)*N_IJK);
  p    = (float*)malloc(sizeof(float)*N_IJK);
  wrk1 = (float*)malloc(sizeof(float)*N_IJK);
  wrk2 = (float*)malloc(sizeof(float)*N_IJK);
  bnd  = (float*)malloc(sizeof(float)*N_IJK);
  //gosa = (float*)malloc(sizeof(float));
  /************************************/

  /******allocate mem on GPU***********/
  cudaMalloc((void**)&dev_a0, N_IJK*sizeof(float));
  cudaMalloc((void**)&dev_a1, N_IJK*sizeof(float));
  cudaMalloc((void**)&dev_a2, N_IJK*sizeof(float));
  cudaMalloc((void**)&dev_a3, N_IJK*sizeof(float));
  cudaMalloc((void**)&dev_b0, N_IJK*sizeof(float));
  cudaMalloc((void**)&dev_b1, N_IJK*sizeof(float));
  cudaMalloc((void**)&dev_b2, N_IJK*sizeof(float));
  cudaMalloc((void**)&dev_c0, N_IJK*sizeof(float));
  cudaMalloc((void**)&dev_c1, N_IJK*sizeof(float));
  cudaMalloc((void**)&dev_c2, N_IJK*sizeof(float));
  cudaMalloc((void**)&dev_p, N_IJK*sizeof(float));
  cudaMalloc((void**)&dev_bnd, N_IJK*sizeof(float));
  cudaMalloc((void**)&dev_wrk1, N_IJK*sizeof(float));
  cudaMalloc((void**)&dev_wrk2, N_IJK*sizeof(float));
  cudaMalloc((void**)&dev_gosa, sizeof(float)*THREAD_NUM);
  /************************************/

  /*****Initialize*********************/
  //int i,j,k;
  for(i=0 ; i<imax ; ++i)
    for(j=0 ; j<jmax ; ++j)
      for(k=0 ; k<kmax ; ++k){
        a0[i*jmax*kmax+j*kmax+k]=0.0; a1[i*jmax*kmax+j*kmax+k]=0.0; a2[i*jmax*kmax+j*kmax+k]=0.0; a3[i*jmax*kmax+j*kmax+k]=0.0;
        b0[i*jmax*kmax+j*kmax+k]=0.0; b1[i*jmax*kmax+j*kmax+k]=0.0; b2[i*jmax*kmax+j*kmax+k]=0.0;
        c0[i*jmax*kmax+j*kmax+k]=0.0; c1[i*jmax*kmax+j*kmax+k]=0.0; c2[i*jmax*kmax+j*kmax+k]=0.0;
        p[i*jmax*kmax+j*kmax+k]=0.0; wrk1[i*jmax*kmax+j*kmax+k]=0.0; bnd[i*jmax*kmax+j*kmax+k]=0.0;
      }

  for(i=0 ; i<imax ; ++i)
    for(j=0 ; j<jmax ; ++j)
      for(k=0 ; k<kmax ; ++k){
        a0[i*jmax*kmax+j*kmax+k]=1.0; a1[i*jmax*kmax+j*kmax+k]=1.0; a2[i*jmax*kmax+j*kmax+k]=1.0; a3[i*jmax*kmax+j*kmax+k]=1.0/6.0;
        b0[i*jmax*kmax+j*kmax+k]=0.0; b1[i*jmax*kmax+j*kmax+k]=0.0; b2[i*jmax*kmax+j*kmax+k]=0.0;
        c0[i*jmax*kmax+j*kmax+k]=1.0; c1[i*jmax*kmax+j*kmax+k]=1.0; c2[i*jmax*kmax+j*kmax+k]=1.0;
        p[i*jmax*kmax+j*kmax+k]=(float)(k*k)/(float)((kmax-1)*(kmax-1));
        wrk1[i*jmax*kmax+j*kmax+k]=0.0; bnd[i*jmax*kmax+j*kmax+k]=1.0;
      }
  /************************************/

  /*****copy array to device mem*******/
  cudaMemcpy(dev_a0, a0, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(dev_a1, a1, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(dev_a2, a2, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(dev_a3, a3, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(dev_b0, b0, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(dev_b1, b1, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(dev_b2, b2, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(dev_c0, c0, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(dev_c1, c1, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(dev_c2, c2, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(dev_wrk1, wrk1, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(dev_wrk2, wrk2, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(dev_bnd, bnd, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(dev_p, p, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
  //cudaMemcpy(dev_gosa, gosa, sizeof(float), cudaMemcpyHostToDevice);
  /************************************/

  printf("mimax = %d mjmax = %d mkmax = %d\n", MIMAX, MJMAX, MKMAX);
  printf("imax = %d jmax = %d kmax = %d\n", imax, jmax, kmax);

  cpu0 = second(); /**measuring**/

  jacobi<<<1,THREAD_NUM>>>(dev_a0, dev_a1, dev_a2, dev_a3,
                           dev_b0, dev_b1, dev_b2,
                           dev_c0, dev_c1, dev_c2,
                           dev_p, dev_wrk1, dev_wrk2, dev_bnd,
                           NN, imax, jmax, kmax, omega, dev_gosa);
  cudaDeviceSynchronize();

  cpu1 = second();

  cudaMemcpy(&gosa, dev_gosa, sizeof(float)*THREAD_NUM, cudaMemcpyDeviceToHost);

  /******Free mem on the GPU**********/
  cudaFree(dev_a0); cudaFree(dev_a1); cudaFree(dev_a2); cudaFree(dev_a3);
  cudaFree(dev_b0); cudaFree(dev_b1); cudaFree(dev_b2);
  cudaFree(dev_c0); cudaFree(dev_c1); cudaFree(dev_c2);
  cudaFree(dev_p); cudaFree(dev_wrk1); cudaFree(dev_wrk2);
  cudaFree(dev_bnd); cudaFree(dev_gosa);
  /************************************/

  /********Final sum of gosa***********/
  for(int gosa_index=0; gosa_index<THREAD_NUM; gosa_index++){
    final_gosa += gosa[gosa_index];
    //printf("Gosa%d: %e \n", gosa_index, gosa[gosa_index]);
  }
  /************************************/

  nflop = (kmax-2)*(jmax-2)*(imax-2)*34;
  if(cpu1 != 0.0)
    xmflops2 = nflop/cpu1*1.0e-6*(float)NN;  /* cpu0 is 0.0 by construction of second(), so cpu1 is the elapsed time */
  score = xmflops2/32.27;

  printf("gpu: %f sec.\n", cpu1);
  printf("Loop executed for %d times\n", NN);
  printf("Gosa: %e \n", final_gosa);
  //printf("MFLOPS measured: %f\n", xmflops2);
  //printf("Score: %f\n", score);

  return(0);
}

/* Notes kept from the original source:
   thread_num = 127
   tid = threadIdx.x+1
   size = (imax-1)/(imax-1)
   for 1~200
     for i=tid*size; i<(tid+1)*size; i++
       for j
         for k
   cudaMalloc(dev_gosa, sizeof(float)*thread_num)
   sum */
/*
http://www2.kimicat.com/%E6%94%B9%E8%89%AF%E7%AC%AC%E4%B8%80%E5%80%8B%E7%A8%8B%E5%BC%8F
http://steve90370.blogspot.tw/2010/11/cuda_20.html
http://www2.kimicat.com/
*/
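The benchmark above copies the per-thread residuals gosa[THREAD_NUM] back to the host and sums them there. A minimal sketch of a device-side alternative is shown below; the kernel name reduce_gosa and the single-float output buffer dev_final are illustrative assumptions, not part of the benchmark source.

// Hypothetical helper (not in the original benchmark): sum the per-thread
// residuals on the device so only one float has to be copied back.
__global__ void reduce_gosa(const float *gosa, int n, float *out)
{
  extern __shared__ float partial[];           // one slot per thread
  int tid = threadIdx.x;
  partial[tid] = (tid < n) ? gosa[tid] : 0.0f; // guard against n < blockDim.x
  __syncthreads();
  for (int s = blockDim.x / 2; s > 0; s >>= 1) {  // blockDim.x must be a power of two
    if (tid < s) partial[tid] += partial[tid + s];
    __syncthreads();
  }
  if (tid == 0) *out = partial[0];
}
// Possible launch, assuming dev_gosa holds THREAD_NUM partial sums and
// dev_final is a single device float:
//   reduce_gosa<<<1, 128, 128 * sizeof(float)>>>(dev_gosa, THREAD_NUM, dev_final);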
abc686c67dc5d3f42430fdfbe2ece46814bab34e.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * kmeans_gpu.cu
 */

#include <hip/hip_runtime_api.h>
#include <iostream>
#include <iomanip>
#include <stdio.h>

#include "util.h"

using namespace std;

// declarations
static int ThreadsPerBlock = 512;

static inline int nextPowerOfTwo(int n);

__device__ inline static float euclid_dist_2(const unsigned int size, const long count,
                                             const unsigned int k, float *points,
                                             float *clusters, int oId, int cId);

__global__ void compute_delta(int *deviceIntermediates, int numIntermediates, int numIntermediates2);

__global__ void nearest_cluster(const unsigned int size, const long count, const unsigned int k,
                                float *points, float *deviceClusters, int *membership, int *intermediates);

// function definitions
static inline int nextPowerOfTwo(int n) {
    n--;
    n = n >> 1 | n;
    n = n >> 2 | n;
    n = n >> 4 | n;
    n = n >> 8 | n;
    n = n >> 16 | n;
    // n = n >> 32 | n
    return ++n;
}

__device__ inline static float euclid_dist_2(const unsigned int size, const long count,
                                             const unsigned int k, float *points,
                                             float *clusters, int oId, int cId) {
    float ans = 0.0;
    for (int i = 0; i < size; i++) {
        ans += (points[count * i + oId] - clusters[k * i + cId]) *
               (points[count * i + oId] - clusters[k * i + cId]);
    }
    return ans;
}

__global__ void compute_delta(int *deviceIntermediates, int numIntermediates, int numIntermediates2) {
    extern __shared__ unsigned int intermediates[];

    intermediates[threadIdx.x] = (threadIdx.x < numIntermediates) ? deviceIntermediates[threadIdx.x] : 0;
    __syncthreads();

    for (unsigned int s = numIntermediates2 / 2; s > 0; s >>= 1) {
        if (threadIdx.x < s) {
            intermediates[threadIdx.x] += intermediates[threadIdx.x + s];
        }
        __syncthreads();
    }

    if (threadIdx.x == 0) {
        deviceIntermediates[0] = intermediates[0];
    }
}

__global__ void nearest_cluster(const unsigned int size, const long count, const unsigned int k,
                                float *points, float *deviceClusters, int *membership, int *intermediates) {
    extern __shared__ char sharedMemory[];

    unsigned char *membershipChanged = (unsigned char *) sharedMemory;
    int objectId = blockDim.x * blockIdx.x + threadIdx.x;

    membershipChanged[threadIdx.x] = 0;

    // shared mem
    float *clusters = (float *) (sharedMemory + blockDim.x);
    for (int i = threadIdx.x; i < k; i += blockDim.x) {
        for (int j = 0; j < size; j++) {
            clusters[k * j + i] = deviceClusters[k * j + i];
        }
    }
    __syncthreads();

    if (objectId < count) {
        int index = 0;
        float dist;
        float min_dist = euclid_dist_2(size, count, k, points, clusters, objectId, 0);

        for (int i = 1; i < k; i++) {
            dist = euclid_dist_2(size, count, k, points, clusters, objectId, i);
            if (dist < min_dist) {
                min_dist = dist;
                index = i;
            }
        }

        if (membership[objectId] != index) {
            membershipChanged[threadIdx.x] = 1;
        }
        membership[objectId] = index;

        __syncthreads();  // For membershipChanged[]

        for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
            if (threadIdx.x < s) {
                membershipChanged[threadIdx.x] += membershipChanged[threadIdx.x + s];
            }
            __syncthreads();
        }

        if (threadIdx.x == 0) {
            intermediates[blockIdx.x] = membershipChanged[0];
        }
    }
}

void kmeans_gpu(float **points, float **clusters, int *membership,
                const long count, const unsigned int size, const unsigned int k) {
    int index, loop = 0;
    int *newClusterSize = (int*) calloc(k, sizeof(int));
    float delta;
    float threshold = 0.0002;
    float **dimObjects = malloc2D(size, count);
    float **dimClusters = malloc2D(size, k);
    float **newClusters = malloc2D(size, k);

    float *deviceObjects;
    float *deviceClusters;
    int *deviceMembership;
    int *deviceIntermediates;

    double startTime = time();

    hipSetDevice(0);

    for (int i = 0; i < size; i++) {
        for (int j = 0; j < count; j++)
            dimObjects[i][j] = points[j][i];
        for (int j = 0; j < k; j++)
            dimClusters[i][j] = dimObjects[i][j];
    }

    for (int i = 0; i < count; i++) {
        membership[i] = -1;
    }

    memset(newClusters[0], 0, size * k * sizeof(float));

    const unsigned int numBlocks = (count + ThreadsPerBlock - 1) / ThreadsPerBlock;
    const unsigned int clusterBlockSharedDataSize =
        ThreadsPerBlock * sizeof(unsigned char) + k * size * sizeof(float);
    //const unsigned int clusterBlockSharedDataSize = numThreadsPerClusterBlock * sizeof(unsigned char);

    hipDeviceProp_t prop;
    int deviceNum;
    hipGetDevice(&deviceNum);
    hipGetDeviceProperties(&prop, deviceNum);

    cout << " Blocks: " << numBlocks << endl;
    cout << " Threads: " << ThreadsPerBlock << endl;
    cout << " Shared Mem: " << clusterBlockSharedDataSize << " / " << prop.sharedMemPerBlock << endl;

    if (clusterBlockSharedDataSize > prop.sharedMemPerBlock) {
        cout << " ERROR: Not enough memory!" << endl;
    }

    const unsigned int numReductionThreads = nextPowerOfTwo(numBlocks);
    const unsigned int reductionBlockSharedDataSize = numReductionThreads * sizeof(unsigned int);

    hipMalloc((void **) &deviceClusters, k * size * sizeof(float));
    hipMalloc((void **) &deviceObjects, count * size * sizeof(float));
    hipMalloc((void **) &deviceMembership, count * sizeof(int));
    hipMalloc((void **) &deviceIntermediates, numReductionThreads * sizeof(unsigned int));

    hipMemcpy(deviceObjects, dimObjects[0], count * size * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(deviceMembership, membership, count * sizeof(int), hipMemcpyHostToDevice);

    do {
        hipMemcpy(deviceClusters, dimClusters[0], k * size * sizeof(float), hipMemcpyHostToDevice);

        hipLaunchKernelGGL(( nearest_cluster), dim3(numBlocks), dim3(ThreadsPerBlock), clusterBlockSharedDataSize, 0,
            size, count, k, deviceObjects, deviceClusters, deviceMembership, deviceIntermediates);

        hipLaunchKernelGGL(( compute_delta), dim3(1), dim3(numReductionThreads), reductionBlockSharedDataSize, 0,
            deviceIntermediates, numBlocks, numReductionThreads);

        hipDeviceSynchronize();

        hipMemcpy(membership, deviceMembership, count * sizeof(int), hipMemcpyDeviceToHost);
        checkCUDAError("kmeans 199");

        for (int i = 0; i < count; i++) {
            index = membership[i];
            newClusterSize[index]++;
            for (int j = 0; j < size; j++) {
                newClusters[j][index] += points[i][j];
            }
        }

        for (int i = 0; i < k; i++) {
            for (int j = 0; j < size; j++) {
                if (newClusterSize[i] > 0) {
                    dimClusters[j][i] = newClusters[j][i] / newClusterSize[i];
                }
                newClusters[j][i] = 0.0;
            }
            newClusterSize[i] = 0;
        }

        int d;
        hipMemcpy(&d, deviceIntermediates, sizeof(int), hipMemcpyDeviceToHost);
        delta = (float) d / count;
    } while (delta > threshold && loop++ < 500);

    for (int i = 0; i < k; i++) {
        for (int j = 0; j < size; j++) {
            clusters[i][j] = dimClusters[j][i];
        }
    }

    double endTime = time() - startTime;
    cout << "Time: " << endTime << " with " << loop << " iterations (";
    cout << std::setprecision(2) << (delta * 100) << "% changed)" << endl;

    hipFree(deviceObjects);
    hipFree(deviceClusters);
    hipFree(deviceMembership);
    hipFree(deviceIntermediates);

    free(dimObjects[0]);
    free(dimClusters[0]);
    free(newClusters[0]);
    free(dimObjects);
    free(dimClusters);
    free(newClusters);
    free(newClusterSize);
}
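checkCUDAError() and malloc2D() come from util.h, which is not included in this corpus. A minimal sketch of what the error-check helper might look like in the HIP build is given below; the exact signature and behaviour in util.h are assumptions.

// Hypothetical stand-in for the util.h helper used above (assumed signature).
#include <hip/hip_runtime_api.h>
#include <cstdio>
#include <cstdlib>

void checkCUDAError(const char *msg)
{
    hipError_t err = hipGetLastError();  // most recent (possibly asynchronous) error
    if (err != hipSuccess) {
        fprintf(stderr, "%s: %s\n", msg, hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}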
abc686c67dc5d3f42430fdfbe2ece46814bab34e.cu
/*
 * kmeans_gpu.cu
 */

#include <cuda_runtime_api.h>
#include <iostream>
#include <iomanip>
#include <stdio.h>

#include "util.h"

using namespace std;

// declarations
static int ThreadsPerBlock = 512;

static inline int nextPowerOfTwo(int n);

__device__ inline static float euclid_dist_2(const unsigned int size, const long count,
                                             const unsigned int k, float *points,
                                             float *clusters, int oId, int cId);

__global__ void compute_delta(int *deviceIntermediates, int numIntermediates, int numIntermediates2);

__global__ void nearest_cluster(const unsigned int size, const long count, const unsigned int k,
                                float *points, float *deviceClusters, int *membership, int *intermediates);

// function definitions
static inline int nextPowerOfTwo(int n) {
    n--;
    n = n >> 1 | n;
    n = n >> 2 | n;
    n = n >> 4 | n;
    n = n >> 8 | n;
    n = n >> 16 | n;
    // n = n >> 32 | n
    return ++n;
}

__device__ inline static float euclid_dist_2(const unsigned int size, const long count,
                                             const unsigned int k, float *points,
                                             float *clusters, int oId, int cId) {
    float ans = 0.0;
    for (int i = 0; i < size; i++) {
        ans += (points[count * i + oId] - clusters[k * i + cId]) *
               (points[count * i + oId] - clusters[k * i + cId]);
    }
    return ans;
}

__global__ void compute_delta(int *deviceIntermediates, int numIntermediates, int numIntermediates2) {
    extern __shared__ unsigned int intermediates[];

    intermediates[threadIdx.x] = (threadIdx.x < numIntermediates) ? deviceIntermediates[threadIdx.x] : 0;
    __syncthreads();

    for (unsigned int s = numIntermediates2 / 2; s > 0; s >>= 1) {
        if (threadIdx.x < s) {
            intermediates[threadIdx.x] += intermediates[threadIdx.x + s];
        }
        __syncthreads();
    }

    if (threadIdx.x == 0) {
        deviceIntermediates[0] = intermediates[0];
    }
}

__global__ void nearest_cluster(const unsigned int size, const long count, const unsigned int k,
                                float *points, float *deviceClusters, int *membership, int *intermediates) {
    extern __shared__ char sharedMemory[];

    unsigned char *membershipChanged = (unsigned char *) sharedMemory;
    int objectId = blockDim.x * blockIdx.x + threadIdx.x;

    membershipChanged[threadIdx.x] = 0;

    // shared mem
    float *clusters = (float *) (sharedMemory + blockDim.x);
    for (int i = threadIdx.x; i < k; i += blockDim.x) {
        for (int j = 0; j < size; j++) {
            clusters[k * j + i] = deviceClusters[k * j + i];
        }
    }
    __syncthreads();

    if (objectId < count) {
        int index = 0;
        float dist;
        float min_dist = euclid_dist_2(size, count, k, points, clusters, objectId, 0);

        for (int i = 1; i < k; i++) {
            dist = euclid_dist_2(size, count, k, points, clusters, objectId, i);
            if (dist < min_dist) {
                min_dist = dist;
                index = i;
            }
        }

        if (membership[objectId] != index) {
            membershipChanged[threadIdx.x] = 1;
        }
        membership[objectId] = index;

        __syncthreads();  // For membershipChanged[]

        for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
            if (threadIdx.x < s) {
                membershipChanged[threadIdx.x] += membershipChanged[threadIdx.x + s];
            }
            __syncthreads();
        }

        if (threadIdx.x == 0) {
            intermediates[blockIdx.x] = membershipChanged[0];
        }
    }
}

void kmeans_gpu(float **points, float **clusters, int *membership,
                const long count, const unsigned int size, const unsigned int k) {
    int index, loop = 0;
    int *newClusterSize = (int*) calloc(k, sizeof(int));
    float delta;
    float threshold = 0.0002;
    float **dimObjects = malloc2D(size, count);
    float **dimClusters = malloc2D(size, k);
    float **newClusters = malloc2D(size, k);

    float *deviceObjects;
    float *deviceClusters;
    int *deviceMembership;
    int *deviceIntermediates;

    double startTime = time();

    cudaSetDevice(0);

    for (int i = 0; i < size; i++) {
        for (int j = 0; j < count; j++)
            dimObjects[i][j] = points[j][i];
        for (int j = 0; j < k; j++)
            dimClusters[i][j] = dimObjects[i][j];
    }

    for (int i = 0; i < count; i++) {
        membership[i] = -1;
    }

    memset(newClusters[0], 0, size * k * sizeof(float));

    const unsigned int numBlocks = (count + ThreadsPerBlock - 1) / ThreadsPerBlock;
    const unsigned int clusterBlockSharedDataSize =
        ThreadsPerBlock * sizeof(unsigned char) + k * size * sizeof(float);
    //const unsigned int clusterBlockSharedDataSize = numThreadsPerClusterBlock * sizeof(unsigned char);

    cudaDeviceProp prop;
    int deviceNum;
    cudaGetDevice(&deviceNum);
    cudaGetDeviceProperties(&prop, deviceNum);

    cout << " Blocks: " << numBlocks << endl;
    cout << " Threads: " << ThreadsPerBlock << endl;
    cout << " Shared Mem: " << clusterBlockSharedDataSize << " / " << prop.sharedMemPerBlock << endl;

    if (clusterBlockSharedDataSize > prop.sharedMemPerBlock) {
        cout << " ERROR: Not enough memory!" << endl;
    }

    const unsigned int numReductionThreads = nextPowerOfTwo(numBlocks);
    const unsigned int reductionBlockSharedDataSize = numReductionThreads * sizeof(unsigned int);

    cudaMalloc((void **) &deviceClusters, k * size * sizeof(float));
    cudaMalloc((void **) &deviceObjects, count * size * sizeof(float));
    cudaMalloc((void **) &deviceMembership, count * sizeof(int));
    cudaMalloc((void **) &deviceIntermediates, numReductionThreads * sizeof(unsigned int));

    cudaMemcpy(deviceObjects, dimObjects[0], count * size * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(deviceMembership, membership, count * sizeof(int), cudaMemcpyHostToDevice);

    do {
        cudaMemcpy(deviceClusters, dimClusters[0], k * size * sizeof(float), cudaMemcpyHostToDevice);

        nearest_cluster<<<numBlocks, ThreadsPerBlock, clusterBlockSharedDataSize>>>(
            size, count, k, deviceObjects, deviceClusters, deviceMembership, deviceIntermediates);

        compute_delta<<<1, numReductionThreads, reductionBlockSharedDataSize>>>(
            deviceIntermediates, numBlocks, numReductionThreads);

        cudaDeviceSynchronize();

        cudaMemcpy(membership, deviceMembership, count * sizeof(int), cudaMemcpyDeviceToHost);
        checkCUDAError("kmeans 199");

        for (int i = 0; i < count; i++) {
            index = membership[i];
            newClusterSize[index]++;
            for (int j = 0; j < size; j++) {
                newClusters[j][index] += points[i][j];
            }
        }

        for (int i = 0; i < k; i++) {
            for (int j = 0; j < size; j++) {
                if (newClusterSize[i] > 0) {
                    dimClusters[j][i] = newClusters[j][i] / newClusterSize[i];
                }
                newClusters[j][i] = 0.0;
            }
            newClusterSize[i] = 0;
        }

        int d;
        cudaMemcpy(&d, deviceIntermediates, sizeof(int), cudaMemcpyDeviceToHost);
        delta = (float) d / count;
    } while (delta > threshold && loop++ < 500);

    for (int i = 0; i < k; i++) {
        for (int j = 0; j < size; j++) {
            clusters[i][j] = dimClusters[j][i];
        }
    }

    double endTime = time() - startTime;
    cout << "Time: " << endTime << " with " << loop << " iterations (";
    cout << std::setprecision(2) << (delta * 100) << "% changed)" << endl;

    cudaFree(deviceObjects);
    cudaFree(deviceClusters);
    cudaFree(deviceMembership);
    cudaFree(deviceIntermediates);

    free(dimObjects[0]);
    free(dimClusters[0]);
    free(newClusters[0]);
    free(dimObjects);
    free(dimClusters);
    free(newClusters);
    free(newClusterSize);
}
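euclid_dist_2 relies on the dimension-major layout built in kmeans_gpu (dimObjects[i][j] = points[j][i], stored contiguously so coordinate i of object oId lives at count*i + oId). A small host-side reference, useful for spot-checking the kernel's assignments, might look like the sketch below; the function name euclid_dist_2_host is hypothetical.

// Hypothetical host reference for euclid_dist_2: same dimension-major indexing
// as the device code (object coordinate at count*i + oId, centroid coordinate
// at k*i + cId).
static float euclid_dist_2_host(unsigned int size, long count, unsigned int k,
                                const float *points, const float *clusters,
                                int oId, int cId)
{
    float ans = 0.0f;
    for (unsigned int i = 0; i < size; i++) {
        float d = points[count * i + oId] - clusters[k * i + cId];
        ans += d * d;
    }
    return ans;
}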
540d71f4940926dbbd4750d19cc031acccd5e314.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //Device funtion to compute a thread block's accumulated matrix product __device__ void block_matrix_product(int K_dim){ //Fragmemnts used to store data fetched from SMEM value_t frag_a[ThreadItemsY]; value_t frag_b[ThreadItemsX]; //Accumulator storage accum_t accumulator[ThreadItemsX][ThreadItemsY]; //GEMM mailoop - iterates over the entire K dimension - not unrolled for(int kblock = 0;kblock < K_dim;kblock += BlockItemsK){ //Load A and B tiles from global memory and store to SMEM // __syncthreads(); //Warp tile structure - iterates over the thread Block tile #pragma unroll for(int warp_k = 0; warp_k < BlockItemsK; warp_k += WarpItemsK){ //Fetch frag_a and frag_b from SMEM corresponding to k-index //Thread tile structure -- accumulate an outer product #pragma unroll for(int thread_x = 0; thread_x < ThreadItemsX;++thread_x){ #pragma unroll for(int thread_y = 0;thread_y < ThreadItemsY;++thread_y){ accumulator[thread_x][thread_y] += frag_a[y] * frag_b[x]; } } } __syncthreads(); } } template < int BlockItemsY, //Height in rows of a tile in matrix C int BlockItemsX, //Width in columns of a tile in matrix C int ThreadItemsY,//Height in rows of a thread-tile C int ThreadItemsX,//Width in columns of a thread-tile C int BlockItemsK, //Number of K-split subgroups in a block bool UseDounbleScratchTiles,///Whether to double buffer shared memory grid_raster_strategy::kind_t RasterStrategy //Grid rasterization strategy > struct block_task_policy; template < ///Parameterization of block_task_policy typename block_task_policy_t, //Multiplicand value type(matrices A and B) typename value_t, //Accumulator value type(matrix C and scalars) typename accum_t, //layout enumerant for matrix A matrix_transform_t::kind_t TransformA, //Alignment (in bytes) for A operand int LdgAlignA, //Layout enumerant for matrix B matrix_transform_t::kind_t TransformB, //Alignment (in bytes) for B operand int LdgAlignB, //Epilogue functor applied to matrix product typename epilogue_op_t, //Alignment (in bytes) for C operand int LdgAlignC, //Whether GEMM supports matrix sizes other than mult of BlockItems{XY} bool Ragged >struct block_task; //CUTLASS SGEMM example __global__ void gemm_kernel(float* C, float const * A, float const *B, int M, int N, int K){ //Define the GEMM tile_sizes typedef block_task_policy< 128,//BlockItemsY: Height in rows of a tile 32, //BlockItemsX - Width in columns of a tile 8, //ThreadItemsY - Height in rows of a thread-tile 4, //ThreadItemsX - Width in columns of a thread-tile 8, //BlockItemsK - Depth of a tile true, //UseDoubleScratchTiles -whether to double-buffer SMEM block_raster_enum::Default //Block rasterization strategy >block_task_policy_t; //Define the epilogue functor typedef gemm::blas_scaled_epilogue<float,float,float> epilogue_op_t; //Define the block task type typedef block_type< block_task_policy_t, float, float, matrix_transform_t::NonTranspose, 4, matrix_transform_t::NonTranspose, 4, epilogue_op_t, 4, true, > block_type_t; __shared__ block_task_t::scratch_storage_t smem; //Construct and run the task block_task_t( reinterpret_cast(&smem), &smem, A, B, C, epilogue_op_t(1,0), M, N, K ).run(); }
540d71f4940926dbbd4750d19cc031acccd5e314.cu
//Device funtion to compute a thread block's accumulated matrix product __device__ void block_matrix_product(int K_dim){ //Fragmemnts used to store data fetched from SMEM value_t frag_a[ThreadItemsY]; value_t frag_b[ThreadItemsX]; //Accumulator storage accum_t accumulator[ThreadItemsX][ThreadItemsY]; //GEMM mailoop - iterates over the entire K dimension - not unrolled for(int kblock = 0;kblock < K_dim;kblock += BlockItemsK){ //Load A and B tiles from global memory and store to SMEM // __syncthreads(); //Warp tile structure - iterates over the thread Block tile #pragma unroll for(int warp_k = 0; warp_k < BlockItemsK; warp_k += WarpItemsK){ //Fetch frag_a and frag_b from SMEM corresponding to k-index //Thread tile structure -- accumulate an outer product #pragma unroll for(int thread_x = 0; thread_x < ThreadItemsX;++thread_x){ #pragma unroll for(int thread_y = 0;thread_y < ThreadItemsY;++thread_y){ accumulator[thread_x][thread_y] += frag_a[y] * frag_b[x]; } } } __syncthreads(); } } template < int BlockItemsY, //Height in rows of a tile in matrix C int BlockItemsX, //Width in columns of a tile in matrix C int ThreadItemsY,//Height in rows of a thread-tile C int ThreadItemsX,//Width in columns of a thread-tile C int BlockItemsK, //Number of K-split subgroups in a block bool UseDounbleScratchTiles,///Whether to double buffer shared memory grid_raster_strategy::kind_t RasterStrategy //Grid rasterization strategy > struct block_task_policy; template < ///Parameterization of block_task_policy typename block_task_policy_t, //Multiplicand value type(matrices A and B) typename value_t, //Accumulator value type(matrix C and scalars) typename accum_t, //layout enumerant for matrix A matrix_transform_t::kind_t TransformA, //Alignment (in bytes) for A operand int LdgAlignA, //Layout enumerant for matrix B matrix_transform_t::kind_t TransformB, //Alignment (in bytes) for B operand int LdgAlignB, //Epilogue functor applied to matrix product typename epilogue_op_t, //Alignment (in bytes) for C operand int LdgAlignC, //Whether GEMM supports matrix sizes other than mult of BlockItems{XY} bool Ragged >struct block_task; //CUTLASS SGEMM example __global__ void gemm_kernel(float* C, float const * A, float const *B, int M, int N, int K){ //Define the GEMM tile_sizes typedef block_task_policy< 128,//BlockItemsY: Height in rows of a tile 32, //BlockItemsX - Width in columns of a tile 8, //ThreadItemsY - Height in rows of a thread-tile 4, //ThreadItemsX - Width in columns of a thread-tile 8, //BlockItemsK - Depth of a tile true, //UseDoubleScratchTiles -whether to double-buffer SMEM block_raster_enum::Default //Block rasterization strategy >block_task_policy_t; //Define the epilogue functor typedef gemm::blas_scaled_epilogue<float,float,float> epilogue_op_t; //Define the block task type typedef block_type< block_task_policy_t, float, float, matrix_transform_t::NonTranspose, 4, matrix_transform_t::NonTranspose, 4, epilogue_op_t, 4, true, > block_type_t; __shared__ block_task_t::scratch_storage_t smem; //Construct and run the task block_task_t( reinterpret_cast(&smem), &smem, A, B, C, epilogue_op_t(1,0), M, N, K ).run(); }
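With the policy above, each thread block computes a 128x32 (BlockItemsY x BlockItemsX) tile of C, and each thread an 8x4 sub-tile, so a block needs 128*32 / (8*4) = 128 threads. A possible host-side launch is sketched below; the grid ordering and block shape are assumptions for illustration, since the actual CUTLASS dispatch code is not part of this file.

// Hypothetical launch for the sketch above: one thread block per 128x32 tile of C,
// 128 threads per block (each covering an 8x4 thread-tile). Assumed to live in the
// same translation unit as gemm_kernel.
void launch_gemm(float *C, const float *A, const float *B, int M, int N, int K)
{
  dim3 block(128);                       // 128 threads cover one 128x32 block tile
  dim3 grid((M + 128 - 1) / 128,         // tiles along the rows of C (assumed ordering)
            (N + 32 - 1) / 32);          // tiles along the columns of C
  gemm_kernel<<<grid, block>>>(C, A, B, M, N, K);
}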