Dataset columns (string lengths as shown in the viewer):
hip_filename: string, length 5 to 84
hip_content: string, length 79 to 9.69M
cuda_filename: string, length 4 to 83
cuda_content: string, length 19 to 9.69M
70623b6d92a846b5b9076d3b1b4eace25429b4dd.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include <time.h> #define CHECK(call)\ {\ const hipError_t error = call;\ if (error != hipSuccess)\ {\ printf("Error: %s:%d, ", __FILE__, __LINE__);\ printf("code: %d, reason: %s\n", error, hipGetErrorString(error));\ exit(1);\ }\ }\ void sumArrayOnHost(float *A, float *B,float *C, const int N) { for (int i = 0; i < N; i++) C[i] = A[i] + B[i]; } // CHECK(hipMemcpy(d_C, gpuRes, bBytes, hipMemcpyHostToDevice)); __global__ void sumArrayOnDevice(float *A, float *B, float *C) { int i = threadIdx.x; C[i] = A[i] + B[i]; } void checkResult(float *host, float *device, const int N) { double epsilon = 1.0E-8; int match = 1; for (int i = 0; i < N; i++) { if (abs(host[i] - device[i]) > epsilon) { match = 0; printf("Don't match!\n"); printf("host %5.2f device %5.2f at current %d\n", host[i], device[i], i); break; } } if (match) printf("Array match\n\n"); return; } void initData(float *inp, int size) { time_t t; srand((unsigned) time(&t)); for (int i = 0; i < size; i++) inp[i] = (float)(rand() & 0xFF) / 10.0f; } int main(int argc, char **argv) { int dev = 0; hipSetDevice(dev); int nElem = 32; printf("Inp Size %d\n", nElem); size_t nBytes = nElem *sizeof(float); float *h_A, *h_B, *host, *gpu; h_A = (float *)malloc(nBytes); h_B = (float *)malloc(nBytes); host = (float *)malloc(nBytes); gpu = (float *)malloc(nBytes); initData(h_A, nElem); initData(h_B, nElem); memset(host, 0, nBytes); memset(gpu, 0, nBytes); float *d_A, *d_B, *d_C; hipMalloc((float**)&d_A, nBytes); hipMalloc((float**)&d_B, nBytes); hipMalloc((float**)&d_C, nBytes); hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice); dim3 block(nElem); dim3 grid(nElem/block.x); hipLaunchKernelGGL(( sumArrayOnDevice), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C); printf("Execution Cfg <<<%d, %d>>>\n", grid.x, block.x); hipMemcpy(gpu, d_C, nBytes, hipMemcpyDeviceToHost); sumArrayOnHost(h_A, h_B, host, nElem); checkResult(host, gpu, nElem); hipFree(d_A); hipFree(d_B); hipFree(d_C); free(h_A); free(h_B); free(host); free(gpu); int c = getchar(); return 0; }
70623b6d92a846b5b9076d3b1b4eace25429b4dd.cu
#include <cuda_runtime.h> #include <stdio.h> #include <time.h> #define CHECK(call)\ {\ const cudaError_t error = call;\ if (error != cudaSuccess)\ {\ printf("Error: %s:%d, ", __FILE__, __LINE__);\ printf("code: %d, reason: %s\n", error, cudaGetErrorString(error));\ exit(1);\ }\ }\ void sumArrayOnHost(float *A, float *B,float *C, const int N) { for (int i = 0; i < N; i++) C[i] = A[i] + B[i]; } // CHECK(cudaMemcpy(d_C, gpuRes, bBytes, cudaMemcpyHostToDevice)); __global__ void sumArrayOnDevice(float *A, float *B, float *C) { int i = threadIdx.x; C[i] = A[i] + B[i]; } void checkResult(float *host, float *device, const int N) { double epsilon = 1.0E-8; int match = 1; for (int i = 0; i < N; i++) { if (abs(host[i] - device[i]) > epsilon) { match = 0; printf("Don't match!\n"); printf("host %5.2f device %5.2f at current %d\n", host[i], device[i], i); break; } } if (match) printf("Array match\n\n"); return; } void initData(float *inp, int size) { time_t t; srand((unsigned) time(&t)); for (int i = 0; i < size; i++) inp[i] = (float)(rand() & 0xFF) / 10.0f; } int main(int argc, char **argv) { int dev = 0; cudaSetDevice(dev); int nElem = 32; printf("Inp Size %d\n", nElem); size_t nBytes = nElem *sizeof(float); float *h_A, *h_B, *host, *gpu; h_A = (float *)malloc(nBytes); h_B = (float *)malloc(nBytes); host = (float *)malloc(nBytes); gpu = (float *)malloc(nBytes); initData(h_A, nElem); initData(h_B, nElem); memset(host, 0, nBytes); memset(gpu, 0, nBytes); float *d_A, *d_B, *d_C; cudaMalloc((float**)&d_A, nBytes); cudaMalloc((float**)&d_B, nBytes); cudaMalloc((float**)&d_C, nBytes); cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice); dim3 block(nElem); dim3 grid(nElem/block.x); sumArrayOnDevice<<<grid, block>>>(d_A, d_B, d_C); printf("Execution Cfg <<<%d, %d>>>\n", grid.x, block.x); cudaMemcpy(gpu, d_C, nBytes, cudaMemcpyDeviceToHost); sumArrayOnHost(h_A, h_B, host, nElem); checkResult(host, gpu, nElem); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); free(h_A); free(h_B); free(host); free(gpu); int c = getchar(); return 0; }
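The pair above shows the typical hipify rewrite: the runtime header, the memory-management calls, and the triple-chevron kernel launch are renamed mechanically while the kernel body is left untouched. Below is a minimal sketch of that mapping, using only the renames visible in the two files; the `launchSum` helper name is illustrative and not part of the source.

```cpp
#include <hip/hip_runtime.h>

// Kernel body is identical in the .cu and .hip versions.
__global__ void sumArrayOnDevice(float *A, float *B, float *C) {
    int i = threadIdx.x;
    C[i] = A[i] + B[i];
}

// Hypothetical helper showing only the launch-syntax difference.
void launchSum(float *d_A, float *d_B, float *d_C, int nElem) {
    dim3 block(nElem);
    dim3 grid(1);
    // CUDA form (in the .cu file):
    //   sumArrayOnDevice<<<grid, block>>>(d_A, d_B, d_C);
    // HIP form emitted by hipify (in the .hip file): explicit macro taking
    // kernel, grid, block, dynamic shared-memory bytes, stream, then arguments.
    hipLaunchKernelGGL(sumArrayOnDevice, grid, block, 0, 0, d_A, d_B, d_C);
}
```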
14221875664d51e55090180584998d81f47dc4d4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // updateOutput, updateGradInput Kernels ported from Sergey Zagoruyko's pyinn, which itself was a // port from Caffe #include <THHUNN/THHUNN.h> #include <THH/THHTensor.hpp> #include <THH/THHDeviceTensor.cuh> #include <THH/THHDeviceTensorUtils.cuh> #include <THH/THHNumerics.cuh> #include <THH/THHReduceApplyUtils.cuh> #include <THH/THHSortUtils.cuh> #include <THH/THHTensorMathReduce.cuh> #include <THHUNN/SharedMem.cuh> #include <THHUNN/common.h> #include <algorithm> const int WARP_SIZE = 32; // Crude benchmarks suggest 256 is better than 512 and 1024 // TODO: Autotune/use better heuristics, improve speed more. const int MAX_BLOCK_SIZE = 256; static int getGradParamsNumThreads(int batchSize){ //warp per item in a batch, up to a maximum return ::min(batchSize * WARP_SIZE, MAX_BLOCK_SIZE); } template <typename T, typename AccT, typename IndexType, int kSize> __global__ void spatialDepthwiseConvolutionUpdateOutput( const THCDeviceTensor<T, 4> input, THCDeviceTensor<T, 4> output, const THCDeviceTensor<T, 4> weight, const THCDeviceTensor<T, 1> bias, bool biasEnabled, IndexType totalElements, const int outputChannels, const int depthwiseMultiplier, const int inputWidth, const int inputHeight, const int outputWidth, const int outputHeight, const int kernelWidth, const int kernelHeight, const int strideWidth, const int strideHeight, const int padWidth, const int padHeight, const int dilationWidth, const int dilationHeight) { const int KW_LIMIT = (kSize !=0) ? kSize : kernelWidth; const int KH_LIMIT = (kSize !=0) ? kSize : kernelHeight; for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < totalElements; linearIndex += gridDim.x * blockDim.x) { //calculate n,c,h,w indices, replacing modulos by divide and multiply add, //result is same as would be in the code below //const int n = linearIndex / batchStride; //batchStride = outputChannels * outputHeight * outputWidth //const int c = (linearIndex / channelStride) % outputChannels; //channelStride = outputHeight * outputWidth //const int h = (linearIndex / outputWidth) % outputHeight; //const int w = linearIndex % outputWidth; int indtmp1 = linearIndex/outputWidth; const int w = linearIndex - indtmp1 * outputWidth; int indtmp2 = indtmp1/outputHeight; const int h = indtmp1 - indtmp2 * outputHeight; indtmp1 = indtmp2; indtmp2 = indtmp1/outputChannels; const int c = indtmp1 - indtmp2 * outputChannels; const int n = indtmp2; int inputChannel = c; int inputChannels = outputChannels; if (depthwiseMultiplier !=1) { inputChannel /= depthwiseMultiplier; inputChannels /= depthwiseMultiplier; } int weightOffset = c * kernelHeight * kernelWidth; AccT value = biasEnabled ? 
ScalarConvert<T, AccT>::to(bias.data()[c]) : ScalarConvert<int, AccT>::to(0); const IndexType offset0 = (n * inputChannels + inputChannel) * inputHeight * inputWidth; #ifndef __HIP_PLATFORM_HCC__ #pragma unroll #endif for (int kH = 0; kH < KH_LIMIT; ++kH) { #ifndef __HIP_PLATFORM_HCC__ #pragma unroll #endif for (int kW = 0; kW < KW_LIMIT; ++kW) { const int h_in = -padHeight + h * strideHeight + kH * dilationHeight; const int w_in = -padWidth + w * strideWidth + kW * dilationWidth; if ((h_in >= 0) && (h_in < inputHeight) && (w_in >= 0) && (w_in < inputWidth)) { const IndexType offset = offset0 + h_in * inputWidth + w_in; value = THCNumerics<AccT>::add( value, THCNumerics<AccT>::mul( ScalarConvert<T, AccT>::to(weight.data()[weightOffset]), ScalarConvert<T, AccT>::to(input.data()[offset]))); } ++weightOffset; } } output.data()[linearIndex] = ScalarConvert<AccT, T>::to(value); } } template <typename T, typename AccT, typename IndexType, int kSize, int stride> __global__ void spatialDepthwiseConvolutionUpdateGradInput( const THCDeviceTensor<T, 4> gradOutput, THCDeviceTensor<T, 4> gradInput, const THCDeviceTensor<T, 4> weight, IndexType totalElements, const int inputChannels, const int depthwiseMultiplier, const int outputChannels, const int inputWidth, const int inputHeight, const int outputWidth, const int outputHeight, const int kernelWidth, const int kernelHeight, const int strideWidth, const int strideHeight, const int padWidth, const int padHeight, const int dilationWidth, const int dilationHeight) { const int KW_LIMIT = (kSize !=0) ? kSize : kernelWidth; const int KH_LIMIT = (kSize !=0) ? kSize : kernelHeight; const int strideW = (stride !=0) ? stride : strideWidth; const int strideH = (stride !=0) ? stride : strideHeight; for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < totalElements; linearIndex += gridDim.x * blockDim.x) { int indtmp1 = linearIndex/inputWidth; const int w = linearIndex - indtmp1 * inputWidth; int indtmp2 = indtmp1/inputHeight; const int h = indtmp1 - indtmp2 * inputHeight; indtmp1 = indtmp2; indtmp2 = indtmp1/inputChannels; const int c = indtmp1 - indtmp2 * inputChannels; const int n = indtmp2; AccT value = ScalarConvert<int, AccT>::to(0); #ifndef __HIP_PLATFORM_HCC__ #pragma unroll #endif for (int multiplier = 0; multiplier < depthwiseMultiplier; ++multiplier) { int och = (c * depthwiseMultiplier) + multiplier; int weightOffset = och * kernelHeight * kernelWidth; #ifndef __HIP_PLATFORM_HCC__ #pragma unroll #endif for (int kh = 0; kh < KH_LIMIT; ++kh) { #ifdef __HIP_PLATFORM_HCC__ #pragma unroll #endif for (int kw = 0; kw < KW_LIMIT; ++kw) { int h_out = h + padHeight - kh * dilationHeight; int w_out = w + padWidth - kw * dilationWidth; if ((h_out % strideH == 0) && (w_out % strideW == 0)) { h_out = h_out / strideH; w_out = w_out / strideW; if ((h_out >= 0) && (h_out < outputHeight) && (w_out >= 0) && (w_out < outputWidth)) { const int offset = ((n * outputChannels + och) * outputHeight + h_out) * outputWidth + w_out; value = THCNumerics<AccT>::add( value, THCNumerics<AccT>::mul( ScalarConvert<T, AccT>::to(weight.data()[weightOffset]), ScalarConvert<T, AccT>::to(gradOutput.data()[offset]))); } } ++weightOffset; } } } gradInput.data()[linearIndex] = ScalarConvert<AccT, T>::to(value); } } template <typename T, typename AccT, typename IndexType> __global__ void spatialDepthwiseConvolutionAccGradParameters( const THCDeviceTensor<T, 4> gradOutput, const THCDeviceTensor<T, 4> input, THCDeviceTensor<T, 4> gradWeight, const int batchSize, const 
int inputChannels, const int kernelChannels, const int depthwiseMultiplier, const int inputWidth, const int inputHeight, const int outputWidth, const int outputHeight, const int kernelWidth, const int kernelHeight, const int strideWidth, const int strideHeight, const int padWidth, const int padHeight, const int dilationWidth, const int dilationHeight) { const int channelStride = kernelWidth * kernelHeight; // Have to use a statically typed Shared Memory pointer SharedMem<AccT> smem; // Each Block is responsible for accumulating over a permutation of // (channels x kH x kW), use blockIdx to determine which one int bidx = blockIdx.x; int kW = bidx % kernelWidth; int kH = (bidx / kernelWidth) % kernelHeight; int ch = (bidx / channelStride); // Need to calculate which input channel is associated with this filter // channel int inputCh = ch / depthwiseMultiplier; AccT grad = ScalarConvert<float, AccT>::to(0.0); const int laneId = threadIdx.x % WARP_SIZE; const int batch = threadIdx.x / WARP_SIZE; const int nwarps = blockDim.x / WARP_SIZE; const int imageElements = outputWidth * outputHeight; // Use warp per item. In the original kernel, a threadblock was used to sum over NHW. // Here, we use a warp to sum values over HW dimension, and if batchSize is larger than the // number of warps, a warp would loop over remaining batch items (e.g. if there are 8 warps, // warp 0 would go over 0-8-16 etc image, warp 1 over 1-9-17 etc). Later in blockReduce, // all the warps will be reduced anyway, thus the full reduction will be over NHW, like it // should be. That allows to get rid of one modulo operation inside the loop (because n/batchIdx // now does not have to be computed through modulo, you are just looping over it), and // bring a nice speed-up. for (int batchIdx = batch; batchIdx < batchSize; batchIdx += nwarps){ // Warp-stride loop over elements in a batch item for (IndexType idx = laneId; idx < imageElements; idx += WARP_SIZE) { // Need to calculate the following: batch position, and offset into the gradOutput // in height, and width. 
We can intuit the corresponding position in the input from // the other parameters we have int go_w_offset = idx % outputWidth; int go_h_offset = (idx / outputWidth); int i_w_offset = (go_w_offset * strideWidth) + (kW * dilationWidth) - padWidth; int i_h_offset = (go_h_offset * strideHeight) + (kH * dilationHeight) - padHeight; if (i_w_offset >= 0 && i_h_offset >= 0 && i_w_offset < inputWidth && i_h_offset < inputHeight) { int inputOffset = ((batchIdx * inputChannels + inputCh) * inputHeight + i_h_offset) * inputWidth + i_w_offset; int outputOffset = ((batchIdx * kernelChannels + ch) * outputHeight ) * outputWidth + idx; grad = THCNumerics<AccT>::add( grad, THCNumerics<AccT>::mul( ScalarConvert<T, AccT>::to(input.data()[inputOffset]), ScalarConvert<T, AccT>::to(gradOutput.data()[outputOffset]))); } } } __syncthreads(); // At this point each thread in the block has a local gradient, which we need to // accumulate prior to writing the global value AccT *buf = smem.getPointer(); AccT tval = reduceBlock<AccT, ReduceAdd<AccT>>( buf, blockDim.x, grad, ReduceAdd<AccT>(), ScalarConvert<float, AccT>::to(0)); // After reduction, first thread in the block has the gradient, so its responsible // for writing it to gradWeight if (threadIdx.x == 0) { int weightOffset = kW + (kernelWidth * kH) + (kernelWidth * kernelHeight * ch); gradWeight.data()[weightOffset] = ScalarConvert<AccT, T>::to(tval); } } #include <THHUNN/generic/SpatialDepthwiseConvolution.hip> #include <THH/THHGenerateFloatTypes.h>
14221875664d51e55090180584998d81f47dc4d4.cu
// updateOutput, updateGradInput Kernels ported from Sergey Zagoruyko's pyinn, which itself was a // port from Caffe #include <THCUNN/THCUNN.h> #include <THC/THCTensor.hpp> #include <THC/THCDeviceTensor.cuh> #include <THC/THCDeviceTensorUtils.cuh> #include <THC/THCNumerics.cuh> #include <THC/THCReduceApplyUtils.cuh> #include <THC/THCSortUtils.cuh> #include <THC/THCTensorMathReduce.cuh> #include <THCUNN/SharedMem.cuh> #include <THCUNN/common.h> #include <algorithm> const int WARP_SIZE = 32; // Crude benchmarks suggest 256 is better than 512 and 1024 // TODO: Autotune/use better heuristics, improve speed more. const int MAX_BLOCK_SIZE = 256; static int getGradParamsNumThreads(int batchSize){ //warp per item in a batch, up to a maximum return std::min(batchSize * WARP_SIZE, MAX_BLOCK_SIZE); } template <typename T, typename AccT, typename IndexType, int kSize> __global__ void spatialDepthwiseConvolutionUpdateOutput( const THCDeviceTensor<T, 4> input, THCDeviceTensor<T, 4> output, const THCDeviceTensor<T, 4> weight, const THCDeviceTensor<T, 1> bias, bool biasEnabled, IndexType totalElements, const int outputChannels, const int depthwiseMultiplier, const int inputWidth, const int inputHeight, const int outputWidth, const int outputHeight, const int kernelWidth, const int kernelHeight, const int strideWidth, const int strideHeight, const int padWidth, const int padHeight, const int dilationWidth, const int dilationHeight) { const int KW_LIMIT = (kSize !=0) ? kSize : kernelWidth; const int KH_LIMIT = (kSize !=0) ? kSize : kernelHeight; for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < totalElements; linearIndex += gridDim.x * blockDim.x) { //calculate n,c,h,w indices, replacing modulos by divide and multiply add, //result is same as would be in the code below //const int n = linearIndex / batchStride; //batchStride = outputChannels * outputHeight * outputWidth //const int c = (linearIndex / channelStride) % outputChannels; //channelStride = outputHeight * outputWidth //const int h = (linearIndex / outputWidth) % outputHeight; //const int w = linearIndex % outputWidth; int indtmp1 = linearIndex/outputWidth; const int w = linearIndex - indtmp1 * outputWidth; int indtmp2 = indtmp1/outputHeight; const int h = indtmp1 - indtmp2 * outputHeight; indtmp1 = indtmp2; indtmp2 = indtmp1/outputChannels; const int c = indtmp1 - indtmp2 * outputChannels; const int n = indtmp2; int inputChannel = c; int inputChannels = outputChannels; if (depthwiseMultiplier !=1) { inputChannel /= depthwiseMultiplier; inputChannels /= depthwiseMultiplier; } int weightOffset = c * kernelHeight * kernelWidth; AccT value = biasEnabled ? 
ScalarConvert<T, AccT>::to(bias.data()[c]) : ScalarConvert<int, AccT>::to(0); const IndexType offset0 = (n * inputChannels + inputChannel) * inputHeight * inputWidth; #ifndef __HIP_PLATFORM_HCC__ #pragma unroll #endif for (int kH = 0; kH < KH_LIMIT; ++kH) { #ifndef __HIP_PLATFORM_HCC__ #pragma unroll #endif for (int kW = 0; kW < KW_LIMIT; ++kW) { const int h_in = -padHeight + h * strideHeight + kH * dilationHeight; const int w_in = -padWidth + w * strideWidth + kW * dilationWidth; if ((h_in >= 0) && (h_in < inputHeight) && (w_in >= 0) && (w_in < inputWidth)) { const IndexType offset = offset0 + h_in * inputWidth + w_in; value = THCNumerics<AccT>::add( value, THCNumerics<AccT>::mul( ScalarConvert<T, AccT>::to(weight.data()[weightOffset]), ScalarConvert<T, AccT>::to(input.data()[offset]))); } ++weightOffset; } } output.data()[linearIndex] = ScalarConvert<AccT, T>::to(value); } } template <typename T, typename AccT, typename IndexType, int kSize, int stride> __global__ void spatialDepthwiseConvolutionUpdateGradInput( const THCDeviceTensor<T, 4> gradOutput, THCDeviceTensor<T, 4> gradInput, const THCDeviceTensor<T, 4> weight, IndexType totalElements, const int inputChannels, const int depthwiseMultiplier, const int outputChannels, const int inputWidth, const int inputHeight, const int outputWidth, const int outputHeight, const int kernelWidth, const int kernelHeight, const int strideWidth, const int strideHeight, const int padWidth, const int padHeight, const int dilationWidth, const int dilationHeight) { const int KW_LIMIT = (kSize !=0) ? kSize : kernelWidth; const int KH_LIMIT = (kSize !=0) ? kSize : kernelHeight; const int strideW = (stride !=0) ? stride : strideWidth; const int strideH = (stride !=0) ? stride : strideHeight; for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < totalElements; linearIndex += gridDim.x * blockDim.x) { int indtmp1 = linearIndex/inputWidth; const int w = linearIndex - indtmp1 * inputWidth; int indtmp2 = indtmp1/inputHeight; const int h = indtmp1 - indtmp2 * inputHeight; indtmp1 = indtmp2; indtmp2 = indtmp1/inputChannels; const int c = indtmp1 - indtmp2 * inputChannels; const int n = indtmp2; AccT value = ScalarConvert<int, AccT>::to(0); #ifndef __HIP_PLATFORM_HCC__ #pragma unroll #endif for (int multiplier = 0; multiplier < depthwiseMultiplier; ++multiplier) { int och = (c * depthwiseMultiplier) + multiplier; int weightOffset = och * kernelHeight * kernelWidth; #ifndef __HIP_PLATFORM_HCC__ #pragma unroll #endif for (int kh = 0; kh < KH_LIMIT; ++kh) { #ifdef __HIP_PLATFORM_HCC__ #pragma unroll #endif for (int kw = 0; kw < KW_LIMIT; ++kw) { int h_out = h + padHeight - kh * dilationHeight; int w_out = w + padWidth - kw * dilationWidth; if ((h_out % strideH == 0) && (w_out % strideW == 0)) { h_out = h_out / strideH; w_out = w_out / strideW; if ((h_out >= 0) && (h_out < outputHeight) && (w_out >= 0) && (w_out < outputWidth)) { const int offset = ((n * outputChannels + och) * outputHeight + h_out) * outputWidth + w_out; value = THCNumerics<AccT>::add( value, THCNumerics<AccT>::mul( ScalarConvert<T, AccT>::to(weight.data()[weightOffset]), ScalarConvert<T, AccT>::to(gradOutput.data()[offset]))); } } ++weightOffset; } } } gradInput.data()[linearIndex] = ScalarConvert<AccT, T>::to(value); } } template <typename T, typename AccT, typename IndexType> __global__ void spatialDepthwiseConvolutionAccGradParameters( const THCDeviceTensor<T, 4> gradOutput, const THCDeviceTensor<T, 4> input, THCDeviceTensor<T, 4> gradWeight, const int batchSize, const 
int inputChannels, const int kernelChannels, const int depthwiseMultiplier, const int inputWidth, const int inputHeight, const int outputWidth, const int outputHeight, const int kernelWidth, const int kernelHeight, const int strideWidth, const int strideHeight, const int padWidth, const int padHeight, const int dilationWidth, const int dilationHeight) { const int channelStride = kernelWidth * kernelHeight; // Have to use a statically typed Shared Memory pointer SharedMem<AccT> smem; // Each Block is responsible for accumulating over a permutation of // (channels x kH x kW), use blockIdx to determine which one int bidx = blockIdx.x; int kW = bidx % kernelWidth; int kH = (bidx / kernelWidth) % kernelHeight; int ch = (bidx / channelStride); // Need to calculate which input channel is associated with this filter // channel int inputCh = ch / depthwiseMultiplier; AccT grad = ScalarConvert<float, AccT>::to(0.0); const int laneId = threadIdx.x % WARP_SIZE; const int batch = threadIdx.x / WARP_SIZE; const int nwarps = blockDim.x / WARP_SIZE; const int imageElements = outputWidth * outputHeight; // Use warp per item. In the original kernel, a threadblock was used to sum over NHW. // Here, we use a warp to sum values over HW dimension, and if batchSize is larger than the // number of warps, a warp would loop over remaining batch items (e.g. if there are 8 warps, // warp 0 would go over 0-8-16 etc image, warp 1 over 1-9-17 etc). Later in blockReduce, // all the warps will be reduced anyway, thus the full reduction will be over NHW, like it // should be. That allows to get rid of one modulo operation inside the loop (because n/batchIdx // now does not have to be computed through modulo, you are just looping over it), and // bring a nice speed-up. for (int batchIdx = batch; batchIdx < batchSize; batchIdx += nwarps){ // Warp-stride loop over elements in a batch item for (IndexType idx = laneId; idx < imageElements; idx += WARP_SIZE) { // Need to calculate the following: batch position, and offset into the gradOutput // in height, and width. 
We can intuit the corresponding position in the input from // the other parameters we have int go_w_offset = idx % outputWidth; int go_h_offset = (idx / outputWidth); int i_w_offset = (go_w_offset * strideWidth) + (kW * dilationWidth) - padWidth; int i_h_offset = (go_h_offset * strideHeight) + (kH * dilationHeight) - padHeight; if (i_w_offset >= 0 && i_h_offset >= 0 && i_w_offset < inputWidth && i_h_offset < inputHeight) { int inputOffset = ((batchIdx * inputChannels + inputCh) * inputHeight + i_h_offset) * inputWidth + i_w_offset; int outputOffset = ((batchIdx * kernelChannels + ch) * outputHeight ) * outputWidth + idx; grad = THCNumerics<AccT>::add( grad, THCNumerics<AccT>::mul( ScalarConvert<T, AccT>::to(input.data()[inputOffset]), ScalarConvert<T, AccT>::to(gradOutput.data()[outputOffset]))); } } } __syncthreads(); // At this point each thread in the block has a local gradient, which we need to // accumulate prior to writing the global value AccT *buf = smem.getPointer(); AccT tval = reduceBlock<AccT, ReduceAdd<AccT>>( buf, blockDim.x, grad, ReduceAdd<AccT>(), ScalarConvert<float, AccT>::to(0)); // After reduction, first thread in the block has the gradient, so its responsible // for writing it to gradWeight if (threadIdx.x == 0) { int weightOffset = kW + (kernelWidth * kH) + (kernelWidth * kernelHeight * ch); gradWeight.data()[weightOffset] = ScalarConvert<AccT, T>::to(tval); } } #include <THCUNN/generic/SpatialDepthwiseConvolution.cu> #include <THC/THCGenerateFloatTypes.h>
e213e0387f998f61c544768244e28b3001a9deb5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma ([email protected]), created on 20.04.2018 // #include<ops/declarable/helpers/transforms.h> #include <array/ResultSet.h> #include <helpers/ShapeUtils.h> #include <numeric> #include <array/NDArrayFactory.h> #include <helpers/TAD.h> #include <exceptions/cuda_exception.h> #include <helpers/PointersManager.h> #include <helpers/ConstantTadHelper.h> namespace sd { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// // x - input, y - paddings, z - output template<typename X, typename Y> __global__ static void padCuda(const int mode, const void *vx, const Nd4jLong *xShapeInfo, const void *vy, const Nd4jLong *yShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const void *vPadVal) { const X padVal = *reinterpret_cast<const X*>(vPadVal); const auto x = reinterpret_cast<const X*>(vx); const auto y = reinterpret_cast<const Y*>(vy); auto z = reinterpret_cast<X*>(vz); __shared__ int rank, rankMinusOne; __shared__ Nd4jLong zLen, totalThreads, *coords, *xShape, *zShape, shift1, shift2, yStride0; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; coords = reinterpret_cast<Nd4jLong*>(shmem); zLen = shape::length(zShapeInfo); xShape = shape::shapeOf(const_cast<Nd4jLong*>(xShapeInfo)); zShape = shape::shapeOf(const_cast<Nd4jLong*>(zShapeInfo)); yStride0 = shape::stride(const_cast<Nd4jLong*>(yShapeInfo))[0]; rank = shape::rank(xShapeInfo); zLen = shape::length(zShapeInfo); rankMinusOne = rank - 1; totalThreads = gridDim.x * blockDim.x; shift1 = mode == 1 ? 0 : 1; // REFLECT : SYMMETRIC shift2 = mode == 1 ? 
2 : 1; // REFLECT : SYMMETRIC } __syncthreads(); auto xzCoord = coords + threadIdx.x * rank; // we use xzCoord storage both for x and z arrays const auto tid = blockIdx.x * blockDim.x + threadIdx.x; if(mode == 0) { // CONSTANT case for (Nd4jLong i = tid; i < zLen; i += totalThreads) { shape::index2coords(i, zShapeInfo, xzCoord); const auto zOffset = shape::getOffset(zShapeInfo, xzCoord); bool within = true; for(int j = rankMinusOne; j >= 0; --j) { if(xShape[j] == zShape[j]) continue; const auto left = y[shape::getIndexOffset(yStride0 * j, yShapeInfo)]; if(xzCoord[j] < left || xzCoord[j] >= left + xShape[j]) {within = false; break;} else {xzCoord[j] = xzCoord[j] - left;} } if(within) z[zOffset] = x[shape::getOffset(xShapeInfo, xzCoord)]; else z[zOffset] = padVal; } } else { // REFLECT and SYMMETRIC cases for (Nd4jLong i = tid; i < zLen; i += totalThreads) { shape::index2coords(i, zShapeInfo, xzCoord); const auto zOffset = shape::getOffset(zShapeInfo, xzCoord); for(int j = rankMinusOne; j >= 0; --j) { if(xShape[j] == zShape[j]) continue; xzCoord[j] = xzCoord[j] - y[shape::getIndexOffset(yStride0 * j, yShapeInfo)]; // are ready to fill middle (within input dimension range) if(xzCoord[j] < 0) xzCoord[j] = -xzCoord[j] - shift1; // means fill from left else if(xzCoord[j] >= xShape[j]) xzCoord[j] = 2 * xShape[j] - xzCoord[j] - shift2; // means fill from right } const auto xOffset = shape::getOffset(xShapeInfo, xzCoord); z[zOffset] = x[xOffset]; } } } /////////////////////////////////////////////////////////////////// template<typename X, typename Y> static void padCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const int mode, const void *vx, const Nd4jLong *xShapeInfo, const void *vy, const Nd4jLong *yShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const void* padVal) { hipLaunchKernelGGL(( padCuda<X,Y>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, mode, vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, padVal); } /////////////////////////////////////////////////////////////////// void pad(sd::LaunchContext * context, const int mode, const NDArray& input, const NDArray& paddings, NDArray& output, const NDArray& padValue) { PointersManager manager(context, "pad"); NDArray::prepareSpecialUse({&output}, {&input, &paddings, &padValue}); const int threadsPerBlock = MAX_NUM_THREADS / 4; const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = 8 * threadsPerBlock * output.rankOf() + 128; const auto xType = input.dataType(); const auto yType = paddings.dataType(); BUILD_DOUBLE_SELECTOR(xType, yType, padCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), mode, input.getSpecialBuffer(), input.getSpecialShapeInfo(), paddings.getSpecialBuffer(), paddings.getSpecialShapeInfo(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), padValue.getSpecialBuffer()), LIBND4J_TYPES, INDEXING_TYPES); NDArray::registerSpecialUse({&output}, {&input, &paddings, &padValue}); manager.synchronize(); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> static __global__ void mirrorPadLinearKernel(void const* vx, Nd4jLong* xShape, void* vz, Nd4jLong* zShape, Nd4jLong leftSide, Nd4jLong leftSideCorrected, Nd4jLong xLen, Nd4jLong len, Nd4jLong zLen) { __shared__ T const* x; __shared__ T* z; if (threadIdx.x == 0) { x = reinterpret_cast<T const*>(vx); z = 
reinterpret_cast<T*>(vz); } __syncthreads(); auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = blockDim.x * gridDim.x; for(int i = start; i < zLen; i+= step) { auto zIndex = shape::getIndexOffset(i, zShape); auto xIndex = shape::getIndexOffset(len - i, xShape); if (i < leftSide) // left side xIndex = shape::getIndexOffset(leftSideCorrected - i, xShape); else if(i >= leftSide && i < leftSide + xLen) // middle xIndex = shape::getIndexOffset(i - leftSide, xShape); // else // right side // z[i] = x[len - i]; z[zIndex] = x[xIndex]; } } template <typename F, typename I> static __global__ void mirrorPadKernel(void const* vx, Nd4jLong* xShape, void* vz, Nd4jLong* zShape, Nd4jLong outLen, void const* paddings, Nd4jLong* paddingShape, int reflBorder) { __shared__ F const* x; __shared__ I const* pads; __shared__ F* z; __shared__ Nd4jLong zRank, rank; __shared__ Nd4jLong* xIdx; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; xIdx = reinterpret_cast<Nd4jLong*>(shmem); rank = shape::rank(xShape); x = reinterpret_cast<F const*>(vx);// pads = reinterpret_cast<I const*>(paddings); z = reinterpret_cast<F*>(vz); } __syncthreads(); auto start = threadIdx.x + blockIdx.x * blockDim.x; auto step = blockDim.x * gridDim.x; for(Nd4jLong i = start; i < outLen; i+= step) { auto xzCoord = xIdx + threadIdx.x * rank; //auto zxCoord = xIdx + (threadIdx.x + threadIdx.x % 2 + 1) * rank; shape::index2coords(i, zShape, xzCoord); auto outOffset = shape::getOffset(zShape, xzCoord); // auto intStep = blockDim.y * gridDim.y; for(int j = 0; j < rank; j++) { const Nd4jLong inLen = shape::sizeAt(xShape, j); Nd4jLong coords[2] = {j, 0}; auto padOffset = shape::getOffset(paddingShape, coords); // padding already has rank 2 const auto leftSide = pads[padOffset]; const auto leftSideCorrected = leftSide - reflBorder; const Nd4jLong len = 2 * (inLen - 1) + leftSide + reflBorder; if(xzCoord[j] < leftSide) // left side xzCoord[j] = leftSideCorrected - xzCoord[j]; else if(xzCoord[j] >= leftSide && xzCoord[j] < leftSide + inLen) // middle xzCoord[j] = xzCoord[j] - leftSide; else if (len > xzCoord[j]) // right side xzCoord[j] = len - xzCoord[j]; else xzCoord[j] = xzCoord[j] - len; } auto inOffset = shape::getOffset(xShape, xzCoord); z[outOffset] = x[inOffset]; } } template<typename F, typename I> static void mirrorPad_(sd::LaunchContext * context, const NDArray& input, const NDArray& paddings, NDArray& output, const int mode) { // mode: 0 - REFLECT, else - SYMMETRIC const int reflBorder = (bool)mode ? 1 : 0; const int rank = input.rankOf(); const Nd4jLong outLen = output.lengthOf(); auto stream = context->getCudaStream(); NDArray::prepareSpecialUse({&output}, {&input, &paddings}); if(rank <= 1) { const Nd4jLong inLen = input.lengthOf(); const auto leftSide = paddings.e<Nd4jLong>(0); const auto leftSideCorrected = leftSide - reflBorder; const Nd4jLong len = 2*(inLen-1) + leftSide + reflBorder; hipLaunchKernelGGL(( mirrorPadLinearKernel<F>), dim3(256), dim3(512), 256, *stream, input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), leftSide, leftSideCorrected, inLen, len, outLen); sd::DebugHelper::checkErrorCode(stream, "helpers::mirrorPadLinearKernel(...) 
failed"); } else { hipLaunchKernelGGL(( mirrorPadKernel<F, I>), dim3(256), dim3(256), 8192, *stream, input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), outLen, paddings.getSpecialBuffer(), paddings.getSpecialShapeInfo(), reflBorder); sd::DebugHelper::checkErrorCode(stream, "helpers::mirrorPadKernel(...) failed"); } NDArray::registerSpecialUse({&output}, {&input, &paddings}); } void mirrorPad(sd::LaunchContext * context, const NDArray& input, const NDArray& paddings, NDArray& output, const int mode) { BUILD_DOUBLE_SELECTOR(input.dataType(), paddings.dataType(), mirrorPad_, (context, input, paddings, output, mode), LIBND4J_TYPES, INDEXING_TYPES); } } } }
e213e0387f998f61c544768244e28b3001a9deb5.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma ([email protected]), created on 20.04.2018 // #include<ops/declarable/helpers/transforms.h> #include <array/ResultSet.h> #include <helpers/ShapeUtils.h> #include <numeric> #include <array/NDArrayFactory.h> #include <helpers/TAD.h> #include <exceptions/cuda_exception.h> #include <helpers/PointersManager.h> #include <helpers/ConstantTadHelper.h> namespace sd { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// // x - input, y - paddings, z - output template<typename X, typename Y> __global__ static void padCuda(const int mode, const void *vx, const Nd4jLong *xShapeInfo, const void *vy, const Nd4jLong *yShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const void *vPadVal) { const X padVal = *reinterpret_cast<const X*>(vPadVal); const auto x = reinterpret_cast<const X*>(vx); const auto y = reinterpret_cast<const Y*>(vy); auto z = reinterpret_cast<X*>(vz); __shared__ int rank, rankMinusOne; __shared__ Nd4jLong zLen, totalThreads, *coords, *xShape, *zShape, shift1, shift2, yStride0; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; coords = reinterpret_cast<Nd4jLong*>(shmem); zLen = shape::length(zShapeInfo); xShape = shape::shapeOf(const_cast<Nd4jLong*>(xShapeInfo)); zShape = shape::shapeOf(const_cast<Nd4jLong*>(zShapeInfo)); yStride0 = shape::stride(const_cast<Nd4jLong*>(yShapeInfo))[0]; rank = shape::rank(xShapeInfo); zLen = shape::length(zShapeInfo); rankMinusOne = rank - 1; totalThreads = gridDim.x * blockDim.x; shift1 = mode == 1 ? 0 : 1; // REFLECT : SYMMETRIC shift2 = mode == 1 ? 
2 : 1; // REFLECT : SYMMETRIC } __syncthreads(); auto xzCoord = coords + threadIdx.x * rank; // we use xzCoord storage both for x and z arrays const auto tid = blockIdx.x * blockDim.x + threadIdx.x; if(mode == 0) { // CONSTANT case for (Nd4jLong i = tid; i < zLen; i += totalThreads) { shape::index2coords(i, zShapeInfo, xzCoord); const auto zOffset = shape::getOffset(zShapeInfo, xzCoord); bool within = true; for(int j = rankMinusOne; j >= 0; --j) { if(xShape[j] == zShape[j]) continue; const auto left = y[shape::getIndexOffset(yStride0 * j, yShapeInfo)]; if(xzCoord[j] < left || xzCoord[j] >= left + xShape[j]) {within = false; break;} else {xzCoord[j] = xzCoord[j] - left;} } if(within) z[zOffset] = x[shape::getOffset(xShapeInfo, xzCoord)]; else z[zOffset] = padVal; } } else { // REFLECT and SYMMETRIC cases for (Nd4jLong i = tid; i < zLen; i += totalThreads) { shape::index2coords(i, zShapeInfo, xzCoord); const auto zOffset = shape::getOffset(zShapeInfo, xzCoord); for(int j = rankMinusOne; j >= 0; --j) { if(xShape[j] == zShape[j]) continue; xzCoord[j] = xzCoord[j] - y[shape::getIndexOffset(yStride0 * j, yShapeInfo)]; // are ready to fill middle (within input dimension range) if(xzCoord[j] < 0) xzCoord[j] = -xzCoord[j] - shift1; // means fill from left else if(xzCoord[j] >= xShape[j]) xzCoord[j] = 2 * xShape[j] - xzCoord[j] - shift2; // means fill from right } const auto xOffset = shape::getOffset(xShapeInfo, xzCoord); z[zOffset] = x[xOffset]; } } } /////////////////////////////////////////////////////////////////// template<typename X, typename Y> static void padCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const int mode, const void *vx, const Nd4jLong *xShapeInfo, const void *vy, const Nd4jLong *yShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const void* padVal) { padCuda<X,Y><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(mode, vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, padVal); } /////////////////////////////////////////////////////////////////// void pad(sd::LaunchContext * context, const int mode, const NDArray& input, const NDArray& paddings, NDArray& output, const NDArray& padValue) { PointersManager manager(context, "pad"); NDArray::prepareSpecialUse({&output}, {&input, &paddings, &padValue}); const int threadsPerBlock = MAX_NUM_THREADS / 4; const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = 8 * threadsPerBlock * output.rankOf() + 128; const auto xType = input.dataType(); const auto yType = paddings.dataType(); BUILD_DOUBLE_SELECTOR(xType, yType, padCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), mode, input.getSpecialBuffer(), input.getSpecialShapeInfo(), paddings.getSpecialBuffer(), paddings.getSpecialShapeInfo(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), padValue.getSpecialBuffer()), LIBND4J_TYPES, INDEXING_TYPES); NDArray::registerSpecialUse({&output}, {&input, &paddings, &padValue}); manager.synchronize(); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> static __global__ void mirrorPadLinearKernel(void const* vx, Nd4jLong* xShape, void* vz, Nd4jLong* zShape, Nd4jLong leftSide, Nd4jLong leftSideCorrected, Nd4jLong xLen, Nd4jLong len, Nd4jLong zLen) { __shared__ T const* x; __shared__ T* z; if (threadIdx.x == 0) { x = reinterpret_cast<T const*>(vx); z = reinterpret_cast<T*>(vz); } __syncthreads(); auto 
start = blockIdx.x * blockDim.x + threadIdx.x; auto step = blockDim.x * gridDim.x; for(int i = start; i < zLen; i+= step) { auto zIndex = shape::getIndexOffset(i, zShape); auto xIndex = shape::getIndexOffset(len - i, xShape); if (i < leftSide) // left side xIndex = shape::getIndexOffset(leftSideCorrected - i, xShape); else if(i >= leftSide && i < leftSide + xLen) // middle xIndex = shape::getIndexOffset(i - leftSide, xShape); // else // right side // z[i] = x[len - i]; z[zIndex] = x[xIndex]; } } template <typename F, typename I> static __global__ void mirrorPadKernel(void const* vx, Nd4jLong* xShape, void* vz, Nd4jLong* zShape, Nd4jLong outLen, void const* paddings, Nd4jLong* paddingShape, int reflBorder) { __shared__ F const* x; __shared__ I const* pads; __shared__ F* z; __shared__ Nd4jLong zRank, rank; __shared__ Nd4jLong* xIdx; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; xIdx = reinterpret_cast<Nd4jLong*>(shmem); rank = shape::rank(xShape); x = reinterpret_cast<F const*>(vx);// pads = reinterpret_cast<I const*>(paddings); z = reinterpret_cast<F*>(vz); } __syncthreads(); auto start = threadIdx.x + blockIdx.x * blockDim.x; auto step = blockDim.x * gridDim.x; for(Nd4jLong i = start; i < outLen; i+= step) { auto xzCoord = xIdx + threadIdx.x * rank; //auto zxCoord = xIdx + (threadIdx.x + threadIdx.x % 2 + 1) * rank; shape::index2coords(i, zShape, xzCoord); auto outOffset = shape::getOffset(zShape, xzCoord); // auto intStep = blockDim.y * gridDim.y; for(int j = 0; j < rank; j++) { const Nd4jLong inLen = shape::sizeAt(xShape, j); Nd4jLong coords[2] = {j, 0}; auto padOffset = shape::getOffset(paddingShape, coords); // padding already has rank 2 const auto leftSide = pads[padOffset]; const auto leftSideCorrected = leftSide - reflBorder; const Nd4jLong len = 2 * (inLen - 1) + leftSide + reflBorder; if(xzCoord[j] < leftSide) // left side xzCoord[j] = leftSideCorrected - xzCoord[j]; else if(xzCoord[j] >= leftSide && xzCoord[j] < leftSide + inLen) // middle xzCoord[j] = xzCoord[j] - leftSide; else if (len > xzCoord[j]) // right side xzCoord[j] = len - xzCoord[j]; else xzCoord[j] = xzCoord[j] - len; } auto inOffset = shape::getOffset(xShape, xzCoord); z[outOffset] = x[inOffset]; } } template<typename F, typename I> static void mirrorPad_(sd::LaunchContext * context, const NDArray& input, const NDArray& paddings, NDArray& output, const int mode) { // mode: 0 - REFLECT, else - SYMMETRIC const int reflBorder = (bool)mode ? 1 : 0; const int rank = input.rankOf(); const Nd4jLong outLen = output.lengthOf(); auto stream = context->getCudaStream(); NDArray::prepareSpecialUse({&output}, {&input, &paddings}); if(rank <= 1) { const Nd4jLong inLen = input.lengthOf(); const auto leftSide = paddings.e<Nd4jLong>(0); const auto leftSideCorrected = leftSide - reflBorder; const Nd4jLong len = 2*(inLen-1) + leftSide + reflBorder; mirrorPadLinearKernel<F><<<256, 512, 256, *stream>>>(input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), leftSide, leftSideCorrected, inLen, len, outLen); sd::DebugHelper::checkErrorCode(stream, "helpers::mirrorPadLinearKernel(...) failed"); } else { mirrorPadKernel<F, I><<<256, 256, 8192, *stream>>>(input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), outLen, paddings.getSpecialBuffer(), paddings.getSpecialShapeInfo(), reflBorder); sd::DebugHelper::checkErrorCode(stream, "helpers::mirrorPadKernel(...) 
failed"); } NDArray::registerSpecialUse({&output}, {&input, &paddings}); } void mirrorPad(sd::LaunchContext * context, const NDArray& input, const NDArray& paddings, NDArray& output, const int mode) { BUILD_DOUBLE_SELECTOR(input.dataType(), paddings.dataType(), mirrorPad_, (context, input, paddings, output, mode), LIBND4J_TYPES, INDEXING_TYPES); } } } }
1ac687592f9226d2e58c2cf08771aef3b6d432eb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernel.h" /* * NOTICE: numThreads = numVectors. * Each thread is a vector. computes the distances between his vector and all clusters. * devDistsVectorsToClusters[numThreads * numClusters]. * EXAMPLE: devDistsVectorsToClusters[0]-[numClusters] contains all distances from v1 to all clusters */ __global__ void computeDistancesArray(double *devVectors, double *devClusters, int numOfVectors, int numThreadsInBlock, int numOfDims, double *devDistsVectorsToClusters) { int i, blockID = blockIdx.x; double result = 0; if (blockID == gridDim.x - 1 && numOfVectors % blockDim.x <= threadIdx.x) return; for (i = 0; i < numOfDims; ++i) { result += (devVectors[(blockID*numThreadsInBlock + threadIdx.x)*numOfDims + i] - devClusters[threadIdx.y*numOfDims + i]) * (devVectors[(blockID*numThreadsInBlock + threadIdx.x)*numOfDims + i] - devClusters[threadIdx.y*numOfDims + i]); } devDistsVectorsToClusters[numOfVectors*threadIdx.y + (blockID*numThreadsInBlock + threadIdx.x)] = result; } /* * NOTICE: numThreads = numVectors. * Each thread is a vector. Traverses devDistsVectorsToClusters[] and finds the min distance. * Writes it to devVectorIndexOfCluster[numVectors] */ __global__ void findMinDistanceForEachVectorFromCluster(int numOfVectors, int numOfClusters, int numThreadsInBlock, double *devDistsVectorsToClusters, int *devVectorIndexOfCluster) { int i, xid = threadIdx.x, blockId = blockIdx.x; double minIndex = 0, minDistance, tempDistance; if (blockIdx.x == gridDim.x - 1 && numOfVectors % blockDim.x <= xid) return; minDistance = devDistsVectorsToClusters[numThreadsInBlock*blockId + xid]; for (i = 1; i < numOfClusters; i++) { tempDistance = devDistsVectorsToClusters[numThreadsInBlock*blockId + xid + i*numOfVectors]; if (minDistance > tempDistance) { minIndex = i; minDistance = tempDistance; } } devVectorIndexOfCluster[numThreadsInBlock*blockId + xid] = minIndex; } hipError_t computeClustersMeansWithCUDA(double *devVectors, double **clusters, int numOfVectors, int numOfClusters, int numOfDims, int *vectorIndexOfCluster) { hipError_t cudaStatus; hipDeviceProp_t devProp; int maxThreadsPerBlock, maxGridSize[3]; int numBlocks, numThreadsInBlock; hipGetDeviceProperties(&devProp, 0); // 0 is device 0 for (int i = 0; i < 3; ++i) maxGridSize[i] = devProp.maxGridSize[i]; //configuring kerenl params numThreadsInBlock = devProp.maxThreadsPerBlock / numOfClusters; dim3 dim(numThreadsInBlock, numOfClusters); numBlocks = numOfVectors / numThreadsInBlock; if (numOfVectors % numThreadsInBlock > 0) { numBlocks++; } double *devClusters; double *devDistsVectorsToClusters = 0; int *devVectorIndexOfCluster = 0; // Allocate GPU buffers for three vectors (two input, one output) cudaStatus = hipMalloc((void**)&devClusters, numOfClusters*numOfDims * sizeof(double)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&devDistsVectorsToClusters, numOfClusters*numOfVectors * sizeof(double)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&devVectorIndexOfCluster, numOfVectors * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input from host memory to GPU buffers. 
cudaStatus = hipMemcpy(devClusters, clusters[0], numOfClusters*numOfDims * sizeof(double), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } //launch kernels// computeDistancesArray << <numBlocks, dim >> > (devVectors, devClusters, numOfVectors, numThreadsInBlock, numOfDims, devDistsVectorsToClusters); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } /* hipDeviceSynchronize waits for the kernel to finish, and returns any errors encountered during the launch*/ cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } numThreadsInBlock = devProp.maxThreadsPerBlock; numBlocks = numOfVectors / numThreadsInBlock; if (numOfVectors % numThreadsInBlock > 0) { numBlocks++; } findMinDistanceForEachVectorFromCluster << <numBlocks, numThreadsInBlock >> > (numOfVectors, numOfClusters, numThreadsInBlock, devDistsVectorsToClusters, devVectorIndexOfCluster); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } /* hipDeviceSynchronize waits for the kernel to finish, and returns any errors encountered during the launch*/ cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output from GPU buffer to host memory. cudaStatus = hipMemcpy(vectorIndexOfCluster, devVectorIndexOfCluster, numOfVectors * sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(devClusters); hipFree(devDistsVectorsToClusters); hipFree(devVectorIndexOfCluster); return cudaStatus; }
1ac687592f9226d2e58c2cf08771aef3b6d432eb.cu
#include "kernel.h" /* * NOTICE: numThreads = numVectors. * Each thread is a vector. computes the distances between his vector and all clusters. * devDistsVectorsToClusters[numThreads * numClusters]. * EXAMPLE: devDistsVectorsToClusters[0]-[numClusters] contains all distances from v1 to all clusters */ __global__ void computeDistancesArray(double *devVectors, double *devClusters, int numOfVectors, int numThreadsInBlock, int numOfDims, double *devDistsVectorsToClusters) { int i, blockID = blockIdx.x; double result = 0; if (blockID == gridDim.x - 1 && numOfVectors % blockDim.x <= threadIdx.x) return; for (i = 0; i < numOfDims; ++i) { result += (devVectors[(blockID*numThreadsInBlock + threadIdx.x)*numOfDims + i] - devClusters[threadIdx.y*numOfDims + i]) * (devVectors[(blockID*numThreadsInBlock + threadIdx.x)*numOfDims + i] - devClusters[threadIdx.y*numOfDims + i]); } devDistsVectorsToClusters[numOfVectors*threadIdx.y + (blockID*numThreadsInBlock + threadIdx.x)] = result; } /* * NOTICE: numThreads = numVectors. * Each thread is a vector. Traverses devDistsVectorsToClusters[] and finds the min distance. * Writes it to devVectorIndexOfCluster[numVectors] */ __global__ void findMinDistanceForEachVectorFromCluster(int numOfVectors, int numOfClusters, int numThreadsInBlock, double *devDistsVectorsToClusters, int *devVectorIndexOfCluster) { int i, xid = threadIdx.x, blockId = blockIdx.x; double minIndex = 0, minDistance, tempDistance; if (blockIdx.x == gridDim.x - 1 && numOfVectors % blockDim.x <= xid) return; minDistance = devDistsVectorsToClusters[numThreadsInBlock*blockId + xid]; for (i = 1; i < numOfClusters; i++) { tempDistance = devDistsVectorsToClusters[numThreadsInBlock*blockId + xid + i*numOfVectors]; if (minDistance > tempDistance) { minIndex = i; minDistance = tempDistance; } } devVectorIndexOfCluster[numThreadsInBlock*blockId + xid] = minIndex; } cudaError_t computeClustersMeansWithCUDA(double *devVectors, double **clusters, int numOfVectors, int numOfClusters, int numOfDims, int *vectorIndexOfCluster) { cudaError_t cudaStatus; cudaDeviceProp devProp; int maxThreadsPerBlock, maxGridSize[3]; int numBlocks, numThreadsInBlock; cudaGetDeviceProperties(&devProp, 0); // 0 is device 0 for (int i = 0; i < 3; ++i) maxGridSize[i] = devProp.maxGridSize[i]; //configuring kerenl params numThreadsInBlock = devProp.maxThreadsPerBlock / numOfClusters; dim3 dim(numThreadsInBlock, numOfClusters); numBlocks = numOfVectors / numThreadsInBlock; if (numOfVectors % numThreadsInBlock > 0) { numBlocks++; } double *devClusters; double *devDistsVectorsToClusters = 0; int *devVectorIndexOfCluster = 0; // Allocate GPU buffers for three vectors (two input, one output) cudaStatus = cudaMalloc((void**)&devClusters, numOfClusters*numOfDims * sizeof(double)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&devDistsVectorsToClusters, numOfClusters*numOfVectors * sizeof(double)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&devVectorIndexOfCluster, numOfVectors * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input from host memory to GPU buffers. 
cudaStatus = cudaMemcpy(devClusters, clusters[0], numOfClusters*numOfDims * sizeof(double), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } //launch kernels// computeDistancesArray << <numBlocks, dim >> > (devVectors, devClusters, numOfVectors, numThreadsInBlock, numOfDims, devDistsVectorsToClusters); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } /* cudaDeviceSynchronize waits for the kernel to finish, and returns any errors encountered during the launch*/ cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } numThreadsInBlock = devProp.maxThreadsPerBlock; numBlocks = numOfVectors / numThreadsInBlock; if (numOfVectors % numThreadsInBlock > 0) { numBlocks++; } findMinDistanceForEachVectorFromCluster << <numBlocks, numThreadsInBlock >> > (numOfVectors, numOfClusters, numThreadsInBlock, devDistsVectorsToClusters, devVectorIndexOfCluster); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } /* cudaDeviceSynchronize waits for the kernel to finish, and returns any errors encountered during the launch*/ cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output from GPU buffer to host memory. cudaStatus = cudaMemcpy(vectorIndexOfCluster, devVectorIndexOfCluster, numOfVectors * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(devClusters); cudaFree(devDistsVectorsToClusters); cudaFree(devVectorIndexOfCluster); return cudaStatus; }
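The k-means host code in the pair above sizes its first launch as one thread per vector in x and one lane per cluster in y, rounding the block count up so the trailing partial block is still covered (its excess threads return early inside the kernels). A minimal sketch of that configuration logic follows; the `configureLaunch` name is illustrative.

```cpp
#include <cuda_runtime.h>

// Mirrors the "configuring kernel params" section of computeClustersMeansWithCUDA.
void configureLaunch(int numOfVectors, int numOfClusters, int maxThreadsPerBlock,
                     dim3 &grid, dim3 &block) {
    int numThreadsInBlock = maxThreadsPerBlock / numOfClusters;
    block = dim3(numThreadsInBlock, numOfClusters);
    int numBlocks = numOfVectors / numThreadsInBlock;
    if (numOfVectors % numThreadsInBlock > 0)
        numBlocks++;                         // round up to cover the remainder
    grid = dim3(numBlocks);
}
```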
9757f04168c991fc4a2b6c795e96cd6229ddfb42.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "optixParams.h" // our launch params extern "C" { __constant__ LaunchParams optixLaunchParams; } // ray type -> these must be in the // same order as the ray types in Nau's project enum { PHONG_RAY_TYPE=0, RAY_TYPE_COUNT }; // ------------------------------------------------------- // closest hit computes color based on material color or texture extern "C" __global__ void __closesthit__phong() { // get the payload variable float3 &prd = *(float3*)getPRD<float3>(); // get mesh data const TriangleMeshSBTData &sbtData = *(const TriangleMeshSBTData*)optixGetSbtDataPointer(); prd = sbtData.diffuse; } // nothing to do in here extern "C" __global__ void __anyhit__phong() { } // miss sets the bacgground color extern "C" __global__ void __miss__phong() { float3 &prd = *(float3*)getPRD<float3>(); // set blue as background color prd = make_float3(0.0f, 0.0f, 1.0f); } // ray gen program - responsible for launching primary rays extern "C" __global__ void __raygen__renderFrame() { const int ix = optixGetLaunchIndex().x; const int iy = optixGetLaunchIndex().y; const auto &camera = optixLaunchParams.camera; // ray payload float3 pixelColorPRD = make_float3(1.f); uint32_t u0, u1; packPointer( &pixelColorPRD, u0, u1 ); // compute ray direction // normalized screen plane position, in [-1, 1]^2 const float2 screen(make_float2(ix+.5f,iy+.5f) / make_float2(optixGetLaunchDimensions().x, optixGetLaunchDimensions().y) * 2.0 - 1.0); // note: nau already takes into account the field of view when computing // camera horizontal and vertival float3 rayDir = normalize(camera.direction + screen.x * camera.horizontal + screen.y * camera.vertical); // trace primary ray optixTrace(optixLaunchParams.traversable, camera.position, rayDir, 0.f, // tmin 1e20f, // tmax 0.0f, // rayTime OptixVisibilityMask( 255 ), OPTIX_RAY_FLAG_DISABLE_ANYHIT, PHONG_RAY_TYPE, // SBT offset RAY_TYPE_COUNT, // SBT stride PHONG_RAY_TYPE, // missSBTIndex u0, u1 ); //convert float (0-1) to int (0-255) const int r = int(255.0f*pixelColorPRD.x); const int g = int(255.0f*pixelColorPRD.y); const int b = int(255.0f*pixelColorPRD.z); // convert to 32-bit rgba value const uint32_t rgba = 0xff000000 | (r<<0) | (g<<8) | (b<<16); // compute index const uint32_t fbIndex = ix+iy*optixGetLaunchDimensions().x; // write to output buffer optixLaunchParams.frame.colorBuffer[fbIndex] = rgba; }
9757f04168c991fc4a2b6c795e96cd6229ddfb42.cu
#include "optixParams.h" // our launch params

extern "C" {
    __constant__ LaunchParams optixLaunchParams;
}

// ray type -> these must be in the
// same order as the ray types in Nau's project
enum { PHONG_RAY_TYPE=0, RAY_TYPE_COUNT };

// -------------------------------------------------------
// closest hit computes color based on material color or texture

extern "C" __global__ void __closesthit__phong() {

    // get the payload variable
    float3 &prd = *(float3*)getPRD<float3>();

    // get mesh data
    const TriangleMeshSBTData &sbtData = *(const TriangleMeshSBTData*)optixGetSbtDataPointer();

    prd = sbtData.diffuse;
}

// nothing to do in here
extern "C" __global__ void __anyhit__phong() {
}

// miss sets the background color
extern "C" __global__ void __miss__phong() {

    float3 &prd = *(float3*)getPRD<float3>();
    // set blue as background color
    prd = make_float3(0.0f, 0.0f, 1.0f);
}

// ray gen program - responsible for launching primary rays
extern "C" __global__ void __raygen__renderFrame() {

    const int ix = optixGetLaunchIndex().x;
    const int iy = optixGetLaunchIndex().y;
    const auto &camera = optixLaunchParams.camera;

    // ray payload
    float3 pixelColorPRD = make_float3(1.f);
    uint32_t u0, u1;
    packPointer( &pixelColorPRD, u0, u1 );

    // compute ray direction
    // normalized screen plane position, in [-1, 1]^2
    const float2 screen(make_float2(ix+.5f,iy+.5f)
                   / make_float2(optixGetLaunchDimensions().x, optixGetLaunchDimensions().y) * 2.0 - 1.0);

    // note: nau already takes into account the field of view when computing
    // camera horizontal and vertical
    float3 rayDir = normalize(camera.direction
                        + screen.x * camera.horizontal
                        + screen.y * camera.vertical);

    // trace primary ray
    optixTrace(optixLaunchParams.traversable,
            camera.position,
            rayDir,
            0.f,    // tmin
            1e20f,  // tmax
            0.0f,   // rayTime
            OptixVisibilityMask( 255 ),
            OPTIX_RAY_FLAG_DISABLE_ANYHIT,
            PHONG_RAY_TYPE,             // SBT offset
            RAY_TYPE_COUNT,             // SBT stride
            PHONG_RAY_TYPE,             // missSBTIndex
            u0, u1 );

    //convert float (0-1) to int (0-255)
    const int r = int(255.0f*pixelColorPRD.x);
    const int g = int(255.0f*pixelColorPRD.y);
    const int b = int(255.0f*pixelColorPRD.z);

    // convert to 32-bit rgba value
    const uint32_t rgba = 0xff000000 | (r<<0) | (g<<8) | (b<<16);

    // compute index
    const uint32_t fbIndex = ix+iy*optixGetLaunchDimensions().x;

    // write to output buffer
    optixLaunchParams.frame.colorBuffer[fbIndex] = rgba;
}
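Both versions of this file call packPointer() and getPRD<T>(), helpers that are defined elsewhere in the project and not shown in this excerpt. The sketch below is the common OptiX 7 idiom such helpers implement (splitting a 64-bit payload pointer across the two 32-bit payload registers p0/p1); it is offered as a hedged reference for reading the code above, not as this project's exact implementation:

static __forceinline__ __device__ void packPointer(void *ptr, uint32_t &i0, uint32_t &i1)
{
    const uint64_t uptr = reinterpret_cast<uint64_t>(ptr);
    i0 = uptr >> 32;                 // high 32 bits -> payload register 0
    i1 = uptr & 0x00000000ffffffffu; // low 32 bits  -> payload register 1
}

static __forceinline__ __device__ void *unpackPointer(uint32_t i0, uint32_t i1)
{
    const uint64_t uptr = (static_cast<uint64_t>(i0) << 32) | i1;
    return reinterpret_cast<void*>(uptr);
}

template<typename T>
static __forceinline__ __device__ T *getPRD()
{
    // closest-hit and miss programs read back the pointer the ray-gen program packed
    const uint32_t u0 = optixGetPayload_0();
    const uint32_t u1 = optixGetPayload_1();
    return reinterpret_cast<T*>(unpackPointer(u0, u1));
}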
9eefd6a9a5aa83488c28444402e020e24c5ae1a9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cumath.h" #define uint int #define real float #define real3 float3 #define real4 float4 #define mkreal3 make_float3 #define mkreal4 make_float4 #define Gx 0 #define Gy -9 #define Gz 0 #define Q 19 #define TAU 1000.0 __constant__ real PI = 3.1415; __constant__ real C = 340.0; __constant__ real W = 1/TAU; __device__ real feq( real3 c , real3 v ) { real C2 = C*C; real cv = dot(c,v); return 1 + 3*cv/C2 + 4.5*cv/(C2*C2) - 1.5*dot(v,v)/C2 ; } extern "C" { __global__ void collision_step( real*pts , int dx , int dy , int dz ) { uint x = threadIdx.x + blockDim.x * blockIdx.x; uint y = threadIdx.y + blockDim.y * blockIdx.y; uint z = threadIdx.z + blockDim.z * blockIdx.z; if( x >= dx || y >= dy || z >= dz ) return; uint i = ((( x*dy ) + y )*dz + z)*Q; real g = 0.0f; for( uint j=0 ; j<Q ; ++j ) g += pts[i+j]; real3 V = mkreal3(0,0,0); V += pts[i+ 1] * mkreal3( 1, 0, 0); V += pts[i+ 2] * mkreal3( 0, 1, 0); V += pts[i+ 3] * mkreal3( 0, 0, 1); V += pts[i+ 4] * mkreal3(-1, 0, 0); V += pts[i+ 5] * mkreal3( 0,-1, 0); V += pts[i+ 6] * mkreal3( 0, 0,-1); V += pts[i+ 7] * mkreal3( 0, 1, 1); V += pts[i+ 8] * mkreal3( 1, 0, 1); V += pts[i+ 9] * mkreal3( 1, 1, 0); V += pts[i+10] * mkreal3( 0,-1,-1); V += pts[i+11] * mkreal3(-1, 0,-1); V += pts[i+12] * mkreal3(-1,-1, 0); V += pts[i+13] * mkreal3( 0,+1,-1); V += pts[i+14] * mkreal3(+1, 0,-1); V += pts[i+15] * mkreal3(+1,-1, 0); V += pts[i+16] * mkreal3( 0,-1,+1); V += pts[i+17] * mkreal3(-1, 0,+1); V += pts[i+18] * mkreal3(-1,+1, 0); if( g < 1e-5 && g > -1e-5 ) return; V = V / g; pts[i ] = pts[i ]*(1.0-W) + W*g*feq( mkreal3( 0, 0, 0), V )/ 3.0; pts[i+ 1] = pts[i+ 1]*(1.0-W) + W*g*feq( mkreal3( 1, 0, 0), V )/18.0; pts[i+ 2] = pts[i+ 2]*(1.0-W) + W*g*feq( mkreal3( 0, 1, 0), V )/18.0; pts[i+ 3] = pts[i+ 3]*(1.0-W) + W*g*feq( mkreal3( 0, 0, 1), V )/18.0; pts[i+ 4] = pts[i+ 4]*(1.0-W) + W*g*feq( mkreal3(-1, 0, 0), V )/18.0; pts[i+ 5] = pts[i+ 5]*(1.0-W) + W*g*feq( mkreal3( 0,-1, 0), V )/18.0; pts[i+ 6] = pts[i+ 6]*(1.0-W) + W*g*feq( mkreal3( 0, 0,-1), V )/18.0; pts[i+ 7] = pts[i+ 7]*(1.0-W) + W*g*feq( mkreal3( 0, 1, 1), V )/18.0; pts[i+ 8] = pts[i+ 8]*(1.0-W) + W*g*feq( mkreal3( 1, 0, 1), V )/36.0; pts[i+ 9] = pts[i+ 9]*(1.0-W) + W*g*feq( mkreal3( 1, 1, 0), V )/36.0; pts[i+10] = pts[i+10]*(1.0-W) + W*g*feq( mkreal3( 0,-1,-1), V )/36.0; pts[i+11] = pts[i+11]*(1.0-W) + W*g*feq( mkreal3(-1, 0,-1), V )/36.0; pts[i+12] = pts[i+12]*(1.0-W) + W*g*feq( mkreal3(-1,-1, 0), V )/36.0; pts[i+13] = pts[i+13]*(1.0-W) + W*g*feq( mkreal3( 0,+1,-1), V )/36.0; pts[i+14] = pts[i+14]*(1.0-W) + W*g*feq( mkreal3(+1, 0,-1), V )/36.0; pts[i+15] = pts[i+15]*(1.0-W) + W*g*feq( mkreal3(+1,-1, 0), V )/36.0; pts[i+16] = pts[i+16]*(1.0-W) + W*g*feq( mkreal3( 0,-1,+1), V )/36.0; pts[i+17] = pts[i+17]*(1.0-W) + W*g*feq( mkreal3(-1, 0,+1), V )/36.0; pts[i+18] = pts[i+18]*(1.0-W) + W*g*feq( mkreal3(-1,+1, 0), V )/36.0; if( x == 0 ) { pts[i+ 1] += pts[i+ 4]; pts[i+14] += pts[i+11]; pts[i+15] += pts[i+12]; pts[i+ 8] += pts[i+17]; pts[i+ 9] += pts[i+18]; pts[i+ 4] = 0; pts[i+11] = 0; pts[i+12] = 0; pts[i+17] = 0; pts[i+18] = 0; } if( x == dx-1 ) { pts[i+ 4] += pts[i+ 1]; pts[i+11] += pts[i+14]; pts[i+12] += pts[i+15]; pts[i+17] += pts[i+ 8]; pts[i+18] += pts[i+ 9]; pts[i+ 1] = 0; pts[i+14] = 0; pts[i+15] = 0; pts[i+ 8] = 0; pts[i+ 9] = 0; } if( y == 0 ) { pts[i+ 2] += pts[i+ 5]; pts[i+13] += pts[i+10]; pts[i+18] += pts[i+12]; pts[i+ 9] += pts[i+15]; pts[i+ 7] += pts[i+16]; pts[i+ 5] = 0; pts[i+10] = 0; 
pts[i+12] = 0; pts[i+15] = 0; pts[i+16] = 0; } if( y == dy-1 ) { pts[i+ 5] += pts[i+ 2]; pts[i+10] += pts[i+13]; pts[i+12] += pts[i+18]; pts[i+15] += pts[i+ 9]; pts[i+16] += pts[i+ 7]; pts[i+ 2] = 0; pts[i+13] = 0; pts[i+18] = 0; pts[i+ 9] = 0; pts[i+ 7] = 0; } if( z == 0 ) { pts[i+ 3] += pts[i+ 6]; pts[i+16] += pts[i+10]; pts[i+17] += pts[i+11]; pts[i+ 7] += pts[i+13]; pts[i+ 8] += pts[i+14]; pts[i+ 6] = 0; pts[i+10] = 0; pts[i+11] = 0; pts[i+13] = 0; pts[i+14] = 0; } if( z == dz-1 ) { pts[i+ 6] += pts[i+ 3]; pts[i+10] += pts[i+16]; pts[i+11] += pts[i+17]; pts[i+13] += pts[i+ 7]; pts[i+14] += pts[i+ 8]; pts[i+ 3] = 0; pts[i+16] = 0; pts[i+17] = 0; pts[i+ 7] = 0; pts[i+ 8] = 0; } } __global__ void streaming_step( real*f1 , real*f2 , int Dx , int Dy , int Dz ) { uint x = threadIdx.x + blockDim.x * blockIdx.x; uint y = threadIdx.y + blockDim.y * blockIdx.y; uint z = threadIdx.z + blockDim.z * blockIdx.z; if( x >= Dx || y >= Dy || z >= Dz ) return; uint i = ((( x*Dy ) + y )*Dz + z)*Q; uint dx = Dy * Dz * Q; uint dy = Dz * Q; uint dz = Q; f2[i ] = f1[i ]; f2[i+dx + 1] = f1[i+ 1]; f2[i +dy + 2] = f1[i+ 2]; f2[i +dz+ 3] = f1[i+ 3]; f2[i-dx + 4] = f1[i+ 4]; f2[i -dy + 5] = f1[i+ 5]; f2[i -dz+ 6] = f1[i+ 6]; f2[i +dy+dz+ 7] = f1[i+ 7]; f2[i+dx +dz+ 8] = f1[i+ 8]; f2[i+dx+dy + 9] = f1[i+ 9]; f2[i -dy-dz+10] = f1[i+10]; f2[i-dx -dz+11] = f1[i+11]; f2[i-dx-dy +12] = f1[i+12]; f2[i +dy-dz+13] = f1[i+13]; f2[i+dx -dz+14] = f1[i+14]; f2[i+dx-dy +15] = f1[i+15]; f2[i -dy+dz+16] = f1[i+16]; f2[i-dx +dz+17] = f1[i+17]; f2[i-dx+dy +18] = f1[i+18]; /** for D3Q27 model **/ /* f2[i+dx+dy+dz]*/ /* f2[i-dx+dy+dz]*/ /* f2[i+dx-dy+dz]*/ /* f2[i+dx+dy-dz]*/ /* f2[i+dx-dy-dz]*/ /* f2[i-dx+dy-dz]*/ /* f2[i-dx-dy+dz]*/ /* f2[i-dx-dy-dz]*/ } __global__ void colors( real4*col , real*pts , int dx , int dy , int dz ) { uint x = threadIdx.x + blockDim.x * blockIdx.x; uint y = threadIdx.y + blockDim.y * blockIdx.y; uint z = threadIdx.z + blockDim.z * blockIdx.z; if( x >= dx || y >= dy || z >= dz ) return; uint i = (( x*dy ) + y )*dz + z; uint iQ = i*Q; real g = 0.0f; for( uint j=0 ; j<Q ; ++j ) g += pts[iQ+j]; col[i].w = g > .001 ? 1.0 : 0.0; } }
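For reference, feq above is a truncated BGK equilibrium with the lattice weights applied separately (the /3.0, /18.0 and /36.0 divisors in collision_step). The commonly quoted second-order expansion, with density rho, lattice velocity c_i and macroscopic velocity v, is usually written as follows; note that the standard cross term is quadratic in c_i . v:

f_i^{eq}(\rho,\mathbf{v}) \;=\; w_i\,\rho\left(1 \;+\; \frac{3\,(\mathbf{c}_i\cdot\mathbf{v})}{c^2} \;+\; \frac{9\,(\mathbf{c}_i\cdot\mathbf{v})^2}{2\,c^4} \;-\; \frac{3\,(\mathbf{v}\cdot\mathbf{v})}{2\,c^2}\right)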
9eefd6a9a5aa83488c28444402e020e24c5ae1a9.cu
#include "cumath.h" #define uint int #define real float #define real3 float3 #define real4 float4 #define mkreal3 make_float3 #define mkreal4 make_float4 #define Gx 0 #define Gy -9 #define Gz 0 #define Q 19 #define TAU 1000.0 __constant__ real PI = 3.1415; __constant__ real C = 340.0; __constant__ real W = 1/TAU; __device__ real feq( real3 c , real3 v ) { real C2 = C*C; real cv = dot(c,v); return 1 + 3*cv/C2 + 4.5*cv/(C2*C2) - 1.5*dot(v,v)/C2 ; } extern "C" { __global__ void collision_step( real*pts , int dx , int dy , int dz ) { uint x = threadIdx.x + blockDim.x * blockIdx.x; uint y = threadIdx.y + blockDim.y * blockIdx.y; uint z = threadIdx.z + blockDim.z * blockIdx.z; if( x >= dx || y >= dy || z >= dz ) return; uint i = ((( x*dy ) + y )*dz + z)*Q; real g = 0.0f; for( uint j=0 ; j<Q ; ++j ) g += pts[i+j]; real3 V = mkreal3(0,0,0); V += pts[i+ 1] * mkreal3( 1, 0, 0); V += pts[i+ 2] * mkreal3( 0, 1, 0); V += pts[i+ 3] * mkreal3( 0, 0, 1); V += pts[i+ 4] * mkreal3(-1, 0, 0); V += pts[i+ 5] * mkreal3( 0,-1, 0); V += pts[i+ 6] * mkreal3( 0, 0,-1); V += pts[i+ 7] * mkreal3( 0, 1, 1); V += pts[i+ 8] * mkreal3( 1, 0, 1); V += pts[i+ 9] * mkreal3( 1, 1, 0); V += pts[i+10] * mkreal3( 0,-1,-1); V += pts[i+11] * mkreal3(-1, 0,-1); V += pts[i+12] * mkreal3(-1,-1, 0); V += pts[i+13] * mkreal3( 0,+1,-1); V += pts[i+14] * mkreal3(+1, 0,-1); V += pts[i+15] * mkreal3(+1,-1, 0); V += pts[i+16] * mkreal3( 0,-1,+1); V += pts[i+17] * mkreal3(-1, 0,+1); V += pts[i+18] * mkreal3(-1,+1, 0); if( g < 1e-5 && g > -1e-5 ) return; V = V / g; pts[i ] = pts[i ]*(1.0-W) + W*g*feq( mkreal3( 0, 0, 0), V )/ 3.0; pts[i+ 1] = pts[i+ 1]*(1.0-W) + W*g*feq( mkreal3( 1, 0, 0), V )/18.0; pts[i+ 2] = pts[i+ 2]*(1.0-W) + W*g*feq( mkreal3( 0, 1, 0), V )/18.0; pts[i+ 3] = pts[i+ 3]*(1.0-W) + W*g*feq( mkreal3( 0, 0, 1), V )/18.0; pts[i+ 4] = pts[i+ 4]*(1.0-W) + W*g*feq( mkreal3(-1, 0, 0), V )/18.0; pts[i+ 5] = pts[i+ 5]*(1.0-W) + W*g*feq( mkreal3( 0,-1, 0), V )/18.0; pts[i+ 6] = pts[i+ 6]*(1.0-W) + W*g*feq( mkreal3( 0, 0,-1), V )/18.0; pts[i+ 7] = pts[i+ 7]*(1.0-W) + W*g*feq( mkreal3( 0, 1, 1), V )/18.0; pts[i+ 8] = pts[i+ 8]*(1.0-W) + W*g*feq( mkreal3( 1, 0, 1), V )/36.0; pts[i+ 9] = pts[i+ 9]*(1.0-W) + W*g*feq( mkreal3( 1, 1, 0), V )/36.0; pts[i+10] = pts[i+10]*(1.0-W) + W*g*feq( mkreal3( 0,-1,-1), V )/36.0; pts[i+11] = pts[i+11]*(1.0-W) + W*g*feq( mkreal3(-1, 0,-1), V )/36.0; pts[i+12] = pts[i+12]*(1.0-W) + W*g*feq( mkreal3(-1,-1, 0), V )/36.0; pts[i+13] = pts[i+13]*(1.0-W) + W*g*feq( mkreal3( 0,+1,-1), V )/36.0; pts[i+14] = pts[i+14]*(1.0-W) + W*g*feq( mkreal3(+1, 0,-1), V )/36.0; pts[i+15] = pts[i+15]*(1.0-W) + W*g*feq( mkreal3(+1,-1, 0), V )/36.0; pts[i+16] = pts[i+16]*(1.0-W) + W*g*feq( mkreal3( 0,-1,+1), V )/36.0; pts[i+17] = pts[i+17]*(1.0-W) + W*g*feq( mkreal3(-1, 0,+1), V )/36.0; pts[i+18] = pts[i+18]*(1.0-W) + W*g*feq( mkreal3(-1,+1, 0), V )/36.0; if( x == 0 ) { pts[i+ 1] += pts[i+ 4]; pts[i+14] += pts[i+11]; pts[i+15] += pts[i+12]; pts[i+ 8] += pts[i+17]; pts[i+ 9] += pts[i+18]; pts[i+ 4] = 0; pts[i+11] = 0; pts[i+12] = 0; pts[i+17] = 0; pts[i+18] = 0; } if( x == dx-1 ) { pts[i+ 4] += pts[i+ 1]; pts[i+11] += pts[i+14]; pts[i+12] += pts[i+15]; pts[i+17] += pts[i+ 8]; pts[i+18] += pts[i+ 9]; pts[i+ 1] = 0; pts[i+14] = 0; pts[i+15] = 0; pts[i+ 8] = 0; pts[i+ 9] = 0; } if( y == 0 ) { pts[i+ 2] += pts[i+ 5]; pts[i+13] += pts[i+10]; pts[i+18] += pts[i+12]; pts[i+ 9] += pts[i+15]; pts[i+ 7] += pts[i+16]; pts[i+ 5] = 0; pts[i+10] = 0; pts[i+12] = 0; pts[i+15] = 0; pts[i+16] = 0; } if( y == dy-1 ) { pts[i+ 5] += pts[i+ 2]; 
pts[i+10] += pts[i+13]; pts[i+12] += pts[i+18]; pts[i+15] += pts[i+ 9]; pts[i+16] += pts[i+ 7]; pts[i+ 2] = 0; pts[i+13] = 0; pts[i+18] = 0; pts[i+ 9] = 0; pts[i+ 7] = 0; } if( z == 0 ) { pts[i+ 3] += pts[i+ 6]; pts[i+16] += pts[i+10]; pts[i+17] += pts[i+11]; pts[i+ 7] += pts[i+13]; pts[i+ 8] += pts[i+14]; pts[i+ 6] = 0; pts[i+10] = 0; pts[i+11] = 0; pts[i+13] = 0; pts[i+14] = 0; } if( z == dz-1 ) { pts[i+ 6] += pts[i+ 3]; pts[i+10] += pts[i+16]; pts[i+11] += pts[i+17]; pts[i+13] += pts[i+ 7]; pts[i+14] += pts[i+ 8]; pts[i+ 3] = 0; pts[i+16] = 0; pts[i+17] = 0; pts[i+ 7] = 0; pts[i+ 8] = 0; } } __global__ void streaming_step( real*f1 , real*f2 , int Dx , int Dy , int Dz ) { uint x = threadIdx.x + blockDim.x * blockIdx.x; uint y = threadIdx.y + blockDim.y * blockIdx.y; uint z = threadIdx.z + blockDim.z * blockIdx.z; if( x >= Dx || y >= Dy || z >= Dz ) return; uint i = ((( x*Dy ) + y )*Dz + z)*Q; uint dx = Dy * Dz * Q; uint dy = Dz * Q; uint dz = Q; f2[i ] = f1[i ]; f2[i+dx + 1] = f1[i+ 1]; f2[i +dy + 2] = f1[i+ 2]; f2[i +dz+ 3] = f1[i+ 3]; f2[i-dx + 4] = f1[i+ 4]; f2[i -dy + 5] = f1[i+ 5]; f2[i -dz+ 6] = f1[i+ 6]; f2[i +dy+dz+ 7] = f1[i+ 7]; f2[i+dx +dz+ 8] = f1[i+ 8]; f2[i+dx+dy + 9] = f1[i+ 9]; f2[i -dy-dz+10] = f1[i+10]; f2[i-dx -dz+11] = f1[i+11]; f2[i-dx-dy +12] = f1[i+12]; f2[i +dy-dz+13] = f1[i+13]; f2[i+dx -dz+14] = f1[i+14]; f2[i+dx-dy +15] = f1[i+15]; f2[i -dy+dz+16] = f1[i+16]; f2[i-dx +dz+17] = f1[i+17]; f2[i-dx+dy +18] = f1[i+18]; /** for D3Q27 model **/ /* f2[i+dx+dy+dz]*/ /* f2[i-dx+dy+dz]*/ /* f2[i+dx-dy+dz]*/ /* f2[i+dx+dy-dz]*/ /* f2[i+dx-dy-dz]*/ /* f2[i-dx+dy-dz]*/ /* f2[i-dx-dy+dz]*/ /* f2[i-dx-dy-dz]*/ } __global__ void colors( real4*col , real*pts , int dx , int dy , int dz ) { uint x = threadIdx.x + blockDim.x * blockIdx.x; uint y = threadIdx.y + blockDim.y * blockIdx.y; uint z = threadIdx.z + blockDim.z * blockIdx.z; if( x >= dx || y >= dy || z >= dz ) return; uint i = (( x*dy ) + y )*dz + z; uint iQ = i*Q; real g = 0.0f; for( uint j=0 ; j<Q ; ++j ) g += pts[iQ+j]; col[i].w = g > .001 ? 1.0 : 0.0; } }
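Neither copy of this file shows the host side. Below is a minimal, hedged sketch of how collision_step and streaming_step are typically driven from the same .cu file: a 3-D launch grid covering the dx*dy*dz lattice and a ping-pong between two distribution buffers. The buffer names, block shape and the caller's iteration loop are assumptions, not the project's code:

void lbm_step(float *d_f1, float *d_f2, int dx, int dy, int dz)
{
    // 3-D thread blocks, one thread per lattice site, rounded up per axis
    dim3 block(8, 8, 8);
    dim3 grid((dx + block.x - 1) / block.x,
              (dy + block.y - 1) / block.y,
              (dz + block.z - 1) / block.z);

    // relax the local distributions toward equilibrium in place ...
    collision_step<<<grid, block>>>(d_f1, dx, dy, dz);

    // ... then propagate them to neighbouring sites into the second buffer
    streaming_step<<<grid, block>>>(d_f1, d_f2, dx, dy, dz);

    // the caller would swap d_f1 / d_f2 before the next time step
}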
3fc226e4fc06d5b0b3d22f62035d9bebf91e0501.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved // modified from // https://github.com/open-mmlab/mmdetection/blob/master/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu // Original license: Apache 2.0 // clang-format off // modify from // https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu /*! ******************* BEGIN Caffe Copyright Notice and Disclaimer ***************** * * COPYRIGHT * * All contributions by the University of California: * Copyright (c) 2014-2017 The Regents of the University of California (Regents) * All rights reserved. * * All other contributions: * Copyright (c) 2014-2017, the respective contributors * All rights reserved. * * Caffe uses a shared copyright model: each contributor holds copyright over * their contributions to Caffe. The project versioning records all such * contribution and copyright details. If a contributor wants to further mark * their specific copyright on a particular contribution, they should indicate * their copyright solely in the commit message of the change when it is * committed. * * LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE *FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL *DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR *SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER *CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, *OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE *OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * CONTRIBUTION AGREEMENT * * By contributing to the BVLC/caffe repository through pull-request, comment, * or otherwise, the contributor releases their content to the * license and copyright terms herein. * ***************** END Caffe Copyright Notice and Disclaimer ********************* * * Copyright (c) 2018 Microsoft * Licensed under The MIT License [see LICENSE for details] * \file modulated_deformable_im2col.cuh * \brief Function definitions of converting an image to * column matrix based on kernel, padding, dilation, and offset. * These functions are mainly used in deformable convolution operators. 
* \ref: https://arxiv.org/abs/1703.06211 * \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng */ #include <ATen/ATen.h> #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h> #include <float.h> #include <math.h> #include <stdio.h> #include <THH/THHAtomics.cuh> using namespace at; #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) namespace { const int CUDA_NUM_THREADS = 1024; const int kMaxGridNum = 65535; inline int GET_BLOCKS(const int N) { return ::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS); } } template <typename scalar_t> __device__ scalar_t deformable_im2col_bilinear( const scalar_t* bottom_data, const int data_width, const int height, const int width, scalar_t h, scalar_t w) { int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; scalar_t lh = h - h_low; scalar_t lw = w - w_low; scalar_t hh = 1 - lh, hw = 1 - lw; scalar_t v1 = 0; if (h_low >= 0 && w_low >= 0) v1 = bottom_data[h_low * data_width + w_low]; scalar_t v2 = 0; if (h_low >= 0 && w_high <= width - 1) v2 = bottom_data[h_low * data_width + w_high]; scalar_t v3 = 0; if (h_high <= height - 1 && w_low >= 0) v3 = bottom_data[h_high * data_width + w_low]; scalar_t v4 = 0; if (h_high <= height - 1 && w_high <= width - 1) v4 = bottom_data[h_high * data_width + w_high]; scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename scalar_t> __device__ scalar_t get_gradient_weight( scalar_t argmax_h, scalar_t argmax_w, const int h, const int w, const int height, const int width) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { // empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (h == argmax_h_low && w == argmax_w_low) weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); if (h == argmax_h_low && w == argmax_w_high) weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); if (h == argmax_h_high && w == argmax_w_low) weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); if (h == argmax_h_high && w == argmax_w_high) weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); return weight; } template <typename scalar_t> __device__ scalar_t get_coordinate_weight( scalar_t argmax_h, scalar_t argmax_w, const int height, const int width, const scalar_t* im_data, const int data_width, const int bp_dir) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { // empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (bp_dir == 0) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } else if (bp_dir == 1) { if (argmax_h_low >= 0 && argmax_w_low >= 0) 
weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } return weight; } template <typename scalar_t> __global__ void deformable_im2col_gpu_kernel( const int n, const scalar_t* data_im, const scalar_t* data_offset, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, scalar_t* data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int b_col = (index / width_col / height_col) % batch_size; const int c_im = (index / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; scalar_t* data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; // const scalar_t* data_im_ptr = data_im + ((b_col * num_channels + c_im) * // height + h_in) * width + w_in; const scalar_t* data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; const scalar_t* data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; scalar_t val = static_cast<scalar_t>(0); const scalar_t h_im = h_in + i * dilation_h + offset_h; const scalar_t w_im = w_in + j * dilation_w + offset_w; if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { // const scalar_t map_h = i * dilation_h + offset_h; // const scalar_t map_w = j * dilation_w + offset_w; // const int cur_height = height - h_in; // const int cur_width = width - w_in; // val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, // cur_width, map_h, map_w); val = deformable_im2col_bilinear( data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val; data_col_ptr += batch_size * height_col * width_col; } } } } template <typename scalar_t> __global__ void deformable_col2im_gpu_kernel( const int n, const scalar_t* data_col, const scalar_t* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int 
deformable_group, const int height_col, const int width_col, scalar_t* grad_im) { CUDA_KERNEL_LOOP(index, n) { const int j = (index / width_col / height_col / batch_size) % kernel_w; const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; // compute the start and end of the output const int deformable_group_index = c / channel_per_deformable_group; int w_out = index % width_col; int h_out = (index / width_col) % height_col; int b = (index / width_col / height_col) % batch_size; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const scalar_t* data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; const scalar_t cur_top_grad = data_col[index]; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1) { int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; scalar_t weight = get_gradient_weight( cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } template <typename scalar_t> __global__ void deformable_col2im_coord_gpu_kernel( const int n, const scalar_t* data_col, const scalar_t* data_im, const scalar_t* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int offset_channels, const int deformable_group, const int height_col, const int width_col, scalar_t* grad_offset) { CUDA_KERNEL_LOOP(index, n) { scalar_t val = 0; int w = index % width_col; int h = (index / width_col) % height_col; int c = (index / width_col / height_col) % offset_channels; int b = (index / width_col / height_col) / offset_channels; // compute the start and end of the output const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const scalar_t* data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; const scalar_t* data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; const scalar_t* data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = (((col_c * batch_size + b) * 
height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col / batch_size) % kernel_w; int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; scalar_t inv_h = h_in + i * dilation_h + offset_h; scalar_t inv_w = w_in + j * dilation_w + offset_w; if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -2; } const scalar_t weight = get_coordinate_weight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos]; cnt += 1; } grad_offset[index] = val; } } void deformable_im2col( const at::Tensor data_im, const at::Tensor data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, at::Tensor data_col) { // num_axes should be smaller than block size // todo: check parallel_imgs is correctly passed in int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * height_col * width_col * parallel_imgs; int channel_per_deformable_group = channels / deformable_group; at::hip::HIPGuardMasqueradingAsCUDA device_guard(data_im.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_im.scalar_type(), "deformable_im2col_gpu", ([&] { const scalar_t* data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>(); scalar_t* data_col_ = data_col.data_ptr<scalar_t>(); hipLaunchKernelGGL(( deformable_im2col_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, data_im_, data_offset_, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, channels, deformable_group, height_col, width_col, data_col_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in deformable_im2col: %s\n", hipGetErrorString(err)); } } void deformable_col2im( const at::Tensor data_col, const at::Tensor data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, at::Tensor grad_im) { // todo: make sure parallel_imgs is passed in correctly int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs; int channel_per_deformable_group = channels / deformable_group; 
at::hip::HIPGuardMasqueradingAsCUDA device_guard(data_col.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.scalar_type(), "deformable_col2im_gpu", ([&] { const scalar_t* data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>(); scalar_t* grad_im_ = grad_im.data_ptr<scalar_t>(); hipLaunchKernelGGL(( deformable_col2im_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, data_col_, data_offset_, channels, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, deformable_group, height_col, width_col, grad_im_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in deformable_col2im: %s\n", hipGetErrorString(err)); } } void deformable_col2im_coord( const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, at::Tensor grad_offset) { int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w * deformable_group * parallel_imgs; int channel_per_deformable_group = channels * ksize_h * ksize_w / deformable_group; at::hip::HIPGuardMasqueradingAsCUDA device_guard(data_col.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.scalar_type(), "deformable_col2im_coord_gpu", ([&] { const scalar_t* data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t* data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>(); scalar_t* grad_offset_ = grad_offset.data_ptr<scalar_t>(); hipLaunchKernelGGL(( deformable_col2im_coord_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, data_col_, data_im_, data_offset_, channels, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, 2 * ksize_h * ksize_w * deformable_group, deformable_group, height_col, width_col, grad_offset_); })); } template <typename scalar_t> __device__ scalar_t dmcn_im2col_bilinear( const scalar_t* bottom_data, const int data_width, const int height, const int width, scalar_t h, scalar_t w) { int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; scalar_t lh = h - h_low; scalar_t lw = w - w_low; scalar_t hh = 1 - lh, hw = 1 - lw; scalar_t v1 = 0; if (h_low >= 0 && w_low >= 0) v1 = bottom_data[h_low * data_width + w_low]; scalar_t v2 = 0; if (h_low >= 0 && w_high <= width - 1) v2 = bottom_data[h_low * data_width + w_high]; scalar_t v3 = 0; if (h_high <= height - 1 && w_low >= 0) v3 = bottom_data[h_high * data_width + w_low]; scalar_t v4 = 0; if (h_high <= height - 1 && w_high <= width - 1) v4 = bottom_data[h_high * data_width + w_high]; scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename scalar_t> __device__ scalar_t dmcn_get_gradient_weight( 
scalar_t argmax_h, scalar_t argmax_w, const int h, const int w, const int height, const int width) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { // empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (h == argmax_h_low && w == argmax_w_low) weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); if (h == argmax_h_low && w == argmax_w_high) weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); if (h == argmax_h_high && w == argmax_w_low) weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); if (h == argmax_h_high && w == argmax_w_high) weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); return weight; } template <typename scalar_t> __device__ scalar_t dmcn_get_coordinate_weight( scalar_t argmax_h, scalar_t argmax_w, const int height, const int width, const scalar_t* im_data, const int data_width, const int bp_dir) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { // empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (bp_dir == 0) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } else if (bp_dir == 1) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } return weight; } template <typename scalar_t> __global__ void modulated_deformable_im2col_gpu_kernel( const int n, const scalar_t* data_im, const scalar_t* data_offset, const scalar_t* data_mask, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, scalar_t* data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int b_col = (index / width_col / height_col) % batch_size; const int c_im = (index / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = 
w_col * stride_w - pad_w; scalar_t* data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; // const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * // height + h_in) * width + w_in; const scalar_t* data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; const scalar_t* data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const scalar_t* data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; scalar_t val = static_cast<scalar_t>(0); const scalar_t h_im = h_in + i * dilation_h + offset_h; const scalar_t w_im = w_in + j * dilation_w + offset_w; // if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { // const float map_h = i * dilation_h + offset_h; // const float map_w = j * dilation_w + offset_w; // const int cur_height = height - h_in; // const int cur_width = width - w_in; // val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, // cur_width, map_h, map_w); val = dmcn_im2col_bilinear( data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val * mask; data_col_ptr += batch_size * height_col * width_col; // data_col_ptr += height_col * width_col; } } } } template <typename scalar_t> __global__ void modulated_deformable_col2im_gpu_kernel( const int n, const scalar_t* data_col, const scalar_t* data_offset, const scalar_t* data_mask, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int deformable_group, const int height_col, const int width_col, scalar_t* grad_im) { CUDA_KERNEL_LOOP(index, n) { const int j = (index / width_col / height_col / batch_size) % kernel_w; const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; // compute the start and end of the output const int deformable_group_index = c / channel_per_deformable_group; int w_out = index % width_col; int h_out = (index / width_col) % height_col; int b = (index / width_col / height_col) % batch_size; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const scalar_t* data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const scalar_t* data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + 
w_out; const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; const scalar_t cur_top_grad = data_col[index] * mask; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1) { int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; scalar_t weight = dmcn_get_gradient_weight( cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } template <typename scalar_t> __global__ void modulated_deformable_col2im_coord_gpu_kernel( const int n, const scalar_t* data_col, const scalar_t* data_im, const scalar_t* data_offset, const scalar_t* data_mask, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int offset_channels, const int deformable_group, const int height_col, const int width_col, scalar_t* grad_offset, scalar_t* grad_mask) { CUDA_KERNEL_LOOP(index, n) { scalar_t val = 0, mval = 0; int w = index % width_col; int h = (index / width_col) % height_col; int c = (index / width_col / height_col) % offset_channels; int b = (index / width_col / height_col) / offset_channels; // compute the start and end of the output const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const scalar_t* data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; const scalar_t* data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; const scalar_t* data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const scalar_t* data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col / batch_size) % kernel_w; int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * 
width_col + w_out); const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; scalar_t inv_h = h_in + i * dilation_h + offset_h; scalar_t inv_w = w_in + j * dilation_w + offset_w; if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -2; } else { mval += data_col_ptr[col_pos] * dmcn_im2col_bilinear( data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w); } const scalar_t weight = dmcn_get_coordinate_weight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos] * mask; cnt += 1; } // KERNEL_ASSIGN(grad_offset[index], offset_req, val); grad_offset[index] = val; if (offset_c % 2 == 0) // KERNEL_ASSIGN(grad_mask[(((b * deformable_group + // deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * // height_col + h) * width_col + w], mask_req, mval); grad_mask [(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w] = mval; } } void modulated_deformable_im2col_cuda( const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kenerl_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor data_col) { // num_axes should be smaller than block size const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * batch_size * height_col * width_col; at::hip::HIPGuardMasqueradingAsCUDA device_guard(data_im.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_im.scalar_type(), "modulated_deformable_im2col_gpu", ([&] { const scalar_t* data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>(); const scalar_t* data_mask_ = data_mask.data_ptr<scalar_t>(); scalar_t* data_col_ = data_col.data_ptr<scalar_t>(); hipLaunchKernelGGL(( modulated_deformable_im2col_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, kernel_h, kenerl_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, channels, deformable_group, height_col, width_col, data_col_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf( "error in modulated_deformable_im2col_cuda: %s\n", hipGetErrorString(err)); } } void modulated_deformable_col2im_cuda( const at::Tensor data_col, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor grad_im) { const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col; at::hip::HIPGuardMasqueradingAsCUDA device_guard(data_col.device()); hipStream_t stream = 
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.scalar_type(), "modulated_deformable_col2im_gpu", ([&] { const scalar_t* data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>(); const scalar_t* data_mask_ = data_mask.data_ptr<scalar_t>(); scalar_t* grad_im_ = grad_im.data_ptr<scalar_t>(); hipLaunchKernelGGL(( modulated_deformable_col2im_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, data_col_, data_offset_, data_mask_, channels, height_im, width_im, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, deformable_group, height_col, width_col, grad_im_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf( "error in modulated_deformable_col2im_cuda: %s\n", hipGetErrorString(err)); } } void modulated_deformable_col2im_coord_cuda( const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor grad_offset, at::Tensor grad_mask) { const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group; const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group; at::hip::HIPGuardMasqueradingAsCUDA device_guard(data_col.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.scalar_type(), "modulated_deformable_col2im_coord_gpu", ([&] { const scalar_t* data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t* data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>(); const scalar_t* data_mask_ = data_mask.data_ptr<scalar_t>(); scalar_t* grad_offset_ = grad_offset.data_ptr<scalar_t>(); scalar_t* grad_mask_ = grad_mask.data_ptr<scalar_t>(); hipLaunchKernelGGL(( modulated_deformable_col2im_coord_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, data_col_, data_im_, data_offset_, data_mask_, channels, height_im, width_im, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col, grad_offset_, grad_mask_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf( "error in modulated_deformable_col2im_coord_cuda: %s\n", hipGetErrorString(err)); } }
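All of the kernels in this file are driven through the CUDA_KERNEL_LOOP macro and the GET_BLOCKS helper defined near its top. As a small, self-contained illustration of that grid-stride pattern, here is a made-up kernel (scale_kernel and its wrapper are hypothetical, only the launch arithmetic mirrors the file):

#include <algorithm>
#include <cuda_runtime.h>

__global__ void scale_kernel(const int n, float *data, const float factor) {
  // Same loop shape as CUDA_KERNEL_LOOP(i, n): each thread handles indices
  // i, i + blockDim.x * gridDim.x, ... so any n is covered even when the
  // grid is clamped to kMaxGridNum blocks.
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    data[i] *= factor;
  }
}

void scale(float *d_data, int n, float factor, cudaStream_t stream) {
  const int threads = 1024;                                         // CUDA_NUM_THREADS in this file
  const int blocks = std::min(65535, (n + threads - 1) / threads);  // equivalent of GET_BLOCKS(n)
  scale_kernel<<<blocks, threads, 0, stream>>>(n, d_data, factor);
}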
3fc226e4fc06d5b0b3d22f62035d9bebf91e0501.cu
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved // modified from // https://github.com/open-mmlab/mmdetection/blob/master/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu // Original license: Apache 2.0 // clang-format off // modify from // https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu /*! ******************* BEGIN Caffe Copyright Notice and Disclaimer ***************** * * COPYRIGHT * * All contributions by the University of California: * Copyright (c) 2014-2017 The Regents of the University of California (Regents) * All rights reserved. * * All other contributions: * Copyright (c) 2014-2017, the respective contributors * All rights reserved. * * Caffe uses a shared copyright model: each contributor holds copyright over * their contributions to Caffe. The project versioning records all such * contribution and copyright details. If a contributor wants to further mark * their specific copyright on a particular contribution, they should indicate * their copyright solely in the commit message of the change when it is * committed. * * LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE *FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL *DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR *SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER *CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, *OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE *OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * CONTRIBUTION AGREEMENT * * By contributing to the BVLC/caffe repository through pull-request, comment, * or otherwise, the contributor releases their content to the * license and copyright terms herein. * ***************** END Caffe Copyright Notice and Disclaimer ********************* * * Copyright (c) 2018 Microsoft * Licensed under The MIT License [see LICENSE for details] * \file modulated_deformable_im2col.cuh * \brief Function definitions of converting an image to * column matrix based on kernel, padding, dilation, and offset. * These functions are mainly used in deformable convolution operators. 
* \ref: https://arxiv.org/abs/1703.06211 * \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng */ #include <ATen/ATen.h> #include <c10/cuda/CUDAGuard.h> #include <float.h> #include <math.h> #include <stdio.h> #include <THC/THCAtomics.cuh> using namespace at; #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) namespace { const int CUDA_NUM_THREADS = 1024; const int kMaxGridNum = 65535; inline int GET_BLOCKS(const int N) { return std::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS); } } template <typename scalar_t> __device__ scalar_t deformable_im2col_bilinear( const scalar_t* bottom_data, const int data_width, const int height, const int width, scalar_t h, scalar_t w) { int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; scalar_t lh = h - h_low; scalar_t lw = w - w_low; scalar_t hh = 1 - lh, hw = 1 - lw; scalar_t v1 = 0; if (h_low >= 0 && w_low >= 0) v1 = bottom_data[h_low * data_width + w_low]; scalar_t v2 = 0; if (h_low >= 0 && w_high <= width - 1) v2 = bottom_data[h_low * data_width + w_high]; scalar_t v3 = 0; if (h_high <= height - 1 && w_low >= 0) v3 = bottom_data[h_high * data_width + w_low]; scalar_t v4 = 0; if (h_high <= height - 1 && w_high <= width - 1) v4 = bottom_data[h_high * data_width + w_high]; scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename scalar_t> __device__ scalar_t get_gradient_weight( scalar_t argmax_h, scalar_t argmax_w, const int h, const int w, const int height, const int width) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { // empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (h == argmax_h_low && w == argmax_w_low) weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); if (h == argmax_h_low && w == argmax_w_high) weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); if (h == argmax_h_high && w == argmax_w_low) weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); if (h == argmax_h_high && w == argmax_w_high) weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); return weight; } template <typename scalar_t> __device__ scalar_t get_coordinate_weight( scalar_t argmax_h, scalar_t argmax_w, const int height, const int width, const scalar_t* im_data, const int data_width, const int bp_dir) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { // empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (bp_dir == 0) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } else if (bp_dir == 1) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_h_low 
+ 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } return weight; } template <typename scalar_t> __global__ void deformable_im2col_gpu_kernel( const int n, const scalar_t* data_im, const scalar_t* data_offset, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, scalar_t* data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int b_col = (index / width_col / height_col) % batch_size; const int c_im = (index / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; scalar_t* data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; // const scalar_t* data_im_ptr = data_im + ((b_col * num_channels + c_im) * // height + h_in) * width + w_in; const scalar_t* data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; const scalar_t* data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; scalar_t val = static_cast<scalar_t>(0); const scalar_t h_im = h_in + i * dilation_h + offset_h; const scalar_t w_im = w_in + j * dilation_w + offset_w; if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { // const scalar_t map_h = i * dilation_h + offset_h; // const scalar_t map_w = j * dilation_w + offset_w; // const int cur_height = height - h_in; // const int cur_width = width - w_in; // val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, // cur_width, map_h, map_w); val = deformable_im2col_bilinear( data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val; data_col_ptr += batch_size * height_col * width_col; } } } } template <typename scalar_t> __global__ void deformable_col2im_gpu_kernel( const int n, const scalar_t* data_col, const scalar_t* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int deformable_group, const int 
height_col, const int width_col, scalar_t* grad_im) { CUDA_KERNEL_LOOP(index, n) { const int j = (index / width_col / height_col / batch_size) % kernel_w; const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; // compute the start and end of the output const int deformable_group_index = c / channel_per_deformable_group; int w_out = index % width_col; int h_out = (index / width_col) % height_col; int b = (index / width_col / height_col) % batch_size; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const scalar_t* data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; const scalar_t cur_top_grad = data_col[index]; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1) { int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; scalar_t weight = get_gradient_weight( cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } template <typename scalar_t> __global__ void deformable_col2im_coord_gpu_kernel( const int n, const scalar_t* data_col, const scalar_t* data_im, const scalar_t* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int offset_channels, const int deformable_group, const int height_col, const int width_col, scalar_t* grad_offset) { CUDA_KERNEL_LOOP(index, n) { scalar_t val = 0; int w = index % width_col; int h = (index / width_col) % height_col; int c = (index / width_col / height_col) % offset_channels; int b = (index / width_col / height_col) / offset_channels; // compute the start and end of the output const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const scalar_t* data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; const scalar_t* data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; const scalar_t* data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; 
const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col / batch_size) % kernel_w; int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; scalar_t inv_h = h_in + i * dilation_h + offset_h; scalar_t inv_w = w_in + j * dilation_w + offset_w; if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -2; } const scalar_t weight = get_coordinate_weight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos]; cnt += 1; } grad_offset[index] = val; } } void deformable_im2col( const at::Tensor data_im, const at::Tensor data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, at::Tensor data_col) { // num_axes should be smaller than block size // todo: check parallel_imgs is correctly passed in int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * height_col * width_col * parallel_imgs; int channel_per_deformable_group = channels / deformable_group; at::cuda::CUDAGuard device_guard(data_im.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_im.scalar_type(), "deformable_im2col_gpu", ([&] { const scalar_t* data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>(); scalar_t* data_col_ = data_col.data_ptr<scalar_t>(); deformable_im2col_gpu_kernel<<< GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_im_, data_offset_, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, channels, deformable_group, height_col, width_col, data_col_); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in deformable_im2col: %s\n", cudaGetErrorString(err)); } } void deformable_col2im( const at::Tensor data_col, const at::Tensor data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, at::Tensor grad_im) { // todo: make sure parallel_imgs is passed in correctly int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs; int channel_per_deformable_group = channels / deformable_group; at::cuda::CUDAGuard device_guard(data_col.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); 
AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.scalar_type(), "deformable_col2im_gpu", ([&] { const scalar_t* data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>(); scalar_t* grad_im_ = grad_im.data_ptr<scalar_t>(); deformable_col2im_gpu_kernel<<< GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_col_, data_offset_, channels, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, deformable_group, height_col, width_col, grad_im_); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in deformable_col2im: %s\n", cudaGetErrorString(err)); } } void deformable_col2im_coord( const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, at::Tensor grad_offset) { int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w * deformable_group * parallel_imgs; int channel_per_deformable_group = channels * ksize_h * ksize_w / deformable_group; at::cuda::CUDAGuard device_guard(data_col.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.scalar_type(), "deformable_col2im_coord_gpu", ([&] { const scalar_t* data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t* data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>(); scalar_t* grad_offset_ = grad_offset.data_ptr<scalar_t>(); deformable_col2im_coord_gpu_kernel<<< GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_col_, data_im_, data_offset_, channels, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, 2 * ksize_h * ksize_w * deformable_group, deformable_group, height_col, width_col, grad_offset_); })); } template <typename scalar_t> __device__ scalar_t dmcn_im2col_bilinear( const scalar_t* bottom_data, const int data_width, const int height, const int width, scalar_t h, scalar_t w) { int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; scalar_t lh = h - h_low; scalar_t lw = w - w_low; scalar_t hh = 1 - lh, hw = 1 - lw; scalar_t v1 = 0; if (h_low >= 0 && w_low >= 0) v1 = bottom_data[h_low * data_width + w_low]; scalar_t v2 = 0; if (h_low >= 0 && w_high <= width - 1) v2 = bottom_data[h_low * data_width + w_high]; scalar_t v3 = 0; if (h_high <= height - 1 && w_low >= 0) v3 = bottom_data[h_high * data_width + w_low]; scalar_t v4 = 0; if (h_high <= height - 1 && w_high <= width - 1) v4 = bottom_data[h_high * data_width + w_high]; scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename scalar_t> __device__ scalar_t dmcn_get_gradient_weight( scalar_t argmax_h, scalar_t argmax_w, const int h, const int w, const int height, const int width) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { // empty return 0; } int argmax_h_low = 
floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (h == argmax_h_low && w == argmax_w_low) weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); if (h == argmax_h_low && w == argmax_w_high) weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); if (h == argmax_h_high && w == argmax_w_low) weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); if (h == argmax_h_high && w == argmax_w_high) weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); return weight; } template <typename scalar_t> __device__ scalar_t dmcn_get_coordinate_weight( scalar_t argmax_h, scalar_t argmax_w, const int height, const int width, const scalar_t* im_data, const int data_width, const int bp_dir) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { // empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (bp_dir == 0) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } else if (bp_dir == 1) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } return weight; } template <typename scalar_t> __global__ void modulated_deformable_im2col_gpu_kernel( const int n, const scalar_t* data_im, const scalar_t* data_offset, const scalar_t* data_mask, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, scalar_t* data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int b_col = (index / width_col / height_col) % batch_size; const int c_im = (index / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; scalar_t* data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; // const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * // height + h_in) * 
width + w_in; const scalar_t* data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; const scalar_t* data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const scalar_t* data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; scalar_t val = static_cast<scalar_t>(0); const scalar_t h_im = h_in + i * dilation_h + offset_h; const scalar_t w_im = w_in + j * dilation_w + offset_w; // if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { // const float map_h = i * dilation_h + offset_h; // const float map_w = j * dilation_w + offset_w; // const int cur_height = height - h_in; // const int cur_width = width - w_in; // val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, // cur_width, map_h, map_w); val = dmcn_im2col_bilinear( data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val * mask; data_col_ptr += batch_size * height_col * width_col; // data_col_ptr += height_col * width_col; } } } } template <typename scalar_t> __global__ void modulated_deformable_col2im_gpu_kernel( const int n, const scalar_t* data_col, const scalar_t* data_offset, const scalar_t* data_mask, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int deformable_group, const int height_col, const int width_col, scalar_t* grad_im) { CUDA_KERNEL_LOOP(index, n) { const int j = (index / width_col / height_col / batch_size) % kernel_w; const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; // compute the start and end of the output const int deformable_group_index = c / channel_per_deformable_group; int w_out = index % width_col; int h_out = (index / width_col) % height_col; int b = (index / width_col / height_col) % batch_size; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const scalar_t* data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const scalar_t* data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const 
scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; const scalar_t cur_top_grad = data_col[index] * mask; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1) { int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; scalar_t weight = dmcn_get_gradient_weight( cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } template <typename scalar_t> __global__ void modulated_deformable_col2im_coord_gpu_kernel( const int n, const scalar_t* data_col, const scalar_t* data_im, const scalar_t* data_offset, const scalar_t* data_mask, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int offset_channels, const int deformable_group, const int height_col, const int width_col, scalar_t* grad_offset, scalar_t* grad_mask) { CUDA_KERNEL_LOOP(index, n) { scalar_t val = 0, mval = 0; int w = index % width_col; int h = (index / width_col) % height_col; int c = (index / width_col / height_col) % offset_channels; int b = (index / width_col / height_col) / offset_channels; // compute the start and end of the output const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const scalar_t* data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; const scalar_t* data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; const scalar_t* data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const scalar_t* data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col / batch_size) % kernel_w; int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out); const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; scalar_t inv_h = h_in + i * 
dilation_h + offset_h; scalar_t inv_w = w_in + j * dilation_w + offset_w; if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -2; } else { mval += data_col_ptr[col_pos] * dmcn_im2col_bilinear( data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w); } const scalar_t weight = dmcn_get_coordinate_weight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos] * mask; cnt += 1; } // KERNEL_ASSIGN(grad_offset[index], offset_req, val); grad_offset[index] = val; if (offset_c % 2 == 0) // KERNEL_ASSIGN(grad_mask[(((b * deformable_group + // deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * // height_col + h) * width_col + w], mask_req, mval); grad_mask [(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w] = mval; } } void modulated_deformable_im2col_cuda( const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kenerl_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor data_col) { // num_axes should be smaller than block size const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * batch_size * height_col * width_col; at::cuda::CUDAGuard device_guard(data_im.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_im.scalar_type(), "modulated_deformable_im2col_gpu", ([&] { const scalar_t* data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>(); const scalar_t* data_mask_ = data_mask.data_ptr<scalar_t>(); scalar_t* data_col_ = data_col.data_ptr<scalar_t>(); modulated_deformable_im2col_gpu_kernel<<< GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, kernel_h, kenerl_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, channels, deformable_group, height_col, width_col, data_col_); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf( "error in modulated_deformable_im2col_cuda: %s\n", cudaGetErrorString(err)); } } void modulated_deformable_col2im_cuda( const at::Tensor data_col, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor grad_im) { const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col; at::cuda::CUDAGuard device_guard(data_col.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.scalar_type(), "modulated_deformable_col2im_gpu", ([&] { const scalar_t* data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>(); const scalar_t* data_mask_ = data_mask.data_ptr<scalar_t>(); scalar_t* 
grad_im_ = grad_im.data_ptr<scalar_t>(); modulated_deformable_col2im_gpu_kernel<<< GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_col_, data_offset_, data_mask_, channels, height_im, width_im, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, deformable_group, height_col, width_col, grad_im_); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf( "error in modulated_deformable_col2im_cuda: %s\n", cudaGetErrorString(err)); } } void modulated_deformable_col2im_coord_cuda( const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor grad_offset, at::Tensor grad_mask) { const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group; const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group; at::cuda::CUDAGuard device_guard(data_col.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.scalar_type(), "modulated_deformable_col2im_coord_gpu", ([&] { const scalar_t* data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t* data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>(); const scalar_t* data_mask_ = data_mask.data_ptr<scalar_t>(); scalar_t* grad_offset_ = grad_offset.data_ptr<scalar_t>(); scalar_t* grad_mask_ = grad_mask.data_ptr<scalar_t>(); modulated_deformable_col2im_coord_gpu_kernel<<< GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_col_, data_im_, data_offset_, data_mask_, channels, height_im, width_im, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col, grad_offset_, grad_mask_); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf( "error in modulated_deformable_col2im_coord_cuda: %s\n", cudaGetErrorString(err)); } }
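The kernels and host wrappers above only provide the im2col / col2im building blocks; the full deformable convolution is assembled on the host as an im2col pass followed by a GEMM. The sketch below shows one way that driver could look. It is illustrative only: the tensor shapes, the `parallel_imgs == N` simplification, the function name, and the weight reshape are assumptions, not the surrounding project's actual wrapper (which also handles groups, im2col_step chunking, and bias).

#include <ATen/ATen.h>

// Sketch only: drive deformable_im2col() for one 2D deformable conv layer.
// The column-buffer layout matches how deformable_im2col_gpu_kernel writes it:
// [C * kh * kw, N * H_out * W_out], with the batch folded into the column axis.
at::Tensor deform_conv_forward_sketch(
    const at::Tensor& input,   // [N, C, H, W]
    const at::Tensor& offset,  // [N, 2 * kh * kw * deformable_group, H_out, W_out]
    const at::Tensor& weight,  // [C_out, C, kh, kw]
    int kh, int kw, int pad, int stride, int dilation, int deformable_group) {
  const int N = input.size(0), C = input.size(1);
  const int H = input.size(2), W = input.size(3);
  const int H_out = (H + 2 * pad - (dilation * (kh - 1) + 1)) / stride + 1;
  const int W_out = (W + 2 * pad - (dilation * (kw - 1) + 1)) / stride + 1;

  at::Tensor columns =
      at::empty({C * kh * kw, N * H_out * W_out}, input.options());

  // All images are passed as one parallel chunk here for brevity.
  deformable_im2col(input, offset, C, H, W, kh, kw, pad, pad, stride, stride,
                    dilation, dilation, /*parallel_imgs=*/N, deformable_group,
                    columns);

  // GEMM: [C_out, C*kh*kw] x [C*kh*kw, N*H_out*W_out] -> [C_out, N*H_out*W_out],
  // then reorder to NCHW.
  at::Tensor out = weight.view({weight.size(0), -1}).mm(columns);
  return out.view({weight.size(0), N, H_out, W_out})
      .permute({1, 0, 2, 3})
      .contiguous();
}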
a6c863f76737895975924678a97e34f33abbdfac.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright (c) 2020 Neka-Nat * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. **/ #include <thrust/gather.h> #include <thrust/iterator/discard_iterator.h> #include "cupoch/camera/pinhole_camera_intrinsic.h" #include "cupoch/geometry/boundingvolume.h" #include "cupoch/geometry/geometry_utils.h" #include "cupoch/geometry/image.h" #include "cupoch/knn/kdtree_flann.h" #include "cupoch/geometry/pointcloud.h" #include "cupoch/utility/console.h" #include "cupoch/utility/helper.h" #include "cupoch/utility/platform.h" namespace cupoch { namespace geometry { namespace { template <class... Args> struct check_nan_functor { check_nan_functor(bool remove_nan, bool remove_infinite) : remove_nan_(remove_nan), remove_infinite_(remove_infinite){}; const bool remove_nan_; const bool remove_infinite_; __device__ bool operator()(const thrust::tuple<Args...> &x) const { const Eigen::Vector3f &point = thrust::get<0>(x); bool is_nan = remove_nan_ && (isnan(point(0)) || isnan(point(1)) || isnan(point(2))); bool is_infinite = remove_infinite_ && (isinf(point(0)) || isinf(point(1)) || isinf(point(2))); return is_nan || is_infinite; } }; struct gaussian_filter_functor { gaussian_filter_functor(const Eigen::Vector3f *points, const Eigen::Vector3f *normals, const Eigen::Vector3f *colors, const int *indices, const float *dists, float sigma2, size_t num_max_search_points, bool has_normal, bool has_color) : points_(points), normals_(normals), colors_(colors), indices_(indices), dists_(dists), sigma2_(sigma2), num_max_search_points_(num_max_search_points), has_normal_(has_normal), has_color_(has_color){}; const Eigen::Vector3f *points_; const Eigen::Vector3f *normals_; const Eigen::Vector3f *colors_; const int *indices_; const float *dists_; const float sigma2_; const size_t num_max_search_points_; const bool has_normal_; const bool has_color_; __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f, Eigen::Vector3f> operator()(size_t idx) const { float total_weight = 0.0; Eigen::Vector3f res_p = Eigen::Vector3f::Zero(); Eigen::Vector3f res_n = Eigen::Vector3f::Zero(); Eigen::Vector3f res_c = Eigen::Vector3f::Zero(); for (int i = 0; i < num_max_search_points_; ++i) { const int j = idx * num_max_search_points_ + i; const int idx_j = __ldg(&indices_[j]); if (idx_j >= 0) { float weight = exp(-0.5 * dists_[j] / sigma2_); res_p += weight * points_[idx_j]; if (has_normal_) res_n += weight * normals_[idx_j]; if (has_color_) res_c += weight * colors_[idx_j]; total_weight += 
weight; } } res_p /= total_weight; res_n /= total_weight; res_c /= total_weight; return thrust::make_tuple(res_p, res_n, res_c); } }; template <class... Args> struct pass_through_filter_functor { pass_through_filter_functor(int axis_no, float min_bound, float max_bound) : axis_no_(axis_no), min_bound_(min_bound), max_bound_(max_bound){}; const int axis_no_; const float min_bound_; const float max_bound_; __device__ bool operator()( const thrust::tuple<Eigen::Vector3f, Args...> &x) const { float val = thrust::get<0>(x)[axis_no_]; return val < min_bound_ || max_bound_ < val; } }; } // namespace PointCloud::PointCloud() : GeometryBase3D(Geometry::GeometryType::PointCloud) {} PointCloud::PointCloud(const thrust::host_vector<Eigen::Vector3f> &points) : GeometryBase3D(Geometry::GeometryType::PointCloud), points_(points) {} PointCloud::PointCloud(const std::vector<Eigen::Vector3f> &points) : GeometryBase3D(Geometry::GeometryType::PointCloud), points_(points) {} PointCloud::PointCloud(const utility::device_vector<Eigen::Vector3f> &points) : GeometryBase3D(Geometry::GeometryType::PointCloud), points_(points) {} PointCloud::PointCloud(const PointCloud &other) : GeometryBase3D(Geometry::GeometryType::PointCloud), points_(other.points_), normals_(other.normals_), colors_(other.colors_) {} PointCloud::~PointCloud() {} PointCloud &PointCloud::operator=(const PointCloud &other) { points_ = other.points_; normals_ = other.normals_; colors_ = other.colors_; return *this; } void PointCloud::SetPoints(const thrust::host_vector<Eigen::Vector3f> &points) { points_ = points; } thrust::host_vector<Eigen::Vector3f> PointCloud::GetPoints() const { thrust::host_vector<Eigen::Vector3f> points = points_; return points; } void PointCloud::SetNormals( const thrust::host_vector<Eigen::Vector3f> &normals) { normals_ = normals; } thrust::host_vector<Eigen::Vector3f> PointCloud::GetNormals() const { thrust::host_vector<Eigen::Vector3f> normals = normals_; return normals; } void PointCloud::SetColors(const thrust::host_vector<Eigen::Vector3f> &colors) { colors_ = colors; } thrust::host_vector<Eigen::Vector3f> PointCloud::GetColors() const { thrust::host_vector<Eigen::Vector3f> colors = colors_; return colors; } PointCloud &PointCloud::Clear() { points_.clear(); normals_.clear(); colors_.clear(); return *this; } bool PointCloud::IsEmpty() const { return !HasPoints(); } Eigen::Vector3f PointCloud::GetMinBound() const { return utility::ComputeMinBound<3>(points_); } Eigen::Vector3f PointCloud::GetMaxBound() const { return utility::ComputeMaxBound<3>(points_); } Eigen::Vector3f PointCloud::GetCenter() const { return utility::ComputeCenter<3>(points_); } AxisAlignedBoundingBox<3> PointCloud::GetAxisAlignedBoundingBox() const { return AxisAlignedBoundingBox<3>::CreateFromPoints(points_); } OrientedBoundingBox PointCloud::GetOrientedBoundingBox() const { return OrientedBoundingBox::CreateFromPoints(points_); } PointCloud &PointCloud::Translate(const Eigen::Vector3f &translation, bool relative) { TranslatePoints<3>(translation, points_, relative); return *this; } PointCloud &PointCloud::Scale(const float scale, bool center) { ScalePoints<3>(scale, points_, center); return *this; } PointCloud &PointCloud::Rotate(const Eigen::Matrix3f &R, bool center) { RotatePoints<3>(utility::GetStream(0), R, points_, center); RotateNormals(utility::GetStream(1), R, normals_); cudaSafeCall(hipDeviceSynchronize()); return *this; } PointCloud &PointCloud::operator+=(const PointCloud &cloud) { // We do not use std::vector::insert to combine 
std::vector because it will // crash if the pointcloud is added to itself. if (cloud.IsEmpty()) return (*this); size_t old_vert_num = points_.size(); size_t add_vert_num = cloud.points_.size(); size_t new_vert_num = old_vert_num + add_vert_num; if ((!HasPoints() || HasNormals()) && cloud.HasNormals()) { normals_.resize(new_vert_num); thrust::copy(cloud.normals_.begin(), cloud.normals_.end(), normals_.begin() + old_vert_num); } else { normals_.clear(); } if ((!HasPoints() || HasColors()) && cloud.HasColors()) { colors_.resize(new_vert_num); thrust::copy(cloud.colors_.begin(), cloud.colors_.end(), colors_.begin() + old_vert_num); } else { colors_.clear(); } points_.resize(new_vert_num); thrust::copy(cloud.points_.begin(), cloud.points_.end(), points_.begin() + old_vert_num); return (*this); } PointCloud PointCloud::operator+(const PointCloud &cloud) const { return (PointCloud(*this) += cloud); } PointCloud &PointCloud::NormalizeNormals() { thrust::for_each(normals_.begin(), normals_.end(), [] __device__(Eigen::Vector3f & nl) { nl.normalize(); }); return *this; } PointCloud &PointCloud::PaintUniformColor(const Eigen::Vector3f &color) { ResizeAndPaintUniformColor(colors_, points_.size(), color); return *this; } PointCloud &PointCloud::Transform(const Eigen::Matrix4f &transformation) { TransformPoints<3>(utility::GetStream(0), transformation, points_); TransformNormals(utility::GetStream(1), transformation, normals_); cudaSafeCall(hipDeviceSynchronize()); return *this; } std::shared_ptr<PointCloud> PointCloud::Crop( const AxisAlignedBoundingBox<3> &bbox) const { if (bbox.IsEmpty()) { utility::LogError( "[CropPointCloud] AxisAlignedBoundingBox either has zeros " "size, or has wrong bounds."); } return SelectByIndex(bbox.GetPointIndicesWithinBoundingBox(points_)); } std::shared_ptr<PointCloud> PointCloud::Crop( const OrientedBoundingBox &bbox) const { if (bbox.IsEmpty()) { utility::LogError( "[CropPointCloud] AxisAlignedBoundingBox either has zeros " "size, or has wrong bounds."); } return SelectByIndex(bbox.GetPointIndicesWithinBoundingBox(points_)); } PointCloud &PointCloud::RemoveNoneFinitePoints(bool remove_nan, bool remove_infinite) { bool has_normal = HasNormals(); bool has_color = HasColors(); size_t old_point_num = points_.size(); size_t k = 0; auto runs = [=] (auto&... 
params) { remove_if_vectors( utility::exec_policy(0), check_nan_functor<typename std::remove_reference_t<decltype(params)>::value_type...>(remove_nan, remove_infinite), params...); }; if (!has_normal && !has_color) { runs(points_); } else if (has_normal && !has_color) { runs(points_, normals_); } else if (!has_normal && has_color) { runs(points_, colors_); } else { runs(points_, normals_, colors_); } utility::LogDebug( "[RemoveNoneFinitePoints] {:d} nan points have been removed.", (int)(old_point_num - k)); return *this; } std::shared_ptr<PointCloud> PointCloud::GaussianFilter( float search_radius, float sigma2, size_t num_max_search_points) { auto out = std::make_shared<PointCloud>(); if (search_radius <= 0 || sigma2 <= 0 || num_max_search_points <= 0) { utility::LogError( "[GaussianFilter] Illegal input parameters, radius and sigma2 " "must be positive."); return out; } bool has_normal = HasNormals(); bool has_color = HasColors(); knn::KDTreeFlann kdtree; kdtree.SetRawData(ConvertVector3fVectorRef(*this)); utility::device_vector<int> indices; utility::device_vector<float> dist; kdtree.SearchRadius(points_, search_radius, num_max_search_points, indices, dist); size_t n_pt = points_.size(); out->points_.resize(n_pt); if (has_normal) out->normals_.resize(n_pt); if (has_color) out->colors_.resize(n_pt); gaussian_filter_functor func(thrust::raw_pointer_cast(points_.data()), thrust::raw_pointer_cast(normals_.data()), thrust::raw_pointer_cast(colors_.data()), thrust::raw_pointer_cast(indices.data()), thrust::raw_pointer_cast(dist.data()), sigma2, num_max_search_points, has_normal, has_color); auto runs = [size = points_.size(), &func] (auto&&... params) { thrust::transform( thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(size), make_tuple_iterator(params...), func); }; if (has_normal && has_color) { runs(out->points_.begin(), out->normals_.begin(), out->colors_.begin()); } else if (has_normal) { runs(out->points_.begin(), out->normals_.begin(), thrust::make_discard_iterator()); } else if (has_color) { runs(out->points_.begin(), thrust::make_discard_iterator(), out->colors_.begin()); } else { runs(out->points_.begin(), thrust::make_discard_iterator(), thrust::make_discard_iterator()); } return out; } std::shared_ptr<PointCloud> PointCloud::PassThroughFilter(size_t axis_no, float min_bound, float max_bound) { auto out = std::make_shared<PointCloud>(); if (axis_no >= 3) { utility::LogError( "[PassThroughFilter] Illegal input parameters, axis_no " "must be 0, 1 or 2."); return out; } *out = *this; bool has_normal = HasNormals(); bool has_color = HasColors(); auto runs = [=, &points = out->points_] (auto&... params) { remove_if_vectors( utility::exec_policy(0), pass_through_filter_functor<typename std::remove_reference_t<decltype(params)>::value_type...>( axis_no, min_bound, max_bound), points, params...); }; if (has_normal && has_color) { runs(out->normals_, out->colors_); } else if (has_normal) { runs(out->normals_); } else if (has_color) { runs(out->colors_); } else { runs(); } return out; } } // namespace geometry } // namespace cupoch
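As a quick illustration of the public API defined in this file, the fragment below builds a toy cloud and chains the filters. All numeric parameters (search radius, sigma^2, pass-through bounds) are placeholder values chosen for the example, not recommendations, and the demo function name is invented here.

#include <Eigen/Core>
#include <thrust/host_vector.h>
#include "cupoch/geometry/pointcloud.h"

// Sketch only: exercise GaussianFilter, PassThroughFilter and
// RemoveNoneFinitePoints on a three-point cloud.
void filter_demo() {
    thrust::host_vector<Eigen::Vector3f> host_pts;
    host_pts.push_back(Eigen::Vector3f(0.0f, 0.0f, 1.0f));
    host_pts.push_back(Eigen::Vector3f(0.1f, 0.0f, 1.2f));
    host_pts.push_back(Eigen::Vector3f(0.0f, 0.2f, 5.0f));  // far away in z

    cupoch::geometry::PointCloud pcd(host_pts);

    // Gaussian smoothing over a 5 cm neighbourhood, sigma^2 = 1e-4,
    // using at most 30 neighbours per query point.
    auto smoothed = pcd.GaussianFilter(0.05f, 1e-4f, 30);

    // Keep points whose z coordinate (axis_no == 2) lies inside [0, 2].
    auto cropped = smoothed->PassThroughFilter(2, 0.0f, 2.0f);

    // Drop NaN and infinite points in place.
    cropped->RemoveNoneFinitePoints(true, true);
}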
a6c863f76737895975924678a97e34f33abbdfac.cu
/** * Copyright (c) 2020 Neka-Nat * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. **/ #include <thrust/gather.h> #include <thrust/iterator/discard_iterator.h> #include "cupoch/camera/pinhole_camera_intrinsic.h" #include "cupoch/geometry/boundingvolume.h" #include "cupoch/geometry/geometry_utils.h" #include "cupoch/geometry/image.h" #include "cupoch/knn/kdtree_flann.h" #include "cupoch/geometry/pointcloud.h" #include "cupoch/utility/console.h" #include "cupoch/utility/helper.h" #include "cupoch/utility/platform.h" namespace cupoch { namespace geometry { namespace { template <class... Args> struct check_nan_functor { check_nan_functor(bool remove_nan, bool remove_infinite) : remove_nan_(remove_nan), remove_infinite_(remove_infinite){}; const bool remove_nan_; const bool remove_infinite_; __device__ bool operator()(const thrust::tuple<Args...> &x) const { const Eigen::Vector3f &point = thrust::get<0>(x); bool is_nan = remove_nan_ && (isnan(point(0)) || isnan(point(1)) || isnan(point(2))); bool is_infinite = remove_infinite_ && (isinf(point(0)) || isinf(point(1)) || isinf(point(2))); return is_nan || is_infinite; } }; struct gaussian_filter_functor { gaussian_filter_functor(const Eigen::Vector3f *points, const Eigen::Vector3f *normals, const Eigen::Vector3f *colors, const int *indices, const float *dists, float sigma2, size_t num_max_search_points, bool has_normal, bool has_color) : points_(points), normals_(normals), colors_(colors), indices_(indices), dists_(dists), sigma2_(sigma2), num_max_search_points_(num_max_search_points), has_normal_(has_normal), has_color_(has_color){}; const Eigen::Vector3f *points_; const Eigen::Vector3f *normals_; const Eigen::Vector3f *colors_; const int *indices_; const float *dists_; const float sigma2_; const size_t num_max_search_points_; const bool has_normal_; const bool has_color_; __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f, Eigen::Vector3f> operator()(size_t idx) const { float total_weight = 0.0; Eigen::Vector3f res_p = Eigen::Vector3f::Zero(); Eigen::Vector3f res_n = Eigen::Vector3f::Zero(); Eigen::Vector3f res_c = Eigen::Vector3f::Zero(); for (int i = 0; i < num_max_search_points_; ++i) { const int j = idx * num_max_search_points_ + i; const int idx_j = __ldg(&indices_[j]); if (idx_j >= 0) { float weight = exp(-0.5 * dists_[j] / sigma2_); res_p += weight * points_[idx_j]; if (has_normal_) res_n += weight * normals_[idx_j]; if (has_color_) res_c += weight * colors_[idx_j]; total_weight += weight; } } res_p /= total_weight; res_n /= total_weight; res_c 
/= total_weight; return thrust::make_tuple(res_p, res_n, res_c); } }; template <class... Args> struct pass_through_filter_functor { pass_through_filter_functor(int axis_no, float min_bound, float max_bound) : axis_no_(axis_no), min_bound_(min_bound), max_bound_(max_bound){}; const int axis_no_; const float min_bound_; const float max_bound_; __device__ bool operator()( const thrust::tuple<Eigen::Vector3f, Args...> &x) const { float val = thrust::get<0>(x)[axis_no_]; return val < min_bound_ || max_bound_ < val; } }; } // namespace PointCloud::PointCloud() : GeometryBase3D(Geometry::GeometryType::PointCloud) {} PointCloud::PointCloud(const thrust::host_vector<Eigen::Vector3f> &points) : GeometryBase3D(Geometry::GeometryType::PointCloud), points_(points) {} PointCloud::PointCloud(const std::vector<Eigen::Vector3f> &points) : GeometryBase3D(Geometry::GeometryType::PointCloud), points_(points) {} PointCloud::PointCloud(const utility::device_vector<Eigen::Vector3f> &points) : GeometryBase3D(Geometry::GeometryType::PointCloud), points_(points) {} PointCloud::PointCloud(const PointCloud &other) : GeometryBase3D(Geometry::GeometryType::PointCloud), points_(other.points_), normals_(other.normals_), colors_(other.colors_) {} PointCloud::~PointCloud() {} PointCloud &PointCloud::operator=(const PointCloud &other) { points_ = other.points_; normals_ = other.normals_; colors_ = other.colors_; return *this; } void PointCloud::SetPoints(const thrust::host_vector<Eigen::Vector3f> &points) { points_ = points; } thrust::host_vector<Eigen::Vector3f> PointCloud::GetPoints() const { thrust::host_vector<Eigen::Vector3f> points = points_; return points; } void PointCloud::SetNormals( const thrust::host_vector<Eigen::Vector3f> &normals) { normals_ = normals; } thrust::host_vector<Eigen::Vector3f> PointCloud::GetNormals() const { thrust::host_vector<Eigen::Vector3f> normals = normals_; return normals; } void PointCloud::SetColors(const thrust::host_vector<Eigen::Vector3f> &colors) { colors_ = colors; } thrust::host_vector<Eigen::Vector3f> PointCloud::GetColors() const { thrust::host_vector<Eigen::Vector3f> colors = colors_; return colors; } PointCloud &PointCloud::Clear() { points_.clear(); normals_.clear(); colors_.clear(); return *this; } bool PointCloud::IsEmpty() const { return !HasPoints(); } Eigen::Vector3f PointCloud::GetMinBound() const { return utility::ComputeMinBound<3>(points_); } Eigen::Vector3f PointCloud::GetMaxBound() const { return utility::ComputeMaxBound<3>(points_); } Eigen::Vector3f PointCloud::GetCenter() const { return utility::ComputeCenter<3>(points_); } AxisAlignedBoundingBox<3> PointCloud::GetAxisAlignedBoundingBox() const { return AxisAlignedBoundingBox<3>::CreateFromPoints(points_); } OrientedBoundingBox PointCloud::GetOrientedBoundingBox() const { return OrientedBoundingBox::CreateFromPoints(points_); } PointCloud &PointCloud::Translate(const Eigen::Vector3f &translation, bool relative) { TranslatePoints<3>(translation, points_, relative); return *this; } PointCloud &PointCloud::Scale(const float scale, bool center) { ScalePoints<3>(scale, points_, center); return *this; } PointCloud &PointCloud::Rotate(const Eigen::Matrix3f &R, bool center) { RotatePoints<3>(utility::GetStream(0), R, points_, center); RotateNormals(utility::GetStream(1), R, normals_); cudaSafeCall(cudaDeviceSynchronize()); return *this; } PointCloud &PointCloud::operator+=(const PointCloud &cloud) { // We do not use std::vector::insert to combine std::vector because it will // crash if the pointcloud is added to 
itself. if (cloud.IsEmpty()) return (*this); size_t old_vert_num = points_.size(); size_t add_vert_num = cloud.points_.size(); size_t new_vert_num = old_vert_num + add_vert_num; if ((!HasPoints() || HasNormals()) && cloud.HasNormals()) { normals_.resize(new_vert_num); thrust::copy(cloud.normals_.begin(), cloud.normals_.end(), normals_.begin() + old_vert_num); } else { normals_.clear(); } if ((!HasPoints() || HasColors()) && cloud.HasColors()) { colors_.resize(new_vert_num); thrust::copy(cloud.colors_.begin(), cloud.colors_.end(), colors_.begin() + old_vert_num); } else { colors_.clear(); } points_.resize(new_vert_num); thrust::copy(cloud.points_.begin(), cloud.points_.end(), points_.begin() + old_vert_num); return (*this); } PointCloud PointCloud::operator+(const PointCloud &cloud) const { return (PointCloud(*this) += cloud); } PointCloud &PointCloud::NormalizeNormals() { thrust::for_each(normals_.begin(), normals_.end(), [] __device__(Eigen::Vector3f & nl) { nl.normalize(); }); return *this; } PointCloud &PointCloud::PaintUniformColor(const Eigen::Vector3f &color) { ResizeAndPaintUniformColor(colors_, points_.size(), color); return *this; } PointCloud &PointCloud::Transform(const Eigen::Matrix4f &transformation) { TransformPoints<3>(utility::GetStream(0), transformation, points_); TransformNormals(utility::GetStream(1), transformation, normals_); cudaSafeCall(cudaDeviceSynchronize()); return *this; } std::shared_ptr<PointCloud> PointCloud::Crop( const AxisAlignedBoundingBox<3> &bbox) const { if (bbox.IsEmpty()) { utility::LogError( "[CropPointCloud] AxisAlignedBoundingBox either has zeros " "size, or has wrong bounds."); } return SelectByIndex(bbox.GetPointIndicesWithinBoundingBox(points_)); } std::shared_ptr<PointCloud> PointCloud::Crop( const OrientedBoundingBox &bbox) const { if (bbox.IsEmpty()) { utility::LogError( "[CropPointCloud] AxisAlignedBoundingBox either has zeros " "size, or has wrong bounds."); } return SelectByIndex(bbox.GetPointIndicesWithinBoundingBox(points_)); } PointCloud &PointCloud::RemoveNoneFinitePoints(bool remove_nan, bool remove_infinite) { bool has_normal = HasNormals(); bool has_color = HasColors(); size_t old_point_num = points_.size(); size_t k = 0; auto runs = [=] (auto&... 
params) { remove_if_vectors( utility::exec_policy(0), check_nan_functor<typename std::remove_reference_t<decltype(params)>::value_type...>(remove_nan, remove_infinite), params...); }; if (!has_normal && !has_color) { runs(points_); } else if (has_normal && !has_color) { runs(points_, normals_); } else if (!has_normal && has_color) { runs(points_, colors_); } else { runs(points_, normals_, colors_); } utility::LogDebug( "[RemoveNoneFinitePoints] {:d} nan points have been removed.", (int)(old_point_num - k)); return *this; } std::shared_ptr<PointCloud> PointCloud::GaussianFilter( float search_radius, float sigma2, size_t num_max_search_points) { auto out = std::make_shared<PointCloud>(); if (search_radius <= 0 || sigma2 <= 0 || num_max_search_points <= 0) { utility::LogError( "[GaussianFilter] Illegal input parameters, radius and sigma2 " "must be positive."); return out; } bool has_normal = HasNormals(); bool has_color = HasColors(); knn::KDTreeFlann kdtree; kdtree.SetRawData(ConvertVector3fVectorRef(*this)); utility::device_vector<int> indices; utility::device_vector<float> dist; kdtree.SearchRadius(points_, search_radius, num_max_search_points, indices, dist); size_t n_pt = points_.size(); out->points_.resize(n_pt); if (has_normal) out->normals_.resize(n_pt); if (has_color) out->colors_.resize(n_pt); gaussian_filter_functor func(thrust::raw_pointer_cast(points_.data()), thrust::raw_pointer_cast(normals_.data()), thrust::raw_pointer_cast(colors_.data()), thrust::raw_pointer_cast(indices.data()), thrust::raw_pointer_cast(dist.data()), sigma2, num_max_search_points, has_normal, has_color); auto runs = [size = points_.size(), &func] (auto&&... params) { thrust::transform( thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(size), make_tuple_iterator(params...), func); }; if (has_normal && has_color) { runs(out->points_.begin(), out->normals_.begin(), out->colors_.begin()); } else if (has_normal) { runs(out->points_.begin(), out->normals_.begin(), thrust::make_discard_iterator()); } else if (has_color) { runs(out->points_.begin(), thrust::make_discard_iterator(), out->colors_.begin()); } else { runs(out->points_.begin(), thrust::make_discard_iterator(), thrust::make_discard_iterator()); } return out; } std::shared_ptr<PointCloud> PointCloud::PassThroughFilter(size_t axis_no, float min_bound, float max_bound) { auto out = std::make_shared<PointCloud>(); if (axis_no >= 3) { utility::LogError( "[PassThroughFilter] Illegal input parameters, axis_no " "must be 0, 1 or 2."); return out; } *out = *this; bool has_normal = HasNormals(); bool has_color = HasColors(); auto runs = [=, &points = out->points_] (auto&... params) { remove_if_vectors( utility::exec_policy(0), pass_through_filter_functor<typename std::remove_reference_t<decltype(params)>::value_type...>( axis_no, min_bound, max_bound), points, params...); }; if (has_normal && has_color) { runs(out->normals_, out->colors_); } else if (has_normal) { runs(out->normals_); } else if (has_color) { runs(out->colors_); } else { runs(); } return out; } } // namespace geometry } // namespace cupoch
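The GaussianFilter implementation above leans on a Thrust idiom: a functor indexed by a counting iterator returns a tuple that is scattered into several output vectors in one pass, with discard iterators standing in for absent normals or colors. The standalone sketch below shows the same pattern, using plain thrust::make_zip_iterator in place of cupoch's make_tuple_iterator helper; names and values are illustrative.

#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>

// One functor fills two result vectors in a single transform, mirroring how
// gaussian_filter_functor writes points, normals and colors together.
struct double_and_square {
    __host__ __device__ thrust::tuple<float, float> operator()(size_t i) const {
        const float x = static_cast<float>(i);
        return thrust::make_tuple(2.0f * x, x * x);
    }
};

void zip_transform_demo() {
    const size_t n = 8;
    thrust::device_vector<float> doubled(n), squared(n);
    thrust::transform(
        thrust::make_counting_iterator<size_t>(0),
        thrust::make_counting_iterator(n),
        thrust::make_zip_iterator(thrust::make_tuple(doubled.begin(), squared.begin())),
        double_and_square());
}

In the file itself, the output zip is built from out->points_, out->normals_ and out->colors_ (or discard iterators), so the same functor body covers every combination of attributes without branching into separate kernels.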
81c9d5d3da5874c812f526796ad6674f9befb2eb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! ******************* BEGIN Caffe Copyright Notice and Disclaimer **************** * * COPYRIGHT * * All contributions by the University of California: * Copyright (c) 2014-2017 The Regents of the University of California (Regents) * All rights reserved. * * All other contributions: * Copyright (c) 2014-2017, the respective contributors * All rights reserved. * * Caffe uses a shared copyright model: each contributor holds copyright over * their contributions to Caffe. The project versioning records all such * contribution and copyright details. If a contributor wants to further mark * their specific copyright on a particular contribution, they should indicate * their copyright solely in the commit message of the change when it is * committed. * * LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * CONTRIBUTION AGREEMENT * * By contributing to the BVLC/caffe repository through pull-request, comment, * or otherwise, the contributor releases their content to the * license and copyright terms herein. * ***************** END Caffe Copyright Notice and Disclaimer ******************** * * Copyright (c) 2017 Microsoft * Licensed under The Apache-2.0 License [see LICENSE for details] * \file deformable_im2col.cuh * \brief Function definitions of converting an image to * column matrix based on kernel, padding, dilation, and offset. * These functions are mainly used in deformable convolution operators. 
* \ref: https://arxiv.org/abs/1703.06211 * \author Yuwen Xiong, Haozhi Qi, Jifeng Dai */ #include <hipcub/hipcub.hpp> #include <vector> #include "caffe2/core/common.h" #include "caffe2/core/context_gpu.h" #include "caffe2/operators/deform_conv_op.h" #include "caffe2/operators/deform_conv_op_impl.h" namespace caffe2 { typedef TIndex index_t; typedef std::vector<TIndex> TShape; template <typename DType> __device__ DType deformable_im2col_bilinear( const DType* bottom_data, const int data_width, const int height, const int width, DType h, DType w) { int h_low = floor(h); int w_low = floor(w); int h_high; int w_high; if (h_low >= height - 1) { h_high = h_low = height - 1; h = (DType)h_low; } else { h_high = h_low + 1; } if (w_low >= width - 1) { w_high = w_low = width - 1; w = (DType)w_low; } else { w_high = w_low + 1; } DType lh = h - h_low; DType lw = w - w_low; DType hh = 1 - lh, hw = 1 - lw; DType v1 = bottom_data[h_low * data_width + w_low]; DType v2 = bottom_data[h_low * data_width + w_high]; DType v3 = bottom_data[h_high * data_width + w_low]; DType v4 = bottom_data[h_high * data_width + w_high]; DType w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; DType val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename DType> __device__ DType get_gradient_weight( DType argmax_h, DType argmax_w, const int h, const int w, const int height, const int width) { if (argmax_h < 0 || argmax_h > height || argmax_w < 0 || argmax_w > width) { // empty return 0; } argmax_h = max(argmax_h, (DType)0.0f); argmax_w = max(argmax_w, (DType)0.0f); int argmax_h_low = (int)argmax_h; int argmax_w_low = (int)argmax_w; int argmax_h_high; int argmax_w_high; if (argmax_h_low >= height - 1) { argmax_h_high = argmax_h_low = height - 1; argmax_h = (DType)argmax_h_low; } else { argmax_h_high = argmax_h_low + 1; } if (argmax_w_low >= width - 1) { argmax_w_high = argmax_w_low = width - 1; argmax_w = (DType)argmax_w_low; } else { argmax_w_high = argmax_w_low + 1; } DType weight = 0; if (h == argmax_h_low) { if (w == argmax_w_low) { weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); } else if (w == argmax_w_high) { weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); } } else if (h == argmax_h_high) { if (w == argmax_w_low) { weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); } else if (w == argmax_w_high) { weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); } } return weight; } template <typename DType> __device__ DType get_coordinate_weight( DType argmax_h, DType argmax_w, const int height, const int width, const DType* im_data, const int data_width, const int bp_dir) { if (argmax_h < 0 || argmax_h > height || argmax_w < 0 || argmax_w > width) { // empty return 0; } if (argmax_h < 0) argmax_h = 0; if (argmax_w < 0) argmax_w = 0; int argmax_h_low = (int)argmax_h; int argmax_w_low = (int)argmax_w; int argmax_h_high; int argmax_w_high; if (argmax_h_low >= height - 1) { argmax_h_high = argmax_h_low = height - 1; argmax_h = (DType)argmax_h_low; } else { argmax_h_high = argmax_h_low + 1; } if (argmax_w_low >= width - 1) { argmax_w_high = argmax_w_low = width - 1; argmax_w = (DType)argmax_w_low; } else { argmax_w_high = argmax_w_low + 1; } DType weight = 0; if (bp_dir == 0) { weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; weight += (argmax_w - argmax_w_low) * 
im_data[argmax_h_high * data_width + argmax_w_high]; } else if (bp_dir == 1) { weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } return weight; } /*! * \brief deformable_im2col gpu kernel. * DO NOT call this directly. Use wrapper function im2col() instead; */ template <typename DType> __global__ void deformable_im2col_gpu_kernel( const int n, const DType* data_im, const DType* data_offset, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int height_col, const int width_col, DType* data_col) { CUDA_1D_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int c_im = (index / width_col) / height_col; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; DType* data_col_ptr = data_col + (c_col * height_col + h_col) * width_col + w_col; const DType* data_im_ptr = data_im + (c_im * height + h_in) * width + w_in; const DType* data_offset_ptr = data_offset + deformable_group_index * 2 * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const DType offset_h = data_offset_ptr[data_offset_h_ptr]; const DType offset_w = data_offset_ptr[data_offset_w_ptr]; DType val = static_cast<DType>(0); const DType h_im = h_in + i * dilation_h + offset_h; const DType w_im = w_in + j * dilation_w + offset_w; if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { const DType map_h = i * dilation_h + offset_h; const DType map_w = j * dilation_w + offset_w; const int cur_height = height - h_in; const int cur_width = width - w_in; val = deformable_im2col_bilinear( data_im_ptr, width, cur_height, cur_width, map_h, map_w); } *data_col_ptr = val; data_col_ptr += height_col * width_col; } } } } /*!\brief * cpu function of deformable_im2col algorithm * \param s device stream * \param data_im pointer of an image (C, H, W, ...) in the image batch * \param data_offset pointer of offset (C, H, W, ...) in the offset batch * \param im_shape input image shape in dimensions (N, C, H, W,) * \param col_shape column buffer shape (#channels, output_im_height, * output_im_width, ...) 
\param kernel_shape kernel filter shape \param pad pad * shape \param stride stride shape \param dilation dilation shape \param * deformable_group #offset group that deformable convolution use \param * data_col column buffer pointer */ template <typename DType, typename Context> void DeformConvOpBase<DType, Context>::DeformableIm2col( const DType* data_im, const DType* data_offset, const std::vector<TIndex>& im_shape, const std::vector<TIndex>& col_shape, DType* data_col) { CHECK_LT(2, CAFFE_CUDA_NUM_THREADS); CAFFE_ENFORCE_EQ(pad_t(), pad_b()); CAFFE_ENFORCE_EQ(pad_l(), pad_r()); const int pad_h = pad_t(); const int pad_w = pad_l(); index_t channel_per_deformable_group = im_shape[1] / deformable_group_; index_t num_kernels = im_shape[1] * size_from_dim_(1, col_shape); hipLaunchKernelGGL(( deformable_im2col_gpu_kernel<DType>) , dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), num_kernels, data_im, data_offset, im_shape[2], im_shape[3], kernel_h(), kernel_w(), pad_h, pad_w, stride_h(), stride_w(), dilation_h(), dilation_w(), channel_per_deformable_group, col_shape[1], col_shape[2], data_col); } /*! * \brief deformable_col2im gpu kernel. * \brief DO NOT call this directly. Use wrapper function deformable_col2im() * instead; */ template <typename DType> __global__ void deformable_col2im_gpu_kernel( const int n, const DType* data_col, const DType* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int height_col, const int width_col, DType* grad_im) { CUDA_1D_KERNEL_LOOP(index, n) { const int j = (index / width_col / height_col) % kernel_w; const int i = (index / width_col / height_col / kernel_w) % kernel_h; const int c = index / width_col / height_col / kernel_w / kernel_h; // compute the start and end of the output const int deformable_group_index = c / channel_per_deformable_group; int w_out = index % width_col; int h_out = (index / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const DType* data_offset_ptr = data_offset + deformable_group_index * 2 * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const DType offset_h = data_offset_ptr[data_offset_h_ptr]; const DType offset_w = data_offset_ptr[data_offset_w_ptr]; const DType cur_inv_h_data = h_in + i * dilation_h + offset_h; const DType cur_inv_w_data = w_in + j * dilation_w + offset_w; const DType cur_top_grad = data_col[index]; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1) { int cur_bottom_grad_pos = (c * height + cur_h + dy) * width + cur_w + dx; DType weight = get_gradient_weight( cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } /*!\brief * gpu function of deformable_col2im algorithm * \param s device stream * \param data_col start pointer of the column buffer to 
be filled * \param data_offset pointer of offset (C, H, W, ...) in the offset batch * \param im_shape input image shape in dimensions (N, C, H, W,) * \param col_shape column buffer shape * \param kernel_shape kernel filter shape * \param pad pad shape * \param stride stride shape * \param dilation dilation shape * \param deformable_group #offset group that deformable convolution use * \param grad_im pointer of a image (C, H, W,...) in the image batch */ template <typename DType, typename Context> void DeformConvOpBase<DType, Context>::DeformableCol2im( const DType* data_col, const DType* data_offset, const std::vector<TIndex>& im_shape, const std::vector<TIndex>& col_shape, DType* grad_im) { CAFFE_ENFORCE_EQ(pad_t(), pad_b()); CAFFE_ENFORCE_EQ(pad_l(), pad_r()); const int pad_h = pad_t(); const int pad_w = pad_l(); index_t im_size = size_from_dim_(1, im_shape); index_t channel_per_deformable_group = im_shape[1] / deformable_group_; index_t num_kernels = size_from_dim_(0, col_shape); // num_axes should be smaller than block size CHECK_LT(2, CAFFE_CUDA_NUM_THREADS); // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( deformable_col2im_gpu_kernel<DType>) , dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), num_kernels, data_col, data_offset, im_shape[1], im_shape[2], im_shape[3], kernel_h(), kernel_w(), pad_h, pad_w, stride_h(), stride_w(), dilation_h(), dilation_w(), channel_per_deformable_group, col_shape[1], col_shape[2], grad_im); } /*! * \brief deformable_col2im_coord gpu kernel. * \brief DO NOT call this directly. Use wrapper function * deformable_col2im_coord() instead; */ template <typename DType> __global__ void deformable_col2im_coord_gpu_kernel( const int n, const DType* data_col, const DType* data_im, const DType* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int height_col, const int width_col, DType* grad_offset) { CUDA_1D_KERNEL_LOOP(index, n) { DType val = 0; int w = index % width_col; int h = (index / width_col) % height_col; int c = index / width_col / height_col; // compute the start and end of the output const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const DType* data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * width_col * height_col; const DType* data_im_ptr = data_im + deformable_group_index * channel_per_deformable_group / kernel_h / kernel_w * height * width; const DType* data_offset_ptr = data_offset + deformable_group_index * 2 * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = ((col_c * height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col) % kernel_w; int i = (col_pos / width_col / height_col / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * 
kernel_w + j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const DType offset_h = data_offset_ptr[data_offset_h_ptr]; const DType offset_w = data_offset_ptr[data_offset_w_ptr]; DType inv_h = h_in + i * dilation_h + offset_h; DType inv_w = w_in + j * dilation_w + offset_w; if (inv_h < 0 || inv_w < 0 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -1; } const DType weight = get_coordinate_weight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos]; cnt += 1; } grad_offset[index] = val; } } /*!\brief * gpu function of deformable_col2im_coord algorithm * \param s device stream * \param data_col start pointer of the column buffer to be filled * \param data_im pointer of an image (C, H, W, ...) in the image batch * \param data_offset pointer of offset (C, H, W, ...) in the offset batch * \param im_shape input image shape in dimensions (N, C, H, W,) * \param col_shape column buffer shape * \param kernel_shape kernel filter shape * \param pad pad shape * \param stride stride shape * \param dilation dilation shape * \param deformable_group #offset group that deformable convolution use * \param grad_offset pointer of the offset (C, H, W,...) in the offset batch */ template <typename DType, typename Context> void DeformConvOpBase<DType, Context>::DeformableCol2imCoord( const DType* data_col, const DType* data_im, const DType* data_offset, const std::vector<TIndex>& im_shape, const std::vector<TIndex>& col_shape, DType* grad_offset) { CAFFE_ENFORCE_EQ(pad_t(), pad_b()); CAFFE_ENFORCE_EQ(pad_l(), pad_r()); const int pad_h = pad_t(); const int pad_w = pad_l(); index_t num_kernels = col_shape[1] * col_shape[2] * 2 * kernel_h() * kernel_w() * deformable_group_; index_t channel_per_deformable_group = col_shape[0] / deformable_group_; // num_axes should be smaller than block size CHECK_LT(2, CAFFE_CUDA_NUM_THREADS); // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( deformable_col2im_coord_gpu_kernel<DType>) , dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), num_kernels, data_col, data_im, data_offset, im_shape[1], im_shape[2], im_shape[3], kernel_h(), kernel_w(), pad_h, pad_w, stride_h(), stride_w(), dilation_h(), dilation_w(), channel_per_deformable_group, col_shape[1], col_shape[2], grad_offset); } REGISTER_CUDA_OPERATOR(DeformConv, DeformConvOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( DeformConvGradient, DeformConvGradientOp<float, CUDAContext>); } // namespace caffe2
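// For readers comparing this .hip file with the .cu original that follows: the main
// mechanical change hipify makes is the kernel-launch syntax. A small illustrative
// example with a generic kernel (not the Caffe2 one) showing the two equivalent forms:
#include <hip/hip_runtime.h>

__global__ void scale(float* x, float a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

void launch_scale(float* d_x, float a, int n, hipStream_t stream) {
    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);
    // CUDA source form:  scale<<<grid, block, 0, stream>>>(d_x, a, n);
    // hipify output: same grid, block, dynamic shared-memory bytes and stream,
    // followed by the kernel arguments.
    hipLaunchKernelGGL(scale, grid, block, 0, stream, d_x, a, n);
}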
81c9d5d3da5874c812f526796ad6674f9befb2eb.cu
/*! ******************* BEGIN Caffe Copyright Notice and Disclaimer **************** * * COPYRIGHT * * All contributions by the University of California: * Copyright (c) 2014-2017 The Regents of the University of California (Regents) * All rights reserved. * * All other contributions: * Copyright (c) 2014-2017, the respective contributors * All rights reserved. * * Caffe uses a shared copyright model: each contributor holds copyright over * their contributions to Caffe. The project versioning records all such * contribution and copyright details. If a contributor wants to further mark * their specific copyright on a particular contribution, they should indicate * their copyright solely in the commit message of the change when it is * committed. * * LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * CONTRIBUTION AGREEMENT * * By contributing to the BVLC/caffe repository through pull-request, comment, * or otherwise, the contributor releases their content to the * license and copyright terms herein. * ***************** END Caffe Copyright Notice and Disclaimer ******************** * * Copyright (c) 2017 Microsoft * Licensed under The Apache-2.0 License [see LICENSE for details] * \file deformable_im2col.cuh * \brief Function definitions of converting an image to * column matrix based on kernel, padding, dilation, and offset. * These functions are mainly used in deformable convolution operators. 
* \ref: https://arxiv.org/abs/1703.06211 * \author Yuwen Xiong, Haozhi Qi, Jifeng Dai */ #include <cub/block/block_reduce.cuh> #include <vector> #include "caffe2/core/common.h" #include "caffe2/core/context_gpu.h" #include "caffe2/operators/deform_conv_op.h" #include "caffe2/operators/deform_conv_op_impl.h" namespace caffe2 { typedef TIndex index_t; typedef std::vector<TIndex> TShape; template <typename DType> __device__ DType deformable_im2col_bilinear( const DType* bottom_data, const int data_width, const int height, const int width, DType h, DType w) { int h_low = floor(h); int w_low = floor(w); int h_high; int w_high; if (h_low >= height - 1) { h_high = h_low = height - 1; h = (DType)h_low; } else { h_high = h_low + 1; } if (w_low >= width - 1) { w_high = w_low = width - 1; w = (DType)w_low; } else { w_high = w_low + 1; } DType lh = h - h_low; DType lw = w - w_low; DType hh = 1 - lh, hw = 1 - lw; DType v1 = bottom_data[h_low * data_width + w_low]; DType v2 = bottom_data[h_low * data_width + w_high]; DType v3 = bottom_data[h_high * data_width + w_low]; DType v4 = bottom_data[h_high * data_width + w_high]; DType w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; DType val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename DType> __device__ DType get_gradient_weight( DType argmax_h, DType argmax_w, const int h, const int w, const int height, const int width) { if (argmax_h < 0 || argmax_h > height || argmax_w < 0 || argmax_w > width) { // empty return 0; } argmax_h = max(argmax_h, (DType)0.0f); argmax_w = max(argmax_w, (DType)0.0f); int argmax_h_low = (int)argmax_h; int argmax_w_low = (int)argmax_w; int argmax_h_high; int argmax_w_high; if (argmax_h_low >= height - 1) { argmax_h_high = argmax_h_low = height - 1; argmax_h = (DType)argmax_h_low; } else { argmax_h_high = argmax_h_low + 1; } if (argmax_w_low >= width - 1) { argmax_w_high = argmax_w_low = width - 1; argmax_w = (DType)argmax_w_low; } else { argmax_w_high = argmax_w_low + 1; } DType weight = 0; if (h == argmax_h_low) { if (w == argmax_w_low) { weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); } else if (w == argmax_w_high) { weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); } } else if (h == argmax_h_high) { if (w == argmax_w_low) { weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); } else if (w == argmax_w_high) { weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); } } return weight; } template <typename DType> __device__ DType get_coordinate_weight( DType argmax_h, DType argmax_w, const int height, const int width, const DType* im_data, const int data_width, const int bp_dir) { if (argmax_h < 0 || argmax_h > height || argmax_w < 0 || argmax_w > width) { // empty return 0; } if (argmax_h < 0) argmax_h = 0; if (argmax_w < 0) argmax_w = 0; int argmax_h_low = (int)argmax_h; int argmax_w_low = (int)argmax_w; int argmax_h_high; int argmax_w_high; if (argmax_h_low >= height - 1) { argmax_h_high = argmax_h_low = height - 1; argmax_h = (DType)argmax_h_low; } else { argmax_h_high = argmax_h_low + 1; } if (argmax_w_low >= width - 1) { argmax_w_high = argmax_w_low = width - 1; argmax_w = (DType)argmax_w_low; } else { argmax_w_high = argmax_w_low + 1; } DType weight = 0; if (bp_dir == 0) { weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; weight += (argmax_w - argmax_w_low) * 
im_data[argmax_h_high * data_width + argmax_w_high]; } else if (bp_dir == 1) { weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } return weight; } /*! * \brief deformable_im2col gpu kernel. * DO NOT call this directly. Use wrapper function im2col() instead; */ template <typename DType> __global__ void deformable_im2col_gpu_kernel( const int n, const DType* data_im, const DType* data_offset, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int height_col, const int width_col, DType* data_col) { CUDA_1D_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int c_im = (index / width_col) / height_col; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; DType* data_col_ptr = data_col + (c_col * height_col + h_col) * width_col + w_col; const DType* data_im_ptr = data_im + (c_im * height + h_in) * width + w_in; const DType* data_offset_ptr = data_offset + deformable_group_index * 2 * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const DType offset_h = data_offset_ptr[data_offset_h_ptr]; const DType offset_w = data_offset_ptr[data_offset_w_ptr]; DType val = static_cast<DType>(0); const DType h_im = h_in + i * dilation_h + offset_h; const DType w_im = w_in + j * dilation_w + offset_w; if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { const DType map_h = i * dilation_h + offset_h; const DType map_w = j * dilation_w + offset_w; const int cur_height = height - h_in; const int cur_width = width - w_in; val = deformable_im2col_bilinear( data_im_ptr, width, cur_height, cur_width, map_h, map_w); } *data_col_ptr = val; data_col_ptr += height_col * width_col; } } } } /*!\brief * cpu function of deformable_im2col algorithm * \param s device stream * \param data_im pointer of an image (C, H, W, ...) in the image batch * \param data_offset pointer of offset (C, H, W, ...) in the offset batch * \param im_shape input image shape in dimensions (N, C, H, W,) * \param col_shape column buffer shape (#channels, output_im_height, * output_im_width, ...) 
\param kernel_shape kernel filter shape \param pad pad * shape \param stride stride shape \param dilation dilation shape \param * deformable_group #offset group that deformable convolution use \param * data_col column buffer pointer */ template <typename DType, typename Context> void DeformConvOpBase<DType, Context>::DeformableIm2col( const DType* data_im, const DType* data_offset, const std::vector<TIndex>& im_shape, const std::vector<TIndex>& col_shape, DType* data_col) { CHECK_LT(2, CAFFE_CUDA_NUM_THREADS); CAFFE_ENFORCE_EQ(pad_t(), pad_b()); CAFFE_ENFORCE_EQ(pad_l(), pad_r()); const int pad_h = pad_t(); const int pad_w = pad_l(); index_t channel_per_deformable_group = im_shape[1] / deformable_group_; index_t num_kernels = im_shape[1] * size_from_dim_(1, col_shape); deformable_im2col_gpu_kernel<DType> <<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( num_kernels, data_im, data_offset, im_shape[2], im_shape[3], kernel_h(), kernel_w(), pad_h, pad_w, stride_h(), stride_w(), dilation_h(), dilation_w(), channel_per_deformable_group, col_shape[1], col_shape[2], data_col); } /*! * \brief deformable_col2im gpu kernel. * \brief DO NOT call this directly. Use wrapper function deformable_col2im() * instead; */ template <typename DType> __global__ void deformable_col2im_gpu_kernel( const int n, const DType* data_col, const DType* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int height_col, const int width_col, DType* grad_im) { CUDA_1D_KERNEL_LOOP(index, n) { const int j = (index / width_col / height_col) % kernel_w; const int i = (index / width_col / height_col / kernel_w) % kernel_h; const int c = index / width_col / height_col / kernel_w / kernel_h; // compute the start and end of the output const int deformable_group_index = c / channel_per_deformable_group; int w_out = index % width_col; int h_out = (index / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const DType* data_offset_ptr = data_offset + deformable_group_index * 2 * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const DType offset_h = data_offset_ptr[data_offset_h_ptr]; const DType offset_w = data_offset_ptr[data_offset_w_ptr]; const DType cur_inv_h_data = h_in + i * dilation_h + offset_h; const DType cur_inv_w_data = w_in + j * dilation_w + offset_w; const DType cur_top_grad = data_col[index]; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1) { int cur_bottom_grad_pos = (c * height + cur_h + dy) * width + cur_w + dx; DType weight = get_gradient_weight( cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } /*!\brief * gpu function of deformable_col2im algorithm * \param s device stream * \param data_col start pointer of the column buffer to be filled * \param data_offset 
pointer of offset (C, H, W, ...) in the offset batch * \param im_shape input image shape in dimensions (N, C, H, W,) * \param col_shape column buffer shape * \param kernel_shape kernel filter shape * \param pad pad shape * \param stride stride shape * \param dilation dilation shape * \param deformable_group #offset group that deformable convolution use * \param grad_im pointer of a image (C, H, W,...) in the image batch */ template <typename DType, typename Context> void DeformConvOpBase<DType, Context>::DeformableCol2im( const DType* data_col, const DType* data_offset, const std::vector<TIndex>& im_shape, const std::vector<TIndex>& col_shape, DType* grad_im) { CAFFE_ENFORCE_EQ(pad_t(), pad_b()); CAFFE_ENFORCE_EQ(pad_l(), pad_r()); const int pad_h = pad_t(); const int pad_w = pad_l(); index_t im_size = size_from_dim_(1, im_shape); index_t channel_per_deformable_group = im_shape[1] / deformable_group_; index_t num_kernels = size_from_dim_(0, col_shape); // num_axes should be smaller than block size CHECK_LT(2, CAFFE_CUDA_NUM_THREADS); // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. // NOLINT_NEXT_LINE(whitespace/operators) deformable_col2im_gpu_kernel<DType> <<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( num_kernels, data_col, data_offset, im_shape[1], im_shape[2], im_shape[3], kernel_h(), kernel_w(), pad_h, pad_w, stride_h(), stride_w(), dilation_h(), dilation_w(), channel_per_deformable_group, col_shape[1], col_shape[2], grad_im); } /*! * \brief deformable_col2im_coord gpu kernel. * \brief DO NOT call this directly. Use wrapper function * deformable_col2im_coord() instead; */ template <typename DType> __global__ void deformable_col2im_coord_gpu_kernel( const int n, const DType* data_col, const DType* data_im, const DType* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int height_col, const int width_col, DType* grad_offset) { CUDA_1D_KERNEL_LOOP(index, n) { DType val = 0; int w = index % width_col; int h = (index / width_col) % height_col; int c = index / width_col / height_col; // compute the start and end of the output const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const DType* data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * width_col * height_col; const DType* data_im_ptr = data_im + deformable_group_index * channel_per_deformable_group / kernel_h / kernel_w * height * width; const DType* data_offset_ptr = data_offset + deformable_group_index * 2 * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = ((col_c * height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col) % kernel_w; int i = (col_pos / width_col / height_col / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); 
const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const DType offset_h = data_offset_ptr[data_offset_h_ptr]; const DType offset_w = data_offset_ptr[data_offset_w_ptr]; DType inv_h = h_in + i * dilation_h + offset_h; DType inv_w = w_in + j * dilation_w + offset_w; if (inv_h < 0 || inv_w < 0 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -1; } const DType weight = get_coordinate_weight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos]; cnt += 1; } grad_offset[index] = val; } } /*!\brief * gpu function of deformable_col2im_coord algorithm * \param s device stream * \param data_col start pointer of the column buffer to be filled * \param data_im pointer of an image (C, H, W, ...) in the image batch * \param data_offset pointer of offset (C, H, W, ...) in the offset batch * \param im_shape input image shape in dimensions (N, C, H, W,) * \param col_shape column buffer shape * \param kernel_shape kernel filter shape * \param pad pad shape * \param stride stride shape * \param dilation dilation shape * \param deformable_group #offset group that deformable convolution use * \param grad_offset pointer of the offset (C, H, W,...) in the offset batch */ template <typename DType, typename Context> void DeformConvOpBase<DType, Context>::DeformableCol2imCoord( const DType* data_col, const DType* data_im, const DType* data_offset, const std::vector<TIndex>& im_shape, const std::vector<TIndex>& col_shape, DType* grad_offset) { CAFFE_ENFORCE_EQ(pad_t(), pad_b()); CAFFE_ENFORCE_EQ(pad_l(), pad_r()); const int pad_h = pad_t(); const int pad_w = pad_l(); index_t num_kernels = col_shape[1] * col_shape[2] * 2 * kernel_h() * kernel_w() * deformable_group_; index_t channel_per_deformable_group = col_shape[0] / deformable_group_; // num_axes should be smaller than block size CHECK_LT(2, CAFFE_CUDA_NUM_THREADS); // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. // NOLINT_NEXT_LINE(whitespace/operators) deformable_col2im_coord_gpu_kernel<DType> <<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( num_kernels, data_col, data_im, data_offset, im_shape[1], im_shape[2], im_shape[3], kernel_h(), kernel_w(), pad_h, pad_w, stride_h(), stride_w(), dilation_h(), dilation_w(), channel_per_deformable_group, col_shape[1], col_shape[2], grad_offset); } REGISTER_CUDA_OPERATOR(DeformConv, DeformConvOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( DeformConvGradient, DeformConvGradientOp<float, CUDAContext>); } // namespace caffe2
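// The kernels above rely on Caffe2's CUDA_1D_KERNEL_LOOP and CAFFE_GET_BLOCKS helpers,
// which are defined elsewhere in the Caffe2 tree. The sketch below shows the grid-stride
// pattern such helpers conventionally expand to; it is an approximation under that
// assumption, not code copied from Caffe2 headers, and the names here are hypothetical.
#include <cuda_runtime.h>

constexpr int kNumThreads = 512;  // threads per block, analogous to CAFFE_CUDA_NUM_THREADS

// enough blocks so that blocks * kNumThreads >= n (ceil division)
inline int GetBlocks(int n) { return (n + kNumThreads - 1) / kNumThreads; }

__global__ void axpy(int n, float a, const float* x, float* y) {
    // grid-stride loop: each thread handles indices i, i + gridSize, i + 2*gridSize, ...
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x) {
        y[i] = a * x[i] + y[i];
    }
}

// usage sketch: axpy<<<GetBlocks(n), kNumThreads, 0, stream>>>(n, 2.f, d_x, d_y);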
064ffa52945209bbbce4101228aeaf0599d8c744.hip
// !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>

#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>

#include <vector>
#include <stdio.h>
#include <cstdint>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <chrono>

#define DEBUG_1D 0
#define DEBUG_THREAD_INFO_FLOAT32 0
#define DEBUG_THREAD_INFO_INT32 0
#define DEBUG_BITS 0
#define DEBUG_SEEDS 0

template <typename scalar_t>
__global__ void binarize_kernel(
    torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> input) {

  // handle access indices
  const int c = blockIdx.x * blockDim.x + threadIdx.x;
  const int d = blockIdx.y * blockDim.y + threadIdx.y;
  const int e = blockIdx.z * blockDim.z + threadIdx.z;

  // Python version
  //output[input > 0] = 1
  //output[input <= 0] = -1
  if ((c < input.size(0)) && (d < input.size(1)) && (e < input.size(2))) {
    if (input[c][d][e] > 0) {
      input[c][d][e] = 1;
    } else {
      input[c][d][e] = -1;
    }
  }
}

std::vector<torch::Tensor> binarize_cuda(torch::Tensor input) {
  // The number of thread blocks in a grid is usually dictated by the size of the data being processed, which typically exceeds the number of processors in the system.
  // dim3 threadsPerBlock(8,8,8)
  // <<<number of blocks per grid, number of threads per block>>>
  // grid is created with enough blocks to have one thread per matrix element
  // https://devtalk.nvidia.com/default/topic/1028226/how-many-concurrent-threads-are-running-on-my-geforce-gtx-1080-ti-/
  const int input_size_x = input.size(0);
  const int input_size_y = input.size(1);
  const int input_size_z = input.size(2);
  int threads_x = 8; // per block, 8
  int threads_y = 8; // per block, 8
  int threads_z = 8; // per block, 8

#if DEBUG_1D
  threads_x = 1;
  threads_y = 1;
  threads_z = 1;
#endif

  const dim3 threads(threads_x,threads_y, threads_z);
  const dim3 blocks((input_size_x + threads_x - 1) / threads_x,
                    (input_size_y + threads_y - 1) / threads_y,
                    (input_size_z + threads_z - 1) / threads_z);

  AT_DISPATCH_ALL_TYPES(input.type(), "binarize_cuda", ([&] {
    hipLaunchKernelGGL(( binarize_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
        input.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>()
    );
  }));

  return {input};
}
064ffa52945209bbbce4101228aeaf0599d8c744.cu
#include <torch/extension.h>

#include <cuda.h>
#include <cuda_runtime.h>

#include <vector>
#include <stdio.h>
#include <cstdint>
#include <curand.h>
#include <curand_kernel.h>
#include <chrono>

#define DEBUG_1D 0
#define DEBUG_THREAD_INFO_FLOAT32 0
#define DEBUG_THREAD_INFO_INT32 0
#define DEBUG_BITS 0
#define DEBUG_SEEDS 0

template <typename scalar_t>
__global__ void binarize_kernel(
    torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> input) {

  // handle access indices
  const int c = blockIdx.x * blockDim.x + threadIdx.x;
  const int d = blockIdx.y * blockDim.y + threadIdx.y;
  const int e = blockIdx.z * blockDim.z + threadIdx.z;

  // Python version
  //output[input > 0] = 1
  //output[input <= 0] = -1
  if ((c < input.size(0)) && (d < input.size(1)) && (e < input.size(2))) {
    if (input[c][d][e] > 0) {
      input[c][d][e] = 1;
    } else {
      input[c][d][e] = -1;
    }
  }
}

std::vector<torch::Tensor> binarize_cuda(torch::Tensor input) {
  // The number of thread blocks in a grid is usually dictated by the size of the data being processed, which typically exceeds the number of processors in the system.
  // dim3 threadsPerBlock(8,8,8)
  // <<<number of blocks per grid, number of threads per block>>>
  // grid is created with enough blocks to have one thread per matrix element
  // https://devtalk.nvidia.com/default/topic/1028226/how-many-concurrent-threads-are-running-on-my-geforce-gtx-1080-ti-/
  const int input_size_x = input.size(0);
  const int input_size_y = input.size(1);
  const int input_size_z = input.size(2);
  int threads_x = 8; // per block, 8
  int threads_y = 8; // per block, 8
  int threads_z = 8; // per block, 8

#if DEBUG_1D
  threads_x = 1;
  threads_y = 1;
  threads_z = 1;
#endif

  const dim3 threads(threads_x,threads_y, threads_z);
  const dim3 blocks((input_size_x + threads_x - 1) / threads_x,
                    (input_size_y + threads_y - 1) / threads_y,
                    (input_size_z + threads_z - 1) / threads_z);

  AT_DISPATCH_ALL_TYPES(input.type(), "binarize_cuda", ([&] {
    binarize_kernel<scalar_t><<<blocks, threads>>>(
        input.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>()
    );
  }));

  return {input};
}
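// The ceil division used for `blocks` in binarize_cuda deliberately launches more threads
// than tensor elements, and the per-thread bounds check keeps the surplus threads from
// touching memory. A short sketch of that pattern with generic sizes, raw pointers and
// hypothetical names (not part of the extension above):
#include <cuda_runtime.h>

__global__ void sign3d(float* data, int nx, int ny, int nz) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int z = blockIdx.z * blockDim.z + threadIdx.z;
    if (x < nx && y < ny && z < nz) {   // guard the ragged edge of the grid
        int idx = (x * ny + y) * nz + z;
        data[idx] = data[idx] > 0 ? 1.f : -1.f;
    }
}

void launch_sign3d(float* d, int nx, int ny, int nz) {
    dim3 threads(8, 8, 8);
    dim3 blocks((nx + 7) / 8, (ny + 7) / 8, (nz + 7) / 8);  // ceil division per axis
    sign3d<<<blocks, threads>>>(d, nx, ny, nz);
}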
11eca49197273ebd5522d761a7ecbe360859c61b.hip
// !!! This is a file automatically generated by hipify!!! /** * \file dnn/src/cuda/conv_bias/cutlass_convolution_wrapper.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2021 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. */ // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #if !MEGDNN_TEGRA_X1 #include "cutlass/convolution/device/convolution.h" #endif #include "src/common/opr_param_defs_enumv.cuh" #include "src/cuda/conv_bias/cutlass_convolution_wrapper.cuh" #pragma GCC diagnostic pop using namespace megdnn; using namespace cuda; using namespace cutlass_wrapper; /* ================= cutlass kernel wrapper for nchw32 layout ================ */ #if MEGDNN_TEGRA_X1 template <bool NeedLoadFromConstMem> void megdnn::cuda::cutlass_wrapper:: do_conv_bias_int8_implicit_gemm_imma_ncdiv32hw32( const int8_t* /* d_src */, const int8_t* /* d_filter */, const int32_t* /* d_bias */, const int8_t* /* d_z */, int8_t* /* d_dst */, int* /* workspace */, const convolution::ConvParam& /* param */, uint32_t /* nonlinear_mode */, float /* alpha */, float /* beta */, float /* gamma */, float /* scale */, const GemmCoord& /* threadblock_shape */, const GemmCoord& /* warp_shape */, hipStream_t /* stream */) {} #else template <bool NeedLoadFromConstMem> void megdnn::cuda::cutlass_wrapper:: do_conv_bias_int8_implicit_gemm_imma_ncdiv32hw32( const int8_t* d_src, const int8_t* d_filter, const int32_t* d_bias, const int8_t* d_z, int8_t* d_dst, int* workspace, const convolution::ConvParam& param, uint32_t nonlinear_mode, float alpha, float beta, float gamma, float scale, const GemmCoord& threadblock_shape, const GemmCoord& warp_shape, hipStream_t stream) { #define DISPATCH_KERNEL_WITH_TILE_SHAPE(threadblock_m_, threadblock_n_, \ threadblock_k_, warp_m_, warp_n_, \ warp_k_) \ if (threadblock_shape.m() == threadblock_m_ && \ threadblock_shape.n() == threadblock_n_ && \ threadblock_shape.k() == threadblock_k_ && \ warp_shape.m() == warp_m_ && warp_shape.n() == warp_n_ && \ warp_shape.k() == warp_k_) { \ using ThreadBlockShape = \ cutlass::gemm::GemmShape<threadblock_m_, threadblock_n_, \ threadblock_k_>; \ using WarpShape = cutlass::gemm::GemmShape<warp_m_, warp_n_, warp_k_>; \ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; \ using Convolution = cutlass::conv::device::Convolution< \ int8_t, cutlass::layout::TensorNCxHWx<32>, int8_t, \ cutlass::layout::TensorCxRSKx<32>, ElementOutput, \ cutlass::layout::TensorNCxHWx<32>, int32_t, \ cutlass::layout::TensorNCxHWx<32>, int32_t, \ cutlass::conv::ConvType::kConvolution, \ cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, \ ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, \ cutlass::conv::threadblock:: \ ConvolutionFpropNCxHWxThreadblockSwizzle, \ 2, 16, 16, NeedLoadFromConstMem>; \ typename Convolution::ConvolutionParameter conv_param( \ param.n, param.hi, param.wi, param.ci, param.co, param.fh, \ param.fw, param.ho, param.wo, param.ph, param.pw, param.sh, \ param.sw, 1, 1, cutlass::conv::Mode::kCrossCorrelation); \ return cutlass_convolution_wrapper<Convolution>( \ d_src, d_filter, d_bias, d_z, d_dst, workspace, conv_param, \ epilogue, stream); \ } #define DISPATCH_KERNEL \ 
DISPATCH_KERNEL_WITH_TILE_SHAPE(256, 128, 64, 64, 64, 64); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 256, 64, 64, 64, 64); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 128, 64, 64, 64, 64); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 128, 64, 32, 64, 64); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 64, 64, 64, 32, 64); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 64, 64, 32, 32, 64); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(32, 64, 64, 32, 16, 64); \ megdnn_assert(false, \ "unsupported threadblock shape (%dx%dx%d) and warp shape " \ "(%dx%dx%d)", \ threadblock_shape.m(), threadblock_shape.n(), \ threadblock_shape.k(), warp_shape.m(), warp_shape.n(), \ warp_shape.k()); using ElementOutput = int8_t; using ElementAccumulator = int32_t; using ElementBias = int32_t; using ElementCompute = float; using NonlineMode = megdnn::param_enumv::ConvBias::NonlineMode; switch (nonlinear_mode) { case NonlineMode::IDENTITY: { using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp< ElementOutput, 8, ElementAccumulator, ElementBias, ElementCompute>; typename EpilogueOp::Params epilogue{alpha, beta, gamma}; DISPATCH_KERNEL; } case NonlineMode::RELU: { using EpilogueOp = cutlass::epilogue::thread:: BiasAddLinearCombinationReluClamp< ElementOutput, 8, ElementAccumulator, ElementBias, ElementCompute>; typename EpilogueOp::Params epilogue{alpha, beta, gamma, 0}; DISPATCH_KERNEL; } case NonlineMode::H_SWISH: { using EpilogueOp = cutlass::epilogue::thread:: BiasAddLinearCombinationHSwishClamp< ElementOutput, 8, ElementAccumulator, ElementBias, ElementCompute>; typename EpilogueOp::Params epilogue{alpha, beta, gamma, scale}; DISPATCH_KERNEL; } default: megdnn_assert(false, "unsupported nonlinear mode for conv bias operator"); } #undef DISPATCH_KERNEL_WITH_TILE_SHAPE #undef DISPATCH_KERNEL } #endif #define INST(need_load_from_const_mem) \ template void megdnn::cuda::cutlass_wrapper:: \ do_conv_bias_int8_implicit_gemm_imma_ncdiv32hw32< \ need_load_from_const_mem>( \ const int8_t* d_src, const int8_t* d_filter, \ const int32_t* d_bias, const int8_t* d_z, int8_t* d_dst, \ int* workspace, const convolution::ConvParam& param, \ uint32_t nonlinear_mode, float alpha, float beta, \ float gamma, float scale, \ const GemmCoord& threadblock_shape, \ const GemmCoord& warp_shape, hipStream_t stream); INST(true); INST(false); #undef INST /* ==== cutlass kernel wrapper for nchw32 layout and nchw4 output ===== */ #if MEGDNN_TEGRA_X1 template <bool NeedLoadFromConstMem> void megdnn::cuda::cutlass_wrapper:: do_conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_ncdiv4hw4( const int8_t* /* d_src */, const int8_t* /* d_filter */, const int32_t* /* d_bias */, const int8_t* /* d_z */, int8_t* /* d_dst */, int* /* workspace */, const convolution::ConvParam& /* param */, uint32_t /* nonlinear_mode */, float /* alpha */, float /* beta */, float /* gamma */, float /* scale */, const GemmCoord& /* threadblock_shape */, const GemmCoord& /* warp_shape */, hipStream_t /* stream */) {} #else template <bool NeedLoadFromConstMem> void megdnn::cuda::cutlass_wrapper:: do_conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_ncdiv4hw4( const int8_t* d_src, const int8_t* d_filter, const int32_t* d_bias, const int8_t* d_z, int8_t* d_dst, int* workspace, const convolution::ConvParam& param, uint32_t nonlinear_mode, float alpha, float beta, float gamma, float scale, const GemmCoord& threadblock_shape, const GemmCoord& warp_shape, hipStream_t stream) { #define DISPATCH_KERNEL_WITH_TILE_SHAPE(threadblock_m_, threadblock_n_, \ threadblock_k_, warp_m_, warp_n_, \ warp_k_) \ if 
(threadblock_shape.m() == threadblock_m_ && \ threadblock_shape.n() == threadblock_n_ && \ threadblock_shape.k() == threadblock_k_ && \ warp_shape.m() == warp_m_ && warp_shape.n() == warp_n_ && \ warp_shape.k() == warp_k_) { \ using ThreadBlockShape = \ cutlass::gemm::GemmShape<threadblock_m_, threadblock_n_, \ threadblock_k_>; \ using WarpShape = cutlass::gemm::GemmShape<warp_m_, warp_n_, warp_k_>; \ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; \ using Convolution = cutlass::conv::device::Convolution< \ int8_t, cutlass::layout::TensorNCxHWx<32>, int8_t, \ cutlass::layout::TensorCxRSKx<32>, ElementOutput, \ cutlass::layout::TensorNCxHWx<4>, int32_t, \ cutlass::layout::TensorNCxHWx<4>, int32_t, \ cutlass::conv::ConvType::kConvolution, \ cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, \ ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, \ cutlass::conv::threadblock:: \ ConvolutionFpropNCxHWxThreadblockSwizzle, \ 2, 16, 16, NeedLoadFromConstMem>; \ typename Convolution::ConvolutionParameter conv_param( \ param.n, param.hi, param.wi, param.ci, param.co, param.fh, \ param.fw, param.ho, param.wo, param.ph, param.pw, param.sh, \ param.sw, 1, 1, cutlass::conv::Mode::kCrossCorrelation); \ return cutlass_convolution_wrapper<Convolution>( \ d_src, d_filter, d_bias, d_z, d_dst, workspace, conv_param, \ epilogue, stream); \ } #define DISPATCH_KERNEL \ DISPATCH_KERNEL_WITH_TILE_SHAPE(256, 128, 64, 64, 64, 64); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 256, 64, 64, 64, 64); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 128, 64, 64, 64, 64); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 128, 64, 32, 64, 64); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 64, 64, 64, 32, 64); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 64, 64, 32, 32, 64); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(32, 64, 64, 16, 32, 64); \ megdnn_assert(false, \ "unsupported threadblock shape (%dx%dx%d) and warp shape " \ "(%dx%dx%d)", \ threadblock_shape.m(), threadblock_shape.n(), \ threadblock_shape.k(), warp_shape.m(), warp_shape.n(), \ warp_shape.k()); using ElementOutput = int8_t; using ElementAccumulator = int32_t; using ElementBias = int32_t; using ElementCompute = float; using NonlineMode = megdnn::param_enumv::ConvBias::NonlineMode; switch (nonlinear_mode) { case NonlineMode::IDENTITY: { using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp< ElementOutput, 4, ElementAccumulator, ElementBias, ElementCompute>; typename EpilogueOp::Params epilogue{alpha, beta, gamma}; DISPATCH_KERNEL; } case NonlineMode::RELU: { using EpilogueOp = cutlass::epilogue::thread:: BiasAddLinearCombinationReluClamp< ElementOutput, 4, ElementAccumulator, ElementBias, ElementCompute>; typename EpilogueOp::Params epilogue{alpha, beta, gamma, 0}; DISPATCH_KERNEL; } case NonlineMode::H_SWISH: { using EpilogueOp = cutlass::epilogue::thread:: BiasAddLinearCombinationHSwishClamp< ElementOutput, 4, ElementAccumulator, ElementBias, ElementCompute>; typename EpilogueOp::Params epilogue{alpha, beta, gamma, scale}; DISPATCH_KERNEL; } default: megdnn_assert(false, "unsupported nonlinear mode for conv bias operator"); } #undef DISPATCH_KERNEL_WITH_TILE_SHAPE #undef DISPATCH_KERNEL } #endif #define INST(need_load_from_const_mem) \ template void megdnn::cuda::cutlass_wrapper:: \ do_conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_ncdiv4hw4< \ need_load_from_const_mem>( \ const int8_t* d_src, const int8_t* d_filter, \ const int32_t* d_bias, const int8_t* d_z, int8_t* d_dst, \ int* workspace, const convolution::ConvParam& param, \ uint32_t nonlinear_mode, float 
alpha, float beta, \ float gamma, float scale, \ const GemmCoord& threadblock_shape, \ const GemmCoord& warp_shape, hipStream_t stream); INST(true); INST(false); #undef INST /* ================ cutlass kernel wrapper for nchw4 layout ================= */ #if MEGDNN_TEGRA_X1 template <bool NeedLoadFromConstMem> void megdnn::cuda::cutlass_wrapper:: do_conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4( const int8_t* /* d_src */, const int8_t* /* d_filter */, const int32_t* /* d_bias */, const int8_t* /* d_z */, int8_t* /* d_dst */, int* /* workspace */, const convolution::ConvParam& /* param */, uint32_t /* nonlinear_mode */, float /* alpha */, float /* beta */, float /* gamma */, float /* scale */, const GemmCoord& /* threadblock_shape */, const GemmCoord& /* warp_shape */, int /* stages */, hipStream_t /* stream */) {} #else template <bool NeedLoadFromConstMem> void megdnn::cuda::cutlass_wrapper:: do_conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4( const int8_t* d_src, const int8_t* d_filter, const int32_t* d_bias, const int8_t* d_z, int8_t* d_dst, int* workspace, const convolution::ConvParam& param, uint32_t nonlinear_mode, float alpha, float beta, float gamma, float scale, const GemmCoord& threadblock_shape, const GemmCoord& warp_shape, int stages, hipStream_t stream) { #define DISPATCH_KERNEL_WITH_TILE_SHAPE(threadblock_m_, threadblock_n_, \ threadblock_k_, warp_m_, warp_n_, \ warp_k_, stage_, aligned_) \ if (threadblock_shape.m() == threadblock_m_ && \ threadblock_shape.n() == threadblock_n_ && \ threadblock_shape.k() == threadblock_k_ && \ warp_shape.m() == warp_m_ && warp_shape.n() == warp_n_ && \ warp_shape.k() == warp_k_ && stages == stage_) { \ using ThreadBlockShape = \ cutlass::gemm::GemmShape<threadblock_m_, threadblock_n_, \ threadblock_k_>; \ using WarpShape = cutlass::gemm::GemmShape<warp_m_, warp_n_, warp_k_>; \ using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; \ using Convolution = cutlass::conv::device::Convolution< \ int8_t, cutlass::layout::TensorNCxHWx<4>, int8_t, \ cutlass::layout::TensorCxRSKx<4>, ElementOutput, \ cutlass::layout::TensorNCxHWx<4>, int32_t, \ cutlass::layout::TensorNCxHWx<4>, int32_t, \ cutlass::conv::ConvType::kConvolution, \ cutlass::arch::OpClassSimt, cutlass::arch::Sm61, \ ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, \ cutlass::conv::threadblock:: \ ConvolutionFpropNCxHWxThreadblockSwizzle, \ stage_, 4, aligned_, NeedLoadFromConstMem>; \ typename Convolution::ConvolutionParameter conv_param( \ param.n, param.hi, param.wi, param.ci, param.co, param.fh, \ param.fw, param.ho, param.wo, param.ph, param.pw, param.sh, \ param.sw, 1, 1, cutlass::conv::Mode::kCrossCorrelation); \ return cutlass_convolution_wrapper<Convolution>( \ d_src, d_filter, d_bias, d_z, d_dst, workspace, conv_param, \ epilogue, stream); \ } #define DISPATCH_KERNEL \ DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 128, 32, 64, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 64, 32, 64, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 128, 32, 64, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 32, 32, 64, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(32, 128, 32, 32, 64, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 64, 32, 64, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(32, 64, 32, 32, 64, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 32, 32, 64, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(32, 32, 32, 32, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(16, 128, 16, 16, 128, 16, 1, 8); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(16, 64, 8, 16, 64, 
8, 2, 4); \ megdnn_assert(false, \ "unsupported threadblock shape (%dx%dx%d) and warp shape " \ "(%dx%dx%d)", \ threadblock_shape.m(), threadblock_shape.n(), \ threadblock_shape.k(), warp_shape.m(), warp_shape.n(), \ warp_shape.k()); using ElementOutput = int8_t; using ElementAccumulator = int32_t; using ElementBias = int32_t; using ElementCompute = float; using NonlineMode = megdnn::param_enumv::ConvBias::NonlineMode; switch (nonlinear_mode) { case NonlineMode::IDENTITY: { using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp< ElementOutput, 4, ElementAccumulator, ElementBias, ElementCompute>; typename EpilogueOp::Params epilogue{alpha, beta, gamma}; DISPATCH_KERNEL; } case NonlineMode::RELU: { using EpilogueOp = cutlass::epilogue::thread:: BiasAddLinearCombinationReluClamp< ElementOutput, 4, ElementAccumulator, ElementBias, ElementCompute>; typename EpilogueOp::Params epilogue{alpha, beta, gamma, 0}; DISPATCH_KERNEL; } case NonlineMode::H_SWISH: { using EpilogueOp = cutlass::epilogue::thread:: BiasAddLinearCombinationHSwishClamp< ElementOutput, 4, ElementAccumulator, ElementBias, ElementCompute>; typename EpilogueOp::Params epilogue{alpha, beta, gamma, scale}; DISPATCH_KERNEL; } default: megdnn_assert(false, "unsupported nonlinear mode for conv bias operator"); } #undef DISPATCH_KERNEL_WITH_TILE_SHAPE #undef DISPATCH_KERNEL } #endif #define INST(need_load_from_const_mem) \ template void megdnn::cuda::cutlass_wrapper:: \ do_conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4< \ need_load_from_const_mem>( \ const int8_t* d_src, const int8_t* d_filter, \ const int32_t* d_bias, const int8_t* d_z, int8_t* d_dst, \ int* workspace, const convolution::ConvParam& param, \ uint32_t nonlinear_mode, float alpha, float beta, \ float gamma, float scale, \ const GemmCoord& threadblock_shape, \ const GemmCoord& warp_shape, int stages, \ hipStream_t stream); INST(true); INST(false); #undef INST /* ===== cutlass kernel wrapper for nchw4 layout and nchw output ===== */ #if MEGDNN_TEGRA_X1 template <bool NeedLoadFromConstMem> void megdnn::cuda::cutlass_wrapper:: do_conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_nchw( const int8_t* /* d_src */, const int8_t* /* d_filter */, const float* /* d_bias */, const float* /* d_z */, float* /* d_dst */, int* /* workspace */, const convolution::ConvParam& /* param */, uint32_t /* nonlinear_mode */, float /* alpha */, float /* beta */, float /* gamma */, float /* scale */, const GemmCoord& /* threadblock_shape */, const GemmCoord& /* warp_shape */, int /* stages */, hipStream_t /* stream */) {} #else template <bool NeedLoadFromConstMem> void megdnn::cuda::cutlass_wrapper:: do_conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_nchw( const int8_t* d_src, const int8_t* d_filter, const float* d_bias, const float* d_z, float* d_dst, int* workspace, const convolution::ConvParam& param, uint32_t nonlinear_mode, float alpha, float beta, float gamma, float scale, const GemmCoord& threadblock_shape, const GemmCoord& warp_shape, int stages, hipStream_t stream) { #define DISPATCH_KERNEL_WITH_TILE_SHAPE(threadblock_m_, threadblock_n_, \ threadblock_k_, warp_m_, warp_n_, \ warp_k_, stages_, aligned_) \ if (threadblock_shape.m() == threadblock_m_ && \ threadblock_shape.n() == threadblock_n_ && \ threadblock_shape.k() == threadblock_k_ && \ warp_shape.m() == warp_m_ && warp_shape.n() == warp_n_ && \ warp_shape.k() == warp_k_ && stages == stages_) { \ using ThreadBlockShape = \ cutlass::gemm::GemmShape<threadblock_m_, threadblock_n_, \ threadblock_k_>; \ using WarpShape = 
cutlass::gemm::GemmShape<warp_m_, warp_n_, warp_k_>; \ using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; \ using Convolution = cutlass::conv::device::Convolution< \ int8_t, cutlass::layout::TensorNCxHWx<4>, int8_t, \ cutlass::layout::TensorCxRSKx<4>, ElementOutput, \ cutlass::layout::TensorNCHW, float, \ cutlass::layout::TensorNCHW, int32_t, \ cutlass::conv::ConvType::kConvolution, \ cutlass::arch::OpClassSimt, cutlass::arch::Sm61, \ ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, \ cutlass::conv::threadblock:: \ ConvolutionFpropNCxHWxThreadblockSwizzle, \ stages_, 4, aligned_, NeedLoadFromConstMem, \ cutlass::arch::OpMultiplyAdd>; \ typename Convolution::ConvolutionParameter conv_param( \ param.n, param.hi, param.wi, param.ci, param.co, param.fh, \ param.fw, param.ho, param.wo, param.ph, param.pw, param.sh, \ param.sw, 1, 1, cutlass::conv::Mode::kCrossCorrelation); \ return cutlass_convolution_wrapper<Convolution>( \ d_src, d_filter, d_bias, d_z, d_dst, workspace, conv_param, \ epilogue, stream); \ } #define DISPATCH_KERNEL \ DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 128, 32, 64, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 64, 32, 64, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 128, 32, 64, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 32, 32, 64, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(32, 128, 32, 32, 64, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 64, 32, 64, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(32, 64, 32, 32, 64, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 32, 32, 64, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(32, 32, 32, 32, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(16, 128, 16, 16, 128, 16, 1, 8); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(16, 64, 8, 16, 64, 8, 2, 4); \ megdnn_assert(false, \ "unsupported threadblock shape (%dx%dx%d) and warp shape " \ "(%dx%dx%d)", \ threadblock_shape.m(), threadblock_shape.n(), \ threadblock_shape.k(), warp_shape.m(), warp_shape.n(), \ warp_shape.k()); using ElementOutput = float; using ElementAccumulator = int32_t; using ElementBias = float; using ElementCompute = float; using NonlineMode = megdnn::param_enumv::ConvBias::NonlineMode; switch (nonlinear_mode) { case NonlineMode::IDENTITY: { using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombination< ElementOutput, 1, ElementAccumulator, ElementBias, ElementCompute>; typename EpilogueOp::Params epilogue{alpha, beta, gamma}; DISPATCH_KERNEL; } case NonlineMode::RELU: { using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationRelu< ElementOutput, 1, ElementAccumulator, ElementBias, ElementCompute>; typename EpilogueOp::Params epilogue{alpha, beta, gamma, 0}; DISPATCH_KERNEL; } case NonlineMode::H_SWISH: { using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwish< ElementOutput, 1, ElementAccumulator, ElementBias, ElementCompute>; typename EpilogueOp::Params epilogue{alpha, beta, gamma, scale}; DISPATCH_KERNEL; } default: megdnn_assert(false, "unsupported nonlinear mode for conv bias operator"); } #undef DISPATCH_KERNEL_WITH_TILE_SHAPE #undef DISPATCH_KERNEL } #endif #define INST(need_load_from_const_mem) \ template void megdnn::cuda::cutlass_wrapper:: \ do_conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_nchw< \ need_load_from_const_mem>( \ const int8_t* d_src, const int8_t* d_filter, \ const float* d_bias, const float* d_z, float* d_dst, \ int* workspace, const convolution::ConvParam& param, \ uint32_t nonlinear_mode, float alpha, float beta, \ float gamma, float scale, \ 
const GemmCoord& threadblock_shape, \ const GemmCoord& warp_shape, int stages, \ hipStream_t stream); INST(true); INST(false); #undef INST /* ====== cutlass kernel wrapper for nchw4 layout and nchw32 output ====== */ #if MEGDNN_TEGRA_X1 template <bool NeedLoadFromConstMem> void megdnn::cuda::cutlass_wrapper:: do_conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_ncdiv32hw32( const int8_t* /* d_src */, const int8_t* /* d_filter */, const int32_t* /* d_bias */, const int8_t* /* d_z */, int8_t* /* d_dst */, int* /* workspace */, const convolution::ConvParam& /* param */, uint32_t /* nonlinear_mode */, float /* alpha */, float /* beta */, float /* gamma */, float /* scale */, const GemmCoord& /* threadblock_shape */, const GemmCoord& /* warp_shape */, int /* stages */, hipStream_t /* stream */) {} #else template <bool NeedLoadFromConstMem> void megdnn::cuda::cutlass_wrapper:: do_conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_ncdiv32hw32( const int8_t* d_src, const int8_t* d_filter, const int32_t* d_bias, const int8_t* d_z, int8_t* d_dst, int* workspace, const convolution::ConvParam& param, uint32_t nonlinear_mode, float alpha, float beta, float gamma, float scale, const GemmCoord& threadblock_shape, const GemmCoord& warp_shape, int stages, hipStream_t stream) { #define DISPATCH_KERNEL_WITH_TILE_SHAPE(threadblock_m_, threadblock_n_, \ threadblock_k_, warp_m_, warp_n_, \ warp_k_, stages_, aligned_) \ if (threadblock_shape.m() == threadblock_m_ && \ threadblock_shape.n() == threadblock_n_ && \ threadblock_shape.k() == threadblock_k_ && \ warp_shape.m() == warp_m_ && warp_shape.n() == warp_n_ && \ warp_shape.k() == warp_k_ && stages == stages_) { \ using ThreadBlockShape = \ cutlass::gemm::GemmShape<threadblock_m_, threadblock_n_, \ threadblock_k_>; \ using WarpShape = cutlass::gemm::GemmShape<warp_m_, warp_n_, warp_k_>; \ using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; \ using Convolution = cutlass::conv::device::Convolution< \ int8_t, cutlass::layout::TensorNCxHWx<4>, int8_t, \ cutlass::layout::TensorCxRSKx<4>, ElementOutput, \ cutlass::layout::TensorNCxHWx<32>, int32_t, \ cutlass::layout::TensorNCxHWx<32>, int32_t, \ cutlass::conv::ConvType::kConvolution, \ cutlass::arch::OpClassSimt, cutlass::arch::Sm61, \ ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, \ cutlass::conv::threadblock:: \ ConvolutionFpropNCxHWxThreadblockSwizzle, \ stages_, 4, aligned_, NeedLoadFromConstMem>; \ typename Convolution::ConvolutionParameter conv_param( \ param.n, param.hi, param.wi, param.ci, param.co, param.fh, \ param.fw, param.ho, param.wo, param.ph, param.pw, param.sh, \ param.sw, 1, 1, cutlass::conv::Mode::kCrossCorrelation); \ return cutlass_convolution_wrapper<Convolution>( \ d_src, d_filter, d_bias, d_z, d_dst, workspace, conv_param, \ epilogue, stream); \ } #define DISPATCH_KERNEL \ DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 128, 32, 64, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 64, 32, 64, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 128, 32, 64, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 32, 32, 64, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(32, 128, 32, 32, 64, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 64, 32, 64, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(32, 64, 32, 32, 64, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 32, 32, 64, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(32, 32, 32, 32, 32, 32, 2, 16); \ megdnn_assert(false, \ "unsupported threadblock shape (%dx%dx%d) and warp shape " \ "(%dx%dx%d)", \ threadblock_shape.m(), 
threadblock_shape.n(), \ threadblock_shape.k(), warp_shape.m(), warp_shape.n(), \ warp_shape.k()); using ElementOutput = int8_t; using ElementAccumulator = int32_t; using ElementBias = int32_t; using ElementCompute = float; using NonlineMode = megdnn::param_enumv::ConvBias::NonlineMode; switch (nonlinear_mode) { case NonlineMode::IDENTITY: { using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp< ElementOutput, 4, ElementAccumulator, ElementBias, ElementCompute>; typename EpilogueOp::Params epilogue{alpha, beta, gamma}; DISPATCH_KERNEL; } case NonlineMode::RELU: { using EpilogueOp = cutlass::epilogue::thread:: BiasAddLinearCombinationReluClamp< ElementOutput, 4, ElementAccumulator, ElementBias, ElementCompute>; typename EpilogueOp::Params epilogue{alpha, beta, gamma, 0}; DISPATCH_KERNEL; } case NonlineMode::H_SWISH: { using EpilogueOp = cutlass::epilogue::thread:: BiasAddLinearCombinationHSwishClamp< ElementOutput, 4, ElementAccumulator, ElementBias, ElementCompute>; typename EpilogueOp::Params epilogue{alpha, beta, gamma, scale}; DISPATCH_KERNEL; } default: megdnn_assert(false, "unsupported nonlinear mode for conv bias operator"); } #undef DISPATCH_KERNEL_WITH_TILE_SHAPE #undef DISPATCH_KERNEL } #endif #define INST(need_load_from_const_mem) \ template void megdnn::cuda::cutlass_wrapper:: \ do_conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_ncdiv32hw32< \ need_load_from_const_mem>( \ const int8_t* d_src, const int8_t* d_filter, \ const int32_t* d_bias, const int8_t* d_z, int8_t* d_dst, \ int* workspace, const convolution::ConvParam& param, \ uint32_t nonlinear_mode, float alpha, float beta, \ float gamma, float scale, \ const GemmCoord& threadblock_shape, \ const GemmCoord& warp_shape, int stages, \ hipStream_t stream); INST(true); INST(false); #undef INST // vim: syntax=cuda.doxygen
11eca49197273ebd5522d761a7ecbe360859c61b.cu
/** * \file dnn/src/cuda/conv_bias/cutlass_convolution_wrapper.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2021 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. */ // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #if !MEGDNN_TEGRA_X1 #include "cutlass/convolution/device/convolution.h" #endif #include "src/common/opr_param_defs_enumv.cuh" #include "src/cuda/conv_bias/cutlass_convolution_wrapper.cuh" #pragma GCC diagnostic pop using namespace megdnn; using namespace cuda; using namespace cutlass_wrapper; /* ================= cutlass kernel wrapper for nchw32 layout ================ */ #if MEGDNN_TEGRA_X1 template <bool NeedLoadFromConstMem> void megdnn::cuda::cutlass_wrapper:: do_conv_bias_int8_implicit_gemm_imma_ncdiv32hw32( const int8_t* /* d_src */, const int8_t* /* d_filter */, const int32_t* /* d_bias */, const int8_t* /* d_z */, int8_t* /* d_dst */, int* /* workspace */, const convolution::ConvParam& /* param */, uint32_t /* nonlinear_mode */, float /* alpha */, float /* beta */, float /* gamma */, float /* scale */, const GemmCoord& /* threadblock_shape */, const GemmCoord& /* warp_shape */, cudaStream_t /* stream */) {} #else template <bool NeedLoadFromConstMem> void megdnn::cuda::cutlass_wrapper:: do_conv_bias_int8_implicit_gemm_imma_ncdiv32hw32( const int8_t* d_src, const int8_t* d_filter, const int32_t* d_bias, const int8_t* d_z, int8_t* d_dst, int* workspace, const convolution::ConvParam& param, uint32_t nonlinear_mode, float alpha, float beta, float gamma, float scale, const GemmCoord& threadblock_shape, const GemmCoord& warp_shape, cudaStream_t stream) { #define DISPATCH_KERNEL_WITH_TILE_SHAPE(threadblock_m_, threadblock_n_, \ threadblock_k_, warp_m_, warp_n_, \ warp_k_) \ if (threadblock_shape.m() == threadblock_m_ && \ threadblock_shape.n() == threadblock_n_ && \ threadblock_shape.k() == threadblock_k_ && \ warp_shape.m() == warp_m_ && warp_shape.n() == warp_n_ && \ warp_shape.k() == warp_k_) { \ using ThreadBlockShape = \ cutlass::gemm::GemmShape<threadblock_m_, threadblock_n_, \ threadblock_k_>; \ using WarpShape = cutlass::gemm::GemmShape<warp_m_, warp_n_, warp_k_>; \ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; \ using Convolution = cutlass::conv::device::Convolution< \ int8_t, cutlass::layout::TensorNCxHWx<32>, int8_t, \ cutlass::layout::TensorCxRSKx<32>, ElementOutput, \ cutlass::layout::TensorNCxHWx<32>, int32_t, \ cutlass::layout::TensorNCxHWx<32>, int32_t, \ cutlass::conv::ConvType::kConvolution, \ cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, \ ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, \ cutlass::conv::threadblock:: \ ConvolutionFpropNCxHWxThreadblockSwizzle, \ 2, 16, 16, NeedLoadFromConstMem>; \ typename Convolution::ConvolutionParameter conv_param( \ param.n, param.hi, param.wi, param.ci, param.co, param.fh, \ param.fw, param.ho, param.wo, param.ph, param.pw, param.sh, \ param.sw, 1, 1, cutlass::conv::Mode::kCrossCorrelation); \ return cutlass_convolution_wrapper<Convolution>( \ d_src, d_filter, d_bias, d_z, d_dst, workspace, conv_param, \ epilogue, stream); \ } #define DISPATCH_KERNEL \ DISPATCH_KERNEL_WITH_TILE_SHAPE(256, 128, 64, 64, 64, 64); \ 
DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 256, 64, 64, 64, 64); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 128, 64, 64, 64, 64); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 128, 64, 32, 64, 64); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 64, 64, 64, 32, 64); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 64, 64, 32, 32, 64); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(32, 64, 64, 32, 16, 64); \ megdnn_assert(false, \ "unsupported threadblock shape (%dx%dx%d) and warp shape " \ "(%dx%dx%d)", \ threadblock_shape.m(), threadblock_shape.n(), \ threadblock_shape.k(), warp_shape.m(), warp_shape.n(), \ warp_shape.k()); using ElementOutput = int8_t; using ElementAccumulator = int32_t; using ElementBias = int32_t; using ElementCompute = float; using NonlineMode = megdnn::param_enumv::ConvBias::NonlineMode; switch (nonlinear_mode) { case NonlineMode::IDENTITY: { using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp< ElementOutput, 8, ElementAccumulator, ElementBias, ElementCompute>; typename EpilogueOp::Params epilogue{alpha, beta, gamma}; DISPATCH_KERNEL; } case NonlineMode::RELU: { using EpilogueOp = cutlass::epilogue::thread:: BiasAddLinearCombinationReluClamp< ElementOutput, 8, ElementAccumulator, ElementBias, ElementCompute>; typename EpilogueOp::Params epilogue{alpha, beta, gamma, 0}; DISPATCH_KERNEL; } case NonlineMode::H_SWISH: { using EpilogueOp = cutlass::epilogue::thread:: BiasAddLinearCombinationHSwishClamp< ElementOutput, 8, ElementAccumulator, ElementBias, ElementCompute>; typename EpilogueOp::Params epilogue{alpha, beta, gamma, scale}; DISPATCH_KERNEL; } default: megdnn_assert(false, "unsupported nonlinear mode for conv bias operator"); } #undef DISPATCH_KERNEL_WITH_TILE_SHAPE #undef DISPATCH_KERNEL } #endif #define INST(need_load_from_const_mem) \ template void megdnn::cuda::cutlass_wrapper:: \ do_conv_bias_int8_implicit_gemm_imma_ncdiv32hw32< \ need_load_from_const_mem>( \ const int8_t* d_src, const int8_t* d_filter, \ const int32_t* d_bias, const int8_t* d_z, int8_t* d_dst, \ int* workspace, const convolution::ConvParam& param, \ uint32_t nonlinear_mode, float alpha, float beta, \ float gamma, float scale, \ const GemmCoord& threadblock_shape, \ const GemmCoord& warp_shape, cudaStream_t stream); INST(true); INST(false); #undef INST /* ==== cutlass kernel wrapper for nchw32 layout and nchw4 output ===== */ #if MEGDNN_TEGRA_X1 template <bool NeedLoadFromConstMem> void megdnn::cuda::cutlass_wrapper:: do_conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_ncdiv4hw4( const int8_t* /* d_src */, const int8_t* /* d_filter */, const int32_t* /* d_bias */, const int8_t* /* d_z */, int8_t* /* d_dst */, int* /* workspace */, const convolution::ConvParam& /* param */, uint32_t /* nonlinear_mode */, float /* alpha */, float /* beta */, float /* gamma */, float /* scale */, const GemmCoord& /* threadblock_shape */, const GemmCoord& /* warp_shape */, cudaStream_t /* stream */) {} #else template <bool NeedLoadFromConstMem> void megdnn::cuda::cutlass_wrapper:: do_conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_ncdiv4hw4( const int8_t* d_src, const int8_t* d_filter, const int32_t* d_bias, const int8_t* d_z, int8_t* d_dst, int* workspace, const convolution::ConvParam& param, uint32_t nonlinear_mode, float alpha, float beta, float gamma, float scale, const GemmCoord& threadblock_shape, const GemmCoord& warp_shape, cudaStream_t stream) { #define DISPATCH_KERNEL_WITH_TILE_SHAPE(threadblock_m_, threadblock_n_, \ threadblock_k_, warp_m_, warp_n_, \ warp_k_) \ if (threadblock_shape.m() == threadblock_m_ && \ threadblock_shape.n() 
== threadblock_n_ && \ threadblock_shape.k() == threadblock_k_ && \ warp_shape.m() == warp_m_ && warp_shape.n() == warp_n_ && \ warp_shape.k() == warp_k_) { \ using ThreadBlockShape = \ cutlass::gemm::GemmShape<threadblock_m_, threadblock_n_, \ threadblock_k_>; \ using WarpShape = cutlass::gemm::GemmShape<warp_m_, warp_n_, warp_k_>; \ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; \ using Convolution = cutlass::conv::device::Convolution< \ int8_t, cutlass::layout::TensorNCxHWx<32>, int8_t, \ cutlass::layout::TensorCxRSKx<32>, ElementOutput, \ cutlass::layout::TensorNCxHWx<4>, int32_t, \ cutlass::layout::TensorNCxHWx<4>, int32_t, \ cutlass::conv::ConvType::kConvolution, \ cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, \ ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, \ cutlass::conv::threadblock:: \ ConvolutionFpropNCxHWxThreadblockSwizzle, \ 2, 16, 16, NeedLoadFromConstMem>; \ typename Convolution::ConvolutionParameter conv_param( \ param.n, param.hi, param.wi, param.ci, param.co, param.fh, \ param.fw, param.ho, param.wo, param.ph, param.pw, param.sh, \ param.sw, 1, 1, cutlass::conv::Mode::kCrossCorrelation); \ return cutlass_convolution_wrapper<Convolution>( \ d_src, d_filter, d_bias, d_z, d_dst, workspace, conv_param, \ epilogue, stream); \ } #define DISPATCH_KERNEL \ DISPATCH_KERNEL_WITH_TILE_SHAPE(256, 128, 64, 64, 64, 64); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 256, 64, 64, 64, 64); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 128, 64, 64, 64, 64); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 128, 64, 32, 64, 64); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 64, 64, 64, 32, 64); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 64, 64, 32, 32, 64); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(32, 64, 64, 16, 32, 64); \ megdnn_assert(false, \ "unsupported threadblock shape (%dx%dx%d) and warp shape " \ "(%dx%dx%d)", \ threadblock_shape.m(), threadblock_shape.n(), \ threadblock_shape.k(), warp_shape.m(), warp_shape.n(), \ warp_shape.k()); using ElementOutput = int8_t; using ElementAccumulator = int32_t; using ElementBias = int32_t; using ElementCompute = float; using NonlineMode = megdnn::param_enumv::ConvBias::NonlineMode; switch (nonlinear_mode) { case NonlineMode::IDENTITY: { using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp< ElementOutput, 4, ElementAccumulator, ElementBias, ElementCompute>; typename EpilogueOp::Params epilogue{alpha, beta, gamma}; DISPATCH_KERNEL; } case NonlineMode::RELU: { using EpilogueOp = cutlass::epilogue::thread:: BiasAddLinearCombinationReluClamp< ElementOutput, 4, ElementAccumulator, ElementBias, ElementCompute>; typename EpilogueOp::Params epilogue{alpha, beta, gamma, 0}; DISPATCH_KERNEL; } case NonlineMode::H_SWISH: { using EpilogueOp = cutlass::epilogue::thread:: BiasAddLinearCombinationHSwishClamp< ElementOutput, 4, ElementAccumulator, ElementBias, ElementCompute>; typename EpilogueOp::Params epilogue{alpha, beta, gamma, scale}; DISPATCH_KERNEL; } default: megdnn_assert(false, "unsupported nonlinear mode for conv bias operator"); } #undef DISPATCH_KERNEL_WITH_TILE_SHAPE #undef DISPATCH_KERNEL } #endif #define INST(need_load_from_const_mem) \ template void megdnn::cuda::cutlass_wrapper:: \ do_conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_ncdiv4hw4< \ need_load_from_const_mem>( \ const int8_t* d_src, const int8_t* d_filter, \ const int32_t* d_bias, const int8_t* d_z, int8_t* d_dst, \ int* workspace, const convolution::ConvParam& param, \ uint32_t nonlinear_mode, float alpha, float beta, \ float gamma, float scale, \ const GemmCoord& 
threadblock_shape, \ const GemmCoord& warp_shape, cudaStream_t stream); INST(true); INST(false); #undef INST /* ================ cutlass kernel wrapper for nchw4 layout ================= */ #if MEGDNN_TEGRA_X1 template <bool NeedLoadFromConstMem> void megdnn::cuda::cutlass_wrapper:: do_conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4( const int8_t* /* d_src */, const int8_t* /* d_filter */, const int32_t* /* d_bias */, const int8_t* /* d_z */, int8_t* /* d_dst */, int* /* workspace */, const convolution::ConvParam& /* param */, uint32_t /* nonlinear_mode */, float /* alpha */, float /* beta */, float /* gamma */, float /* scale */, const GemmCoord& /* threadblock_shape */, const GemmCoord& /* warp_shape */, int /* stages */, cudaStream_t /* stream */) {} #else template <bool NeedLoadFromConstMem> void megdnn::cuda::cutlass_wrapper:: do_conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4( const int8_t* d_src, const int8_t* d_filter, const int32_t* d_bias, const int8_t* d_z, int8_t* d_dst, int* workspace, const convolution::ConvParam& param, uint32_t nonlinear_mode, float alpha, float beta, float gamma, float scale, const GemmCoord& threadblock_shape, const GemmCoord& warp_shape, int stages, cudaStream_t stream) { #define DISPATCH_KERNEL_WITH_TILE_SHAPE(threadblock_m_, threadblock_n_, \ threadblock_k_, warp_m_, warp_n_, \ warp_k_, stage_, aligned_) \ if (threadblock_shape.m() == threadblock_m_ && \ threadblock_shape.n() == threadblock_n_ && \ threadblock_shape.k() == threadblock_k_ && \ warp_shape.m() == warp_m_ && warp_shape.n() == warp_n_ && \ warp_shape.k() == warp_k_ && stages == stage_) { \ using ThreadBlockShape = \ cutlass::gemm::GemmShape<threadblock_m_, threadblock_n_, \ threadblock_k_>; \ using WarpShape = cutlass::gemm::GemmShape<warp_m_, warp_n_, warp_k_>; \ using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; \ using Convolution = cutlass::conv::device::Convolution< \ int8_t, cutlass::layout::TensorNCxHWx<4>, int8_t, \ cutlass::layout::TensorCxRSKx<4>, ElementOutput, \ cutlass::layout::TensorNCxHWx<4>, int32_t, \ cutlass::layout::TensorNCxHWx<4>, int32_t, \ cutlass::conv::ConvType::kConvolution, \ cutlass::arch::OpClassSimt, cutlass::arch::Sm61, \ ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, \ cutlass::conv::threadblock:: \ ConvolutionFpropNCxHWxThreadblockSwizzle, \ stage_, 4, aligned_, NeedLoadFromConstMem>; \ typename Convolution::ConvolutionParameter conv_param( \ param.n, param.hi, param.wi, param.ci, param.co, param.fh, \ param.fw, param.ho, param.wo, param.ph, param.pw, param.sh, \ param.sw, 1, 1, cutlass::conv::Mode::kCrossCorrelation); \ return cutlass_convolution_wrapper<Convolution>( \ d_src, d_filter, d_bias, d_z, d_dst, workspace, conv_param, \ epilogue, stream); \ } #define DISPATCH_KERNEL \ DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 128, 32, 64, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 64, 32, 64, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 128, 32, 64, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 32, 32, 64, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(32, 128, 32, 32, 64, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 64, 32, 64, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(32, 64, 32, 32, 64, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 32, 32, 64, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(32, 32, 32, 32, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(16, 128, 16, 16, 128, 16, 1, 8); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(16, 64, 8, 16, 64, 8, 2, 4); \ megdnn_assert(false, \ "unsupported threadblock 
shape (%dx%dx%d) and warp shape " \ "(%dx%dx%d)", \ threadblock_shape.m(), threadblock_shape.n(), \ threadblock_shape.k(), warp_shape.m(), warp_shape.n(), \ warp_shape.k()); using ElementOutput = int8_t; using ElementAccumulator = int32_t; using ElementBias = int32_t; using ElementCompute = float; using NonlineMode = megdnn::param_enumv::ConvBias::NonlineMode; switch (nonlinear_mode) { case NonlineMode::IDENTITY: { using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp< ElementOutput, 4, ElementAccumulator, ElementBias, ElementCompute>; typename EpilogueOp::Params epilogue{alpha, beta, gamma}; DISPATCH_KERNEL; } case NonlineMode::RELU: { using EpilogueOp = cutlass::epilogue::thread:: BiasAddLinearCombinationReluClamp< ElementOutput, 4, ElementAccumulator, ElementBias, ElementCompute>; typename EpilogueOp::Params epilogue{alpha, beta, gamma, 0}; DISPATCH_KERNEL; } case NonlineMode::H_SWISH: { using EpilogueOp = cutlass::epilogue::thread:: BiasAddLinearCombinationHSwishClamp< ElementOutput, 4, ElementAccumulator, ElementBias, ElementCompute>; typename EpilogueOp::Params epilogue{alpha, beta, gamma, scale}; DISPATCH_KERNEL; } default: megdnn_assert(false, "unsupported nonlinear mode for conv bias operator"); } #undef DISPATCH_KERNEL_WITH_TILE_SHAPE #undef DISPATCH_KERNEL } #endif #define INST(need_load_from_const_mem) \ template void megdnn::cuda::cutlass_wrapper:: \ do_conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4< \ need_load_from_const_mem>( \ const int8_t* d_src, const int8_t* d_filter, \ const int32_t* d_bias, const int8_t* d_z, int8_t* d_dst, \ int* workspace, const convolution::ConvParam& param, \ uint32_t nonlinear_mode, float alpha, float beta, \ float gamma, float scale, \ const GemmCoord& threadblock_shape, \ const GemmCoord& warp_shape, int stages, \ cudaStream_t stream); INST(true); INST(false); #undef INST /* ===== cutlass kernel wrapper for nchw4 layout and nchw output ===== */ #if MEGDNN_TEGRA_X1 template <bool NeedLoadFromConstMem> void megdnn::cuda::cutlass_wrapper:: do_conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_nchw( const int8_t* /* d_src */, const int8_t* /* d_filter */, const float* /* d_bias */, const float* /* d_z */, float* /* d_dst */, int* /* workspace */, const convolution::ConvParam& /* param */, uint32_t /* nonlinear_mode */, float /* alpha */, float /* beta */, float /* gamma */, float /* scale */, const GemmCoord& /* threadblock_shape */, const GemmCoord& /* warp_shape */, int /* stages */, cudaStream_t /* stream */) {} #else template <bool NeedLoadFromConstMem> void megdnn::cuda::cutlass_wrapper:: do_conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_nchw( const int8_t* d_src, const int8_t* d_filter, const float* d_bias, const float* d_z, float* d_dst, int* workspace, const convolution::ConvParam& param, uint32_t nonlinear_mode, float alpha, float beta, float gamma, float scale, const GemmCoord& threadblock_shape, const GemmCoord& warp_shape, int stages, cudaStream_t stream) { #define DISPATCH_KERNEL_WITH_TILE_SHAPE(threadblock_m_, threadblock_n_, \ threadblock_k_, warp_m_, warp_n_, \ warp_k_, stages_, aligned_) \ if (threadblock_shape.m() == threadblock_m_ && \ threadblock_shape.n() == threadblock_n_ && \ threadblock_shape.k() == threadblock_k_ && \ warp_shape.m() == warp_m_ && warp_shape.n() == warp_n_ && \ warp_shape.k() == warp_k_ && stages == stages_) { \ using ThreadBlockShape = \ cutlass::gemm::GemmShape<threadblock_m_, threadblock_n_, \ threadblock_k_>; \ using WarpShape = cutlass::gemm::GemmShape<warp_m_, warp_n_, warp_k_>; \ using 
InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; \ using Convolution = cutlass::conv::device::Convolution< \ int8_t, cutlass::layout::TensorNCxHWx<4>, int8_t, \ cutlass::layout::TensorCxRSKx<4>, ElementOutput, \ cutlass::layout::TensorNCHW, float, \ cutlass::layout::TensorNCHW, int32_t, \ cutlass::conv::ConvType::kConvolution, \ cutlass::arch::OpClassSimt, cutlass::arch::Sm61, \ ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, \ cutlass::conv::threadblock:: \ ConvolutionFpropNCxHWxThreadblockSwizzle, \ stages_, 4, aligned_, NeedLoadFromConstMem, \ cutlass::arch::OpMultiplyAdd>; \ typename Convolution::ConvolutionParameter conv_param( \ param.n, param.hi, param.wi, param.ci, param.co, param.fh, \ param.fw, param.ho, param.wo, param.ph, param.pw, param.sh, \ param.sw, 1, 1, cutlass::conv::Mode::kCrossCorrelation); \ return cutlass_convolution_wrapper<Convolution>( \ d_src, d_filter, d_bias, d_z, d_dst, workspace, conv_param, \ epilogue, stream); \ } #define DISPATCH_KERNEL \ DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 128, 32, 64, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 64, 32, 64, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 128, 32, 64, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 32, 32, 64, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(32, 128, 32, 32, 64, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 64, 32, 64, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(32, 64, 32, 32, 64, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 32, 32, 64, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(32, 32, 32, 32, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(16, 128, 16, 16, 128, 16, 1, 8); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(16, 64, 8, 16, 64, 8, 2, 4); \ megdnn_assert(false, \ "unsupported threadblock shape (%dx%dx%d) and warp shape " \ "(%dx%dx%d)", \ threadblock_shape.m(), threadblock_shape.n(), \ threadblock_shape.k(), warp_shape.m(), warp_shape.n(), \ warp_shape.k()); using ElementOutput = float; using ElementAccumulator = int32_t; using ElementBias = float; using ElementCompute = float; using NonlineMode = megdnn::param_enumv::ConvBias::NonlineMode; switch (nonlinear_mode) { case NonlineMode::IDENTITY: { using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombination< ElementOutput, 1, ElementAccumulator, ElementBias, ElementCompute>; typename EpilogueOp::Params epilogue{alpha, beta, gamma}; DISPATCH_KERNEL; } case NonlineMode::RELU: { using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationRelu< ElementOutput, 1, ElementAccumulator, ElementBias, ElementCompute>; typename EpilogueOp::Params epilogue{alpha, beta, gamma, 0}; DISPATCH_KERNEL; } case NonlineMode::H_SWISH: { using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwish< ElementOutput, 1, ElementAccumulator, ElementBias, ElementCompute>; typename EpilogueOp::Params epilogue{alpha, beta, gamma, scale}; DISPATCH_KERNEL; } default: megdnn_assert(false, "unsupported nonlinear mode for conv bias operator"); } #undef DISPATCH_KERNEL_WITH_TILE_SHAPE #undef DISPATCH_KERNEL } #endif #define INST(need_load_from_const_mem) \ template void megdnn::cuda::cutlass_wrapper:: \ do_conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_nchw< \ need_load_from_const_mem>( \ const int8_t* d_src, const int8_t* d_filter, \ const float* d_bias, const float* d_z, float* d_dst, \ int* workspace, const convolution::ConvParam& param, \ uint32_t nonlinear_mode, float alpha, float beta, \ float gamma, float scale, \ const GemmCoord& threadblock_shape, \ const GemmCoord& 
warp_shape, int stages, \ cudaStream_t stream); INST(true); INST(false); #undef INST /* ====== cutlass kernel wrapper for nchw4 layout and nchw32 output ====== */ #if MEGDNN_TEGRA_X1 template <bool NeedLoadFromConstMem> void megdnn::cuda::cutlass_wrapper:: do_conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_ncdiv32hw32( const int8_t* /* d_src */, const int8_t* /* d_filter */, const int32_t* /* d_bias */, const int8_t* /* d_z */, int8_t* /* d_dst */, int* /* workspace */, const convolution::ConvParam& /* param */, uint32_t /* nonlinear_mode */, float /* alpha */, float /* beta */, float /* gamma */, float /* scale */, const GemmCoord& /* threadblock_shape */, const GemmCoord& /* warp_shape */, int /* stages */, cudaStream_t /* stream */) {} #else template <bool NeedLoadFromConstMem> void megdnn::cuda::cutlass_wrapper:: do_conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_ncdiv32hw32( const int8_t* d_src, const int8_t* d_filter, const int32_t* d_bias, const int8_t* d_z, int8_t* d_dst, int* workspace, const convolution::ConvParam& param, uint32_t nonlinear_mode, float alpha, float beta, float gamma, float scale, const GemmCoord& threadblock_shape, const GemmCoord& warp_shape, int stages, cudaStream_t stream) { #define DISPATCH_KERNEL_WITH_TILE_SHAPE(threadblock_m_, threadblock_n_, \ threadblock_k_, warp_m_, warp_n_, \ warp_k_, stages_, aligned_) \ if (threadblock_shape.m() == threadblock_m_ && \ threadblock_shape.n() == threadblock_n_ && \ threadblock_shape.k() == threadblock_k_ && \ warp_shape.m() == warp_m_ && warp_shape.n() == warp_n_ && \ warp_shape.k() == warp_k_ && stages == stages_) { \ using ThreadBlockShape = \ cutlass::gemm::GemmShape<threadblock_m_, threadblock_n_, \ threadblock_k_>; \ using WarpShape = cutlass::gemm::GemmShape<warp_m_, warp_n_, warp_k_>; \ using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; \ using Convolution = cutlass::conv::device::Convolution< \ int8_t, cutlass::layout::TensorNCxHWx<4>, int8_t, \ cutlass::layout::TensorCxRSKx<4>, ElementOutput, \ cutlass::layout::TensorNCxHWx<32>, int32_t, \ cutlass::layout::TensorNCxHWx<32>, int32_t, \ cutlass::conv::ConvType::kConvolution, \ cutlass::arch::OpClassSimt, cutlass::arch::Sm61, \ ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, \ cutlass::conv::threadblock:: \ ConvolutionFpropNCxHWxThreadblockSwizzle, \ stages_, 4, aligned_, NeedLoadFromConstMem>; \ typename Convolution::ConvolutionParameter conv_param( \ param.n, param.hi, param.wi, param.ci, param.co, param.fh, \ param.fw, param.ho, param.wo, param.ph, param.pw, param.sh, \ param.sw, 1, 1, cutlass::conv::Mode::kCrossCorrelation); \ return cutlass_convolution_wrapper<Convolution>( \ d_src, d_filter, d_bias, d_z, d_dst, workspace, conv_param, \ epilogue, stream); \ } #define DISPATCH_KERNEL \ DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 128, 32, 64, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 64, 32, 64, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 128, 32, 64, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 32, 32, 64, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(32, 128, 32, 32, 64, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 64, 32, 64, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(32, 64, 32, 32, 64, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 32, 32, 64, 32, 32, 2, 16); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(32, 32, 32, 32, 32, 32, 2, 16); \ megdnn_assert(false, \ "unsupported threadblock shape (%dx%dx%d) and warp shape " \ "(%dx%dx%d)", \ threadblock_shape.m(), threadblock_shape.n(), \ threadblock_shape.k(), 
warp_shape.m(), warp_shape.n(), \ warp_shape.k()); using ElementOutput = int8_t; using ElementAccumulator = int32_t; using ElementBias = int32_t; using ElementCompute = float; using NonlineMode = megdnn::param_enumv::ConvBias::NonlineMode; switch (nonlinear_mode) { case NonlineMode::IDENTITY: { using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp< ElementOutput, 4, ElementAccumulator, ElementBias, ElementCompute>; typename EpilogueOp::Params epilogue{alpha, beta, gamma}; DISPATCH_KERNEL; } case NonlineMode::RELU: { using EpilogueOp = cutlass::epilogue::thread:: BiasAddLinearCombinationReluClamp< ElementOutput, 4, ElementAccumulator, ElementBias, ElementCompute>; typename EpilogueOp::Params epilogue{alpha, beta, gamma, 0}; DISPATCH_KERNEL; } case NonlineMode::H_SWISH: { using EpilogueOp = cutlass::epilogue::thread:: BiasAddLinearCombinationHSwishClamp< ElementOutput, 4, ElementAccumulator, ElementBias, ElementCompute>; typename EpilogueOp::Params epilogue{alpha, beta, gamma, scale}; DISPATCH_KERNEL; } default: megdnn_assert(false, "unsupported nonlinear mode for conv bias operator"); } #undef DISPATCH_KERNEL_WITH_TILE_SHAPE #undef DISPATCH_KERNEL } #endif #define INST(need_load_from_const_mem) \ template void megdnn::cuda::cutlass_wrapper:: \ do_conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_ncdiv32hw32< \ need_load_from_const_mem>( \ const int8_t* d_src, const int8_t* d_filter, \ const int32_t* d_bias, const int8_t* d_z, int8_t* d_dst, \ int* workspace, const convolution::ConvParam& param, \ uint32_t nonlinear_mode, float alpha, float beta, \ float gamma, float scale, \ const GemmCoord& threadblock_shape, \ const GemmCoord& warp_shape, int stages, \ cudaStream_t stream); INST(true); INST(false); #undef INST // vim: syntax=cuda.doxygen
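Both wrapper files in the pair above are built around the same tile-shape dispatch idiom: the runtime threadblock/warp GemmCoord values are compared against a fixed list, and on a match a fully compile-time cutlass::conv::device::Convolution specialization is instantiated and run. The short host-side sketch below shows that idiom in isolation, with the CUTLASS machinery stripped out; Shape3, run_tile and DISPATCH_TILE are illustrative names for this sketch only, not symbols from the files above.

#include <cstdio>

struct Shape3 { int m, n, k; };   // stands in for cutlass_wrapper::GemmCoord

// Stands in for one fully specialized Convolution instantiation.
template <int M, int N, int K>
void run_tile() {
    std::printf("dispatched %dx%dx%d threadblock tile\n", M, N, K);
}

// Mirrors DISPATCH_KERNEL_WITH_TILE_SHAPE: compare the runtime shape fields
// and, on a match, return through the compile-time specialization.
#define DISPATCH_TILE(m_, n_, k_)                      \
    if (tb.m == m_ && tb.n == n_ && tb.k == k_) {      \
        return run_tile<m_, n_, k_>();                 \
    }

void dispatch(Shape3 tb) {
    DISPATCH_TILE(128, 128, 32);
    DISPATCH_TILE(64, 128, 32);
    DISPATCH_TILE(32, 64, 32);
    // Falls through when no supported tile matches, like the megdnn_assert above.
    std::printf("unsupported threadblock shape %dx%dx%d\n", tb.m, tb.n, tb.k);
}

int main() {
    dispatch({128, 128, 32});   // hits the first specialization
    dispatch({17, 3, 5});       // falls through to the error path
    return 0;
}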
522c3a803e7820620e0c5f9c07254d4b866e1b7a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Test for measure the performance of the multiple-precision integer routines */ #include <stdio.h> #include <stdlib.h> #include "omp.h" #include "gmp.h" #include "../src/mpint.cuh" #include "tsthelper.cuh" #include "logger.cuh" #include "timers.cuh" enum mpintTestType { add_test, sub_test, mul_test, div_test }; #define ITERATIONS 1000000 /* * CUDA tests */ __global__ static void testCudaMpAdd(mp_int_t * dz, mp_int_t * dx, mp_int_t * dy, int vectorSize) { int idx = blockIdx.x * blockDim.x + threadIdx.x; while (idx < vectorSize) { cuda::mpint_add(&dz[idx],&dx[idx],&dy[idx]); idx += gridDim.x * blockDim.x; } } __global__ static void testCudaMpSub(mp_int_t * dz, mp_int_t * dx, mp_int_t * dy, int vectorSize) { int idx = blockIdx.x * blockDim.x + threadIdx.x; while (idx < vectorSize) { cuda::mpint_sub(&dz[idx],&dx[idx],&dy[idx]); idx += gridDim.x * blockDim.x; } } __global__ static void testCudaMpMul(mp_int_t * dz, mp_int_t * dx, mp_int_t * dy, int vectorSize) { int idx = blockIdx.x * blockDim.x + threadIdx.x; while (idx < vectorSize) { cuda::mpint_mul(&dz[idx],&dx[idx],&dy[idx]); idx += gridDim.x * blockDim.x; } } __global__ static void testCudaMpDiv(mp_int_t * dz, mp_int_t * dx, mp_int_t * dy, int vectorSize) { int idx = blockIdx.x * blockDim.x + threadIdx.x; while (idx < vectorSize) { cuda::mpint_div(&dz[idx],&dx[idx],&dy[idx]); idx += gridDim.x * blockDim.x; } } /* * Common methods */ static void resetResult(mp_int_t * r, int vectorSize){ #pragma omp parallel for for(auto i = 0; i < vectorSize; i++){ mpint_set_i(&r[i], 0); } } static void resetResult(mpz_t * r, int vectorSize){ #pragma omp parallel for for(auto i = 0; i < vectorSize; i++){ mpz_set_ui(r[i], 0); } } __global__ static void resetResultCuda(mp_int_t * r, int vectorSize) { int idx = blockIdx.x * blockDim.x + threadIdx.x; while (idx < vectorSize) { cuda::mpint_set(&r[idx], &cuda::MPINT_ZERO); idx += gridDim.x * blockDim.x; } } static void checkResult(mpz_t * ref, mp_int_t * res, int vectorSize){ int errors = 0; mpz_t mp; mpz_init(mp); for(int i = 0; i < vectorSize; i++){ mpint_get_mpz(mp, &res[i]); if(mpz_cmp(mp, ref[i]) != 0){ errors++; } } if(errors == 0){ printf("All results match\n"); }else{ printf("Count of errors: %i\n", errors); } mpz_clear(mp); } /* * Main test */ static void run_test(int iterations, mpintTestType testType, RandomBoundType randomBoundType, bool allowNegative) { InitCpuTimer(); InitCudaTimer(); //Execution configuration int threads = 32; int blocks = iterations / threads + (iterations % threads ? 
1 : 0); // Multiple-precision GMP host data mpz_t * mpzx = new mpz_t[iterations]; mpz_t * mpzy = new mpz_t[iterations]; mpz_t * mpzz = new mpz_t[iterations]; // Multiple-precision mp_int host data auto *hx = new mp_int_t[iterations]; auto *hy = new mp_int_t[iterations]; auto *hz = new mp_int_t[iterations]; //GPU data mp_int_t * dx; mp_int_t * dy; mp_int_t * dz; //Memory allocation for(int i = 0; i < iterations; i++){ mpz_init(mpzx[i]); mpz_init(mpzy[i]); mpz_init(mpzz[i]); } hipMalloc(&dx, sizeof(mp_int_t) * iterations); hipMalloc(&dy, sizeof(mp_int_t) * iterations); hipMalloc(&dz, sizeof(mp_int_t) * iterations); checkDeviceHasErrors(hipDeviceSynchronize()); cudaCheckErrors(); //Generate inputs fill_random_array(mpzx, iterations, randomBoundType, allowNegative); waitFor(5); fill_random_array(mpzy, iterations, randomBoundType, allowNegative); //Convert to the RNS #pragma omp parallel for for(int i = 0; i < iterations; i++){ mpint_set_mpz(&hx[i], mpzx[i]); mpint_set_mpz(&hy[i], mpzy[i]); } // Copying to the GPU hipMemcpy(dx, hx, sizeof(mp_int_t) * iterations, hipMemcpyHostToDevice); hipMemcpy(dy, hy, sizeof(mp_int_t) * iterations, hipMemcpyHostToDevice); checkDeviceHasErrors(hipDeviceSynchronize()); cudaCheckErrors(); switch (testType) { case add_test: //--------------------------------------------------------- // GMP add testing //--------------------------------------------------------- Logger::printDash(); PrintTimerName("[CPU] GNU MP mpz_add"); resetResult(mpzz, iterations); StartCpuTimer(); #pragma omp parallel for for(int i = 0; i < iterations; i++){ mpz_add(mpzz[i],mpzx[i],mpzy[i]); } EndCpuTimer(); PrintCpuTimer("took"); //--------------------------------------------------------- // MPINT add testing //--------------------------------------------------------- Logger::printDash(); PrintTimerName("[CPU] GRNS mpint_add"); resetResult(hz, iterations); StartCpuTimer(); #pragma omp parallel for for(int i = 0; i < iterations; i++){ mpint_add(&hz[i],&hx[i],&hy[i]); } EndCpuTimer(); PrintCpuTimer("took"); checkResult(mpzz, hz, iterations); //--------------------------------------------------------- // MPINT CUDA add testing //--------------------------------------------------------- Logger::printDash(); PrintTimerName("[CUDA] GRNS mpint_add"); resetResult(hz, iterations); hipLaunchKernelGGL(( resetResultCuda), dim3(blocks),dim3(threads), 0, 0, dz, iterations); checkDeviceHasErrors(hipDeviceSynchronize()); cudaCheckErrors(); //Launch StartCudaTimer(); hipLaunchKernelGGL(( testCudaMpAdd), dim3(blocks),dim3(threads), 0, 0, dz, dx, dy, iterations); EndCudaTimer(); PrintCudaTimer("took"); //Copying to the host hipMemcpy(hz, dz, sizeof(mp_int_t) * iterations , hipMemcpyDeviceToHost); checkDeviceHasErrors(hipDeviceSynchronize()); cudaCheckErrors(); checkResult(mpzz, hz, iterations); //--------------------------------------------------------- break; case sub_test: //--------------------------------------------------------- // GMP sub testing //--------------------------------------------------------- Logger::printDash(); PrintTimerName("[CPU] GNU MP mpz_sub"); resetResult(mpzz, iterations); StartCpuTimer(); #pragma omp parallel for for(int i = 0; i < iterations; i++){ mpz_sub(mpzz[i],mpzx[i],mpzy[i]); } EndCpuTimer(); PrintCpuTimer("took"); //--------------------------------------------------------- // MPINT sub testing //--------------------------------------------------------- Logger::printDash(); PrintTimerName("[CPU] GRNS mpint_sub"); resetResult(hz, iterations); StartCpuTimer(); #pragma omp 
parallel for for(int i = 0; i < iterations; i++){ mpint_sub(&hz[i],&hx[i],&hy[i]); } EndCpuTimer(); PrintCpuTimer("took"); checkResult(mpzz, hz, iterations); //--------------------------------------------------------- // MPINT CUDA sub testing //--------------------------------------------------------- Logger::printDash(); PrintTimerName("[CUDA] GRNS mpint_sub"); resetResult(hz, iterations); hipLaunchKernelGGL(( resetResultCuda), dim3(blocks),dim3(threads), 0, 0, dz, iterations); checkDeviceHasErrors(hipDeviceSynchronize()); cudaCheckErrors(); //Launch StartCudaTimer(); hipLaunchKernelGGL(( testCudaMpSub), dim3(blocks),dim3(threads), 0, 0, dz, dx, dy, iterations); EndCudaTimer(); PrintCudaTimer("took"); //Copying to the host hipMemcpy(hz, dz, sizeof(mp_int_t) * iterations , hipMemcpyDeviceToHost); checkDeviceHasErrors(hipDeviceSynchronize()); cudaCheckErrors(); checkResult(mpzz, hz, iterations); //--------------------------------------------------------- break; case mul_test: //--------------------------------------------------------- // GMP mul testing //--------------------------------------------------------- Logger::printDash(); PrintTimerName("[CPU] GNU MP mpz_mul"); resetResult(mpzz, iterations); StartCpuTimer(); #pragma omp parallel for for(int i = 0; i < iterations; i++){ mpz_mul(mpzz[i],mpzx[i],mpzy[i]); } EndCpuTimer(); PrintCpuTimer("took"); //--------------------------------------------------------- // MPINT mul testing //--------------------------------------------------------- Logger::printDash(); PrintTimerName("[CPU] GRNS mpint_mul"); resetResult(hz, iterations); StartCpuTimer(); #pragma omp parallel for for(int i = 0; i < iterations; i++){ mpint_mul(&hz[i],&hx[i],&hy[i]); } EndCpuTimer(); PrintCpuTimer("took"); checkResult(mpzz, hz, iterations); //--------------------------------------------------------- // MPINT CUDA mul testing //--------------------------------------------------------- Logger::printDash(); PrintTimerName("[CUDA] GRNS mpint_mul"); resetResult(hz, iterations); hipLaunchKernelGGL(( resetResultCuda), dim3(blocks),dim3(threads), 0, 0, dz, iterations); checkDeviceHasErrors(hipDeviceSynchronize()); cudaCheckErrors(); //Launch StartCudaTimer(); hipLaunchKernelGGL(( testCudaMpMul), dim3(blocks),dim3(threads), 0, 0, dz, dx, dy, iterations); EndCudaTimer(); PrintCudaTimer("took"); //Copying to the host hipMemcpy(hz, dz, sizeof(mp_int_t) * iterations , hipMemcpyDeviceToHost); checkDeviceHasErrors(hipDeviceSynchronize()); cudaCheckErrors(); checkResult(mpzz, hz, iterations); break; case div_test: //--------------------------------------------------------- // GMP div testing //--------------------------------------------------------- Logger::printDash(); PrintTimerName("[CPU] GNU MP mpz_fdiv_q"); resetResult(mpzz, iterations); StartCpuTimer(); #pragma omp parallel for for(int i = 0; i < iterations; i++){ mpz_fdiv_q(mpzz[i],mpzx[i],mpzy[i]); } EndCpuTimer(); PrintCpuTimer("took"); //--------------------------------------------------------- // MPINT div testing //--------------------------------------------------------- Logger::printDash(); PrintTimerName("[CPU] GRNS mpint_div"); resetResult(hz, iterations); StartCpuTimer(); #pragma omp parallel for for(int i = 0; i < iterations; i++){ mpint_div(&hz[i],&hx[i],&hy[i]); } EndCpuTimer(); PrintCpuTimer("took"); checkResult(mpzz, hz, iterations); //--------------------------------------------------------- // MPINT CUDA div testing //--------------------------------------------------------- Logger::printDash(); 
PrintTimerName("[CUDA] GRNS mpint_div"); resetResult(hz, iterations); hipLaunchKernelGGL(( resetResultCuda), dim3(blocks),dim3(threads), 0, 0, dz, iterations); checkDeviceHasErrors(hipDeviceSynchronize()); cudaCheckErrors(); //Launch StartCudaTimer(); hipLaunchKernelGGL(( testCudaMpDiv), dim3(blocks),dim3(threads), 0, 0, dz, dx, dy, iterations); EndCudaTimer(); PrintCudaTimer("took"); //Copying to the host hipMemcpy(hz, dz, sizeof(mp_int_t) * iterations , hipMemcpyDeviceToHost); checkDeviceHasErrors(hipDeviceSynchronize()); cudaCheckErrors(); checkResult(mpzz, hz, iterations); break; default: break; } // Cleanup #pragma omp parallel for for(int i = 0; i < iterations; i++){ mpz_clear(mpzx[i]); mpz_clear(mpzy[i]); mpz_clear(mpzz[i]); } delete [] hx; delete [] hy; delete [] hz; hipFree(dx); hipFree(dy); hipFree(dz); } int main() { hipDeviceReset(); rns_const_init(); mpint_const_init(); Logger::beginTestDescription(Logger::TEST_PERF_MPINT); Logger::printParam("ITERATIONS", ITERATIONS); Logger::printParam("PRECISION", RNS_MODULI_PRODUCT_LOG2); Logger::printDash(); rns_const_print(true); Logger::endSection(true); run_test(ITERATIONS, add_test, BND_RNS_MODULI_PRODUCT_HALF, true); Logger::printSpace(); run_test(ITERATIONS, sub_test, BND_RNS_MODULI_PRODUCT_HALF, true); Logger::printSpace(); run_test(ITERATIONS, mul_test, BND_RNS_MODULI_PRODUCT_SQRT, true); Logger::printSpace(); run_test(ITERATIONS, div_test, BND_RNS_MODULI_PRODUCT, false); Logger::endTestDescription(); return 0; }
522c3a803e7820620e0c5f9c07254d4b866e1b7a.cu
/* * Test for measure the performance of the multiple-precision integer routines */ #include <stdio.h> #include <stdlib.h> #include "omp.h" #include "gmp.h" #include "../src/mpint.cuh" #include "tsthelper.cuh" #include "logger.cuh" #include "timers.cuh" enum mpintTestType { add_test, sub_test, mul_test, div_test }; #define ITERATIONS 1000000 /* * CUDA tests */ __global__ static void testCudaMpAdd(mp_int_t * dz, mp_int_t * dx, mp_int_t * dy, int vectorSize) { int idx = blockIdx.x * blockDim.x + threadIdx.x; while (idx < vectorSize) { cuda::mpint_add(&dz[idx],&dx[idx],&dy[idx]); idx += gridDim.x * blockDim.x; } } __global__ static void testCudaMpSub(mp_int_t * dz, mp_int_t * dx, mp_int_t * dy, int vectorSize) { int idx = blockIdx.x * blockDim.x + threadIdx.x; while (idx < vectorSize) { cuda::mpint_sub(&dz[idx],&dx[idx],&dy[idx]); idx += gridDim.x * blockDim.x; } } __global__ static void testCudaMpMul(mp_int_t * dz, mp_int_t * dx, mp_int_t * dy, int vectorSize) { int idx = blockIdx.x * blockDim.x + threadIdx.x; while (idx < vectorSize) { cuda::mpint_mul(&dz[idx],&dx[idx],&dy[idx]); idx += gridDim.x * blockDim.x; } } __global__ static void testCudaMpDiv(mp_int_t * dz, mp_int_t * dx, mp_int_t * dy, int vectorSize) { int idx = blockIdx.x * blockDim.x + threadIdx.x; while (idx < vectorSize) { cuda::mpint_div(&dz[idx],&dx[idx],&dy[idx]); idx += gridDim.x * blockDim.x; } } /* * Common methods */ static void resetResult(mp_int_t * r, int vectorSize){ #pragma omp parallel for for(auto i = 0; i < vectorSize; i++){ mpint_set_i(&r[i], 0); } } static void resetResult(mpz_t * r, int vectorSize){ #pragma omp parallel for for(auto i = 0; i < vectorSize; i++){ mpz_set_ui(r[i], 0); } } __global__ static void resetResultCuda(mp_int_t * r, int vectorSize) { int idx = blockIdx.x * blockDim.x + threadIdx.x; while (idx < vectorSize) { cuda::mpint_set(&r[idx], &cuda::MPINT_ZERO); idx += gridDim.x * blockDim.x; } } static void checkResult(mpz_t * ref, mp_int_t * res, int vectorSize){ int errors = 0; mpz_t mp; mpz_init(mp); for(int i = 0; i < vectorSize; i++){ mpint_get_mpz(mp, &res[i]); if(mpz_cmp(mp, ref[i]) != 0){ errors++; } } if(errors == 0){ printf("All results match\n"); }else{ printf("Count of errors: %i\n", errors); } mpz_clear(mp); } /* * Main test */ static void run_test(int iterations, mpintTestType testType, RandomBoundType randomBoundType, bool allowNegative) { InitCpuTimer(); InitCudaTimer(); //Execution configuration int threads = 32; int blocks = iterations / threads + (iterations % threads ? 
1 : 0); // Multiple-precision GMP host data mpz_t * mpzx = new mpz_t[iterations]; mpz_t * mpzy = new mpz_t[iterations]; mpz_t * mpzz = new mpz_t[iterations]; // Multiple-precision mp_int host data auto *hx = new mp_int_t[iterations]; auto *hy = new mp_int_t[iterations]; auto *hz = new mp_int_t[iterations]; //GPU data mp_int_t * dx; mp_int_t * dy; mp_int_t * dz; //Memory allocation for(int i = 0; i < iterations; i++){ mpz_init(mpzx[i]); mpz_init(mpzy[i]); mpz_init(mpzz[i]); } cudaMalloc(&dx, sizeof(mp_int_t) * iterations); cudaMalloc(&dy, sizeof(mp_int_t) * iterations); cudaMalloc(&dz, sizeof(mp_int_t) * iterations); checkDeviceHasErrors(cudaDeviceSynchronize()); cudaCheckErrors(); //Generate inputs fill_random_array(mpzx, iterations, randomBoundType, allowNegative); waitFor(5); fill_random_array(mpzy, iterations, randomBoundType, allowNegative); //Convert to the RNS #pragma omp parallel for for(int i = 0; i < iterations; i++){ mpint_set_mpz(&hx[i], mpzx[i]); mpint_set_mpz(&hy[i], mpzy[i]); } // Copying to the GPU cudaMemcpy(dx, hx, sizeof(mp_int_t) * iterations, cudaMemcpyHostToDevice); cudaMemcpy(dy, hy, sizeof(mp_int_t) * iterations, cudaMemcpyHostToDevice); checkDeviceHasErrors(cudaDeviceSynchronize()); cudaCheckErrors(); switch (testType) { case add_test: //--------------------------------------------------------- // GMP add testing //--------------------------------------------------------- Logger::printDash(); PrintTimerName("[CPU] GNU MP mpz_add"); resetResult(mpzz, iterations); StartCpuTimer(); #pragma omp parallel for for(int i = 0; i < iterations; i++){ mpz_add(mpzz[i],mpzx[i],mpzy[i]); } EndCpuTimer(); PrintCpuTimer("took"); //--------------------------------------------------------- // MPINT add testing //--------------------------------------------------------- Logger::printDash(); PrintTimerName("[CPU] GRNS mpint_add"); resetResult(hz, iterations); StartCpuTimer(); #pragma omp parallel for for(int i = 0; i < iterations; i++){ mpint_add(&hz[i],&hx[i],&hy[i]); } EndCpuTimer(); PrintCpuTimer("took"); checkResult(mpzz, hz, iterations); //--------------------------------------------------------- // MPINT CUDA add testing //--------------------------------------------------------- Logger::printDash(); PrintTimerName("[CUDA] GRNS mpint_add"); resetResult(hz, iterations); resetResultCuda<<<blocks,threads>>>(dz, iterations); checkDeviceHasErrors(cudaDeviceSynchronize()); cudaCheckErrors(); //Launch StartCudaTimer(); testCudaMpAdd<<<blocks,threads>>>(dz, dx, dy, iterations); EndCudaTimer(); PrintCudaTimer("took"); //Copying to the host cudaMemcpy(hz, dz, sizeof(mp_int_t) * iterations , cudaMemcpyDeviceToHost); checkDeviceHasErrors(cudaDeviceSynchronize()); cudaCheckErrors(); checkResult(mpzz, hz, iterations); //--------------------------------------------------------- break; case sub_test: //--------------------------------------------------------- // GMP sub testing //--------------------------------------------------------- Logger::printDash(); PrintTimerName("[CPU] GNU MP mpz_sub"); resetResult(mpzz, iterations); StartCpuTimer(); #pragma omp parallel for for(int i = 0; i < iterations; i++){ mpz_sub(mpzz[i],mpzx[i],mpzy[i]); } EndCpuTimer(); PrintCpuTimer("took"); //--------------------------------------------------------- // MPINT sub testing //--------------------------------------------------------- Logger::printDash(); PrintTimerName("[CPU] GRNS mpint_sub"); resetResult(hz, iterations); StartCpuTimer(); #pragma omp parallel for for(int i = 0; i < iterations; i++){ 
mpint_sub(&hz[i],&hx[i],&hy[i]); } EndCpuTimer(); PrintCpuTimer("took"); checkResult(mpzz, hz, iterations); //--------------------------------------------------------- // MPINT CUDA sub testing //--------------------------------------------------------- Logger::printDash(); PrintTimerName("[CUDA] GRNS mpint_sub"); resetResult(hz, iterations); resetResultCuda<<<blocks,threads>>>(dz, iterations); checkDeviceHasErrors(cudaDeviceSynchronize()); cudaCheckErrors(); //Launch StartCudaTimer(); testCudaMpSub<<<blocks,threads>>>(dz, dx, dy, iterations); EndCudaTimer(); PrintCudaTimer("took"); //Copying to the host cudaMemcpy(hz, dz, sizeof(mp_int_t) * iterations , cudaMemcpyDeviceToHost); checkDeviceHasErrors(cudaDeviceSynchronize()); cudaCheckErrors(); checkResult(mpzz, hz, iterations); //--------------------------------------------------------- break; case mul_test: //--------------------------------------------------------- // GMP mul testing //--------------------------------------------------------- Logger::printDash(); PrintTimerName("[CPU] GNU MP mpz_mul"); resetResult(mpzz, iterations); StartCpuTimer(); #pragma omp parallel for for(int i = 0; i < iterations; i++){ mpz_mul(mpzz[i],mpzx[i],mpzy[i]); } EndCpuTimer(); PrintCpuTimer("took"); //--------------------------------------------------------- // MPINT mul testing //--------------------------------------------------------- Logger::printDash(); PrintTimerName("[CPU] GRNS mpint_mul"); resetResult(hz, iterations); StartCpuTimer(); #pragma omp parallel for for(int i = 0; i < iterations; i++){ mpint_mul(&hz[i],&hx[i],&hy[i]); } EndCpuTimer(); PrintCpuTimer("took"); checkResult(mpzz, hz, iterations); //--------------------------------------------------------- // MPINT CUDA mul testing //--------------------------------------------------------- Logger::printDash(); PrintTimerName("[CUDA] GRNS mpint_mul"); resetResult(hz, iterations); resetResultCuda<<<blocks,threads>>>(dz, iterations); checkDeviceHasErrors(cudaDeviceSynchronize()); cudaCheckErrors(); //Launch StartCudaTimer(); testCudaMpMul<<<blocks,threads>>>(dz, dx, dy, iterations); EndCudaTimer(); PrintCudaTimer("took"); //Copying to the host cudaMemcpy(hz, dz, sizeof(mp_int_t) * iterations , cudaMemcpyDeviceToHost); checkDeviceHasErrors(cudaDeviceSynchronize()); cudaCheckErrors(); checkResult(mpzz, hz, iterations); break; case div_test: //--------------------------------------------------------- // GMP div testing //--------------------------------------------------------- Logger::printDash(); PrintTimerName("[CPU] GNU MP mpz_fdiv_q"); resetResult(mpzz, iterations); StartCpuTimer(); #pragma omp parallel for for(int i = 0; i < iterations; i++){ mpz_fdiv_q(mpzz[i],mpzx[i],mpzy[i]); } EndCpuTimer(); PrintCpuTimer("took"); //--------------------------------------------------------- // MPINT div testing //--------------------------------------------------------- Logger::printDash(); PrintTimerName("[CPU] GRNS mpint_div"); resetResult(hz, iterations); StartCpuTimer(); #pragma omp parallel for for(int i = 0; i < iterations; i++){ mpint_div(&hz[i],&hx[i],&hy[i]); } EndCpuTimer(); PrintCpuTimer("took"); checkResult(mpzz, hz, iterations); //--------------------------------------------------------- // MPINT CUDA div testing //--------------------------------------------------------- Logger::printDash(); PrintTimerName("[CUDA] GRNS mpint_div"); resetResult(hz, iterations); resetResultCuda<<<blocks,threads>>>(dz, iterations); checkDeviceHasErrors(cudaDeviceSynchronize()); cudaCheckErrors(); //Launch 
StartCudaTimer(); testCudaMpDiv<<<blocks,threads>>>(dz, dx, dy, iterations); EndCudaTimer(); PrintCudaTimer("took"); //Copying to the host cudaMemcpy(hz, dz, sizeof(mp_int_t) * iterations , cudaMemcpyDeviceToHost); checkDeviceHasErrors(cudaDeviceSynchronize()); cudaCheckErrors(); checkResult(mpzz, hz, iterations); break; default: break; } // Cleanup #pragma omp parallel for for(int i = 0; i < iterations; i++){ mpz_clear(mpzx[i]); mpz_clear(mpzy[i]); mpz_clear(mpzz[i]); } delete [] hx; delete [] hy; delete [] hz; cudaFree(dx); cudaFree(dy); cudaFree(dz); } int main() { cudaDeviceReset(); rns_const_init(); mpint_const_init(); Logger::beginTestDescription(Logger::TEST_PERF_MPINT); Logger::printParam("ITERATIONS", ITERATIONS); Logger::printParam("PRECISION", RNS_MODULI_PRODUCT_LOG2); Logger::printDash(); rns_const_print(true); Logger::endSection(true); run_test(ITERATIONS, add_test, BND_RNS_MODULI_PRODUCT_HALF, true); Logger::printSpace(); run_test(ITERATIONS, sub_test, BND_RNS_MODULI_PRODUCT_HALF, true); Logger::printSpace(); run_test(ITERATIONS, mul_test, BND_RNS_MODULI_PRODUCT_SQRT, true); Logger::printSpace(); run_test(ITERATIONS, div_test, BND_RNS_MODULI_PRODUCT, false); Logger::endTestDescription(); return 0; }
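The .hip/.cu pair above differs almost entirely in mechanical renames made by hipify: cudaMalloc/cudaMemcpy/cudaDeviceSynchronize/cudaFree become their hip* counterparts, and every triple-chevron launch becomes a hipLaunchKernelGGL call. The minimal CUDA sketch below shows the same grid-stride launch pattern used by the test kernels, with the hipified form given in comments; vec_scale and the buffer size are illustrative choices for this sketch, not values from the files above.

#include <cuda_runtime.h>
#include <cstdio>

// Same grid-stride loop shape as testCudaMpAdd/Sub/Mul/Div above.
__global__ void vec_scale(float* x, float a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    while (i < n) {
        x[i] *= a;
        i += gridDim.x * blockDim.x;
    }
}

int main() {
    const int n = 1 << 20;
    float* d = nullptr;
    cudaMalloc(&d, n * sizeof(float));        // hipified: hipMalloc(&d, ...)
    cudaMemset(d, 0, n * sizeof(float));      // hipified: hipMemset(d, 0, ...)

    // Same block/thread computation as run_test above.
    int threads = 32;
    int blocks  = n / threads + (n % threads ? 1 : 0);

    vec_scale<<<blocks, threads>>>(d, 2.0f, n);
    // hipified: hipLaunchKernelGGL(vec_scale, dim3(blocks), dim3(threads), 0, 0,
    //                              d, 2.0f, n);

    cudaDeviceSynchronize();                  // hipified: hipDeviceSynchronize()
    cudaFree(d);                              // hipified: hipFree(d)
    std::printf("done\n");
    return 0;
}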
1b4e0ae459bbe782c3e354de40e72ae45dbe1051.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/detail/get_value.cuh> #include <cudf/detail/null_mask.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/scalar/scalar.hpp> #include <cudf/strings/detail/utilities.hpp> #include <cudf/strings/json.hpp> #include <cudf/strings/string_view.cuh> #include <cudf/strings/strings_column_view.hpp> #include <cudf/types.hpp> #include <cudf/utilities/bit.hpp> #include <cudf/utilities/default_stream.hpp> #include <cudf/utilities/error.hpp> #include <io/utilities/parsing_utils.cuh> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> #include <thrust/optional.h> #include <thrust/pair.h> #include <thrust/scan.h> #include <thrust/tuple.h> namespace cudf { namespace strings { namespace detail { namespace { // change to "\n" and 1 to make output more readable #define DEBUG_NEWLINE constexpr int DEBUG_NEWLINE_LEN = 0; /** * @brief Result of calling a parse function. * * The primary use of this is to distinguish between "success" and * "success but no data" return cases. For example, if you are reading the * values of an array you might call a parse function in a while loop. You * would want to continue doing this until you either encounter an error (parse_result::ERROR) * or you get nothing back (parse_result::EMPTY) */ enum class parse_result { ERROR, // failure SUCCESS, // success MISSING_FIELD, // success, but the field is missing EMPTY, // success, but no data }; /** * @brief Base parser class inherited by the (device-side) json_state class and * (host-side) path_state class. * * Contains a number of useful utility functions common to parsing json and * JSONPath strings. */ class parser { protected: CUDF_HOST_DEVICE inline parser() {} CUDF_HOST_DEVICE inline parser(char const* _input, int64_t _input_len) : input(_input), input_len(_input_len), pos(_input) { parse_whitespace(); } CUDF_HOST_DEVICE inline parser(parser const& p) : input(p.input), input_len(p.input_len), pos(p.pos) { } CUDF_HOST_DEVICE inline bool eof(char const* p) { return p - input >= input_len; } CUDF_HOST_DEVICE inline bool eof() { return eof(pos); } CUDF_HOST_DEVICE inline bool parse_whitespace() { while (!eof()) { if (is_whitespace(*pos)) { pos++; } else { return true; } } return false; } CUDF_HOST_DEVICE inline bool is_hex_digit(char c) { return (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F'); } CUDF_HOST_DEVICE inline int64_t chars_left() { return input_len - ((pos - input) + 1); } /** * @brief Parse an escape sequence. * * Must be a valid sequence as specified by the JSON format * https://www.json.org/json-en.html * * @returns True on success or false on fail. 
*/ CUDF_HOST_DEVICE inline bool parse_escape_seq() { if (*pos != '\\') { return false; } char c = *++pos; // simple case if (c == '\"' || c == '\\' || c == '/' || c == 'b' || c == 'f' || c == 'n' || c == 'r' || c == 't') { pos++; return true; } // hex digits: must be of the form uXXXX where each X is a valid hex digit if (c == 'u' && chars_left() >= 4 && is_hex_digit(pos[1]) && is_hex_digit(pos[2]) && is_hex_digit(pos[3]) && is_hex_digit(pos[4])) { pos += 5; return true; } // an illegal escape sequence. return false; } /** * @brief Parse a quote-enclosed JSON string. * * @param[out] str The resulting string. * @param can_be_empty Parameter indicating whether it is valid for the string * to not be present. * @param quote Character expected as the surrounding quotes. A value of 0 * indicates allowing either single or double quotes (but not a mixture of both). * @returns A result code indicating success, failure or other result. */ CUDF_HOST_DEVICE inline parse_result parse_string(string_view& str, bool can_be_empty, char quote) { str = string_view(nullptr, 0); if (parse_whitespace()) { // if the user specifies 0 for quote, allow either ' or ". otherwise // use the char directly if ((quote == 0 && (*pos == '\'' || *pos == '\"')) || (quote == *pos)) { quote = *pos; char const* start = ++pos; while (!eof()) { // handle escaped characters if (*pos == '\\') { if (!parse_escape_seq()) { return parse_result::ERROR; } } else if (*pos == quote) { str = string_view(start, pos - start); pos++; return parse_result::SUCCESS; } else { pos++; } } } } return can_be_empty ? parse_result::EMPTY : parse_result::ERROR; } protected: char const* input{nullptr}; int64_t input_len{0}; char const* pos{nullptr}; CUDF_HOST_DEVICE inline bool is_whitespace(char c) { return c <= ' '; } }; /** * @brief Output buffer object. Used during the preprocess/size-computation step * and the actual output step. * * There is an important distinction between two cases: * * - producing no output at all. that is, the query matched nothing in the input. * - producing empty output. the query matched something in the input, but the * value of the result is an empty string. * * The `has_output` field is the flag which indicates whether or not the output * from the query should be considered empty or null. * */ struct json_output { size_t output_max_len; char* output; thrust::optional<size_t> output_len; __device__ void add_output(char const* str, size_t len) { if (output != nullptr) { memcpy(output + output_len.value_or(0), str, len); } output_len = output_len.value_or(0) + len; } __device__ void add_output(string_view const& str) { add_output(str.data(), str.size_bytes()); } }; enum json_element_type { NONE, OBJECT, ARRAY, VALUE }; /** * @brief Parsing class that holds the current state of the json to be parse and provides * functions for navigating through it. */ class json_state : private parser { public: __device__ json_state() : parser() {} __device__ json_state(char const* _input, int64_t _input_len, get_json_object_options _options) : parser(_input, _input_len), options(_options) { } __device__ json_state(json_state const& j) : parser(j), cur_el_start(j.cur_el_start), cur_el_type(j.cur_el_type), parent_el_type(j.parent_el_type), options(j.options) { } // retrieve the entire current element into the output __device__ parse_result extract_element(json_output* output, bool list_element) { char const* start = cur_el_start; char const* end = start; // if we're a value type, do a simple value parse. 
if (cur_el_type == VALUE) { pos = cur_el_start; if (parse_value() != parse_result::SUCCESS) { return parse_result::ERROR; } end = pos; // potentially strip quotes from individually returned string values. if (options.get_strip_quotes_from_single_strings() && !list_element && is_quote(*start) && *(end - 1) == *start) { start++; end--; } } // otherwise, march through everything inside else { int obj_count = 0; int arr_count = 0; while (!eof(end)) { // parse strings explicitly so we handle all interesting corner cases (such as strings // containing {, }, [ or ] if (is_quote(*end)) { string_view str; pos = end; if (parse_string(str, false, *end) == parse_result::ERROR) { return parse_result::ERROR; } end = pos; } else { char const c = *end++; switch (c) { case '{': obj_count++; break; case '}': obj_count--; break; case '[': arr_count++; break; case ']': arr_count--; break; default: break; } } if (obj_count == 0 && arr_count == 0) { break; } } if (obj_count > 0 || arr_count > 0) { return parse_result::ERROR; } pos = end; } // parse trailing , if (parse_whitespace()) { if (*pos == ',') { pos++; } } if (output != nullptr) { output->add_output({start, static_cast<size_type>(end - start)}); } return parse_result::SUCCESS; } // skip the next element __device__ parse_result skip_element() { return extract_element(nullptr, false); } // advance to the next element __device__ parse_result next_element() { return next_element_internal(false); } // advance inside the current element __device__ parse_result child_element(json_element_type expected_type) { if (expected_type != NONE && cur_el_type != expected_type) { return parse_result::ERROR; } // if we succeed, record our parent element type. auto const prev_el_type = cur_el_type; auto const result = next_element_internal(true); if (result == parse_result::SUCCESS) { parent_el_type = prev_el_type; } return result; } // return the next element that matches the specified name. __device__ parse_result next_matching_element(string_view const& name, bool inclusive) { // if we're not including the current element, skip it if (!inclusive) { parse_result result = next_element_internal(false); if (result != parse_result::SUCCESS) { return result; } } // loop until we find a match or there's nothing left do { if (name.size_bytes() == 1 && name.data()[0] == '*') { return parse_result::SUCCESS; } else if (cur_el_name == name) { return parse_result::SUCCESS; } // next parse_result result = next_element_internal(false); if (result != parse_result::SUCCESS) { return options.get_missing_fields_as_nulls() && result == parse_result::EMPTY ? parse_result::MISSING_FIELD : result; } } while (true); return parse_result::ERROR; } /** * @brief Parse a name field for a JSON element. * * When parsing JSON objects, it is not always a requirement that the name * actually exists. For example, the outer object bounded by {} here has * no name, while the inner element "a" does. * * ``` * { * "a" : "b" * } * ``` * * The user can specify whether or not the name string must be present via * the `can_be_empty` flag. * * When a name is present, it must be followed by a colon `:` * * @param[out] name The resulting name. * @param can_be_empty Parameter indicating whether it is valid for the name * to not be present. * @returns A result code indicating success, failure or other result. */ CUDF_HOST_DEVICE inline parse_result parse_name(string_view& name, bool can_be_empty) { char const quote = options.get_allow_single_quotes() ? 
0 : '\"'; if (parse_string(name, can_be_empty, quote) == parse_result::ERROR) { return parse_result::ERROR; } // if we got a real string, the next char must be a : if (name.size_bytes() > 0) { if (!parse_whitespace()) { return parse_result::ERROR; } if (*pos == ':') { pos++; return parse_result::SUCCESS; } } return parse_result::EMPTY; } private: /** * @brief Parse a non-string JSON value. * * Non-string values include numbers, true, false, or null. This function does not * do any validation of the value. * * @param val (Output) The string containing the parsed value * @returns A result code indicating success, failure or other result. */ CUDF_HOST_DEVICE inline parse_result parse_non_string_value(string_view& val) { if (!parse_whitespace()) { return parse_result::ERROR; } // parse to the end of the value char const* start = pos; char const* end = start; while (!eof(end)) { char const c = *end; if (c == ',' || c == '}' || c == ']' || is_whitespace(c)) { break; } // illegal chars if (c == '[' || c == '{' || c == ':' || is_quote(c)) { return parse_result::ERROR; } end++; } pos = end; val = string_view(start, end - start); return parse_result::SUCCESS; } // parse a value - either a string or a number/null/bool __device__ parse_result parse_value() { if (!parse_whitespace()) { return parse_result::ERROR; } // string or number? string_view unused; return is_quote(*pos) ? parse_string(unused, false, *pos) : parse_non_string_value(unused); } __device__ parse_result next_element_internal(bool child) { // if we're not getting a child element, skip the current element. // this will leave pos as the first character -after- the close of // the current element if (!child && cur_el_start != nullptr) { if (skip_element() == parse_result::ERROR) { return parse_result::ERROR; } cur_el_start = nullptr; } // otherwise pos will be at the first character within the current element // can only get the child of an object or array. // this could theoretically be handled as an error, but the evaluators I've found // seem to treat this as "it's nothing" if (child && (cur_el_type == VALUE || cur_el_type == NONE)) { return parse_result::EMPTY; } // what's next if (!parse_whitespace()) { return parse_result::EMPTY; } // if we're closing off a parent element, we're done char const c = *pos; if (c == ']' || c == '}') { return parse_result::EMPTY; } // if we're not accessing elements of an array, check for name. 
bool const array_access = (cur_el_type == ARRAY && child) || (parent_el_type == ARRAY && !child); if (!array_access && parse_name(cur_el_name, true) == parse_result::ERROR) { return parse_result::ERROR; } // element type if (!parse_whitespace()) { return parse_result::EMPTY; } switch (*pos++) { case '[': cur_el_type = ARRAY; break; case '{': cur_el_type = OBJECT; break; case ',': case ':': return parse_result::ERROR; case '\'': if (!options.get_allow_single_quotes()) { return parse_result::ERROR; } cur_el_type = VALUE; break; // value type default: cur_el_type = VALUE; break; } // the start of the current element is always at the value, not the name cur_el_start = pos - 1; return parse_result::SUCCESS; } CUDF_HOST_DEVICE inline bool is_quote(char c) { return (c == '\"') || (options.get_allow_single_quotes() && (c == '\'')); } char const* cur_el_start{nullptr}; // pointer to the first character of the -value- of the // current element - not the name string_view cur_el_name; // name of the current element (if applicable) json_element_type cur_el_type{json_element_type::NONE}; // type of the current element json_element_type parent_el_type{json_element_type::NONE}; // parent element type get_json_object_options options; // behavior options }; enum class path_operator_type { ROOT, CHILD, CHILD_WILDCARD, CHILD_INDEX, ERROR, END }; /** * @brief A "command" operator used to query a json string. A full query is * an array of these operators applied to the incoming json string, */ struct path_operator { CUDF_HOST_DEVICE inline path_operator() {} CUDF_HOST_DEVICE inline path_operator(path_operator_type _type, json_element_type _expected_type = NONE) : type(_type), expected_type{_expected_type} { } path_operator_type type{path_operator_type::ERROR}; // operator type // the expected element type we're applying this operation to. // for example: // - you cannot retrieve a subscripted field (eg [5]) from an object. // - you cannot retrieve a field by name (eg .book) from an array. // - you -can- use .* for both arrays and objects // a value of NONE implies any type accepted json_element_type expected_type{NONE}; // the expected type of the element we're working with string_view name; // name to match against (if applicable) int index{-1}; // index for subscript operator }; /** * @brief Parsing class that holds the current state of the JSONPath string to be parsed * and provides functions for navigating through it. This is only called on the host * during the preprocess step which builds a command buffer that the gpu uses. 
*/ class path_state : private parser { public: path_state(char const* _path, size_t _path_len) : parser(_path, _path_len) {} // get the next operator in the JSONPath string path_operator get_next_operator() { if (eof()) { return {path_operator_type::END}; } switch (*pos++) { case '$': return {path_operator_type::ROOT}; case '.': { path_operator op; string_view term{".[", 2}; if (parse_path_name(op.name, term)) { // this is another potential use case for __SPARK_BEHAVIORS / configurability // Spark currently only handles the wildcard operator inside [*], it does // not handle .* if (op.name.size_bytes() == 1 && op.name.data()[0] == '*') { op.type = path_operator_type::CHILD_WILDCARD; op.expected_type = NONE; } else { op.type = path_operator_type::CHILD; op.expected_type = OBJECT; } return op; } } break; // 3 ways this can be used // indices: [0] // name: ['book'] // wildcard: [*] case '[': { path_operator op; string_view term{"]", 1}; bool const is_string = *pos == '\''; if (parse_path_name(op.name, term)) { pos++; if (op.name.size_bytes() == 1 && op.name.data()[0] == '*') { op.type = path_operator_type::CHILD_WILDCARD; op.expected_type = NONE; } else { if (is_string) { op.type = path_operator_type::CHILD; op.expected_type = OBJECT; } else { op.type = path_operator_type::CHILD_INDEX; auto const value = cudf::io::parse_numeric<int>( op.name.data(), op.name.data() + op.name.size_bytes(), json_opts); op.index = value.value_or(-1); CUDF_EXPECTS(op.index >= 0, "Invalid numeric index specified in JSONPath"); op.expected_type = ARRAY; } } return op; } } break; // wildcard operator case '*': { pos++; return path_operator{path_operator_type::CHILD_WILDCARD}; } break; default: CUDF_FAIL("Unrecognized JSONPath operator", std::invalid_argument); break; } return {path_operator_type::ERROR}; } private: cudf::io::parse_options_view json_opts{',', '\n', '\"', '.'}; bool parse_path_name(string_view& name, string_view const& terminators) { switch (*pos) { case '*': name = string_view(pos, 1); pos++; break; case '\'': if (parse_string(name, false, '\'') != parse_result::SUCCESS) { return false; } break; default: { size_t const chars_left = input_len - (pos - input); char const* end = std::find_first_of( pos, pos + chars_left, terminators.data(), terminators.data() + terminators.size_bytes()); if (end) { name = string_view(pos, end - pos); pos = end; } else { name = string_view(pos, chars_left); pos = input + input_len; } break; } } // an empty name is not valid CUDF_EXPECTS( name.size_bytes() > 0, "Invalid empty name in JSONPath query string", std::invalid_argument); return true; } }; /** * @brief Preprocess the incoming JSONPath string on the host to generate a * command buffer for use by the GPU. * * @param json_path The incoming json path * @param stream Cuda stream to perform any gpu actions on * @returns A pair containing the command buffer, and maximum stack depth required. 
*/ std::pair<thrust::optional<rmm::device_uvector<path_operator>>, int> build_command_buffer( cudf::string_scalar const& json_path, rmm::cuda_stream_view stream) { std::string h_json_path = json_path.to_string(stream); path_state p_state(h_json_path.data(), static_cast<size_type>(h_json_path.size())); std::vector<path_operator> h_operators; path_operator op; int max_stack_depth = 1; do { op = p_state.get_next_operator(); if (op.type == path_operator_type::ERROR) { CUDF_FAIL("Encountered invalid JSONPath input string"); } if (op.type == path_operator_type::CHILD_WILDCARD) { max_stack_depth++; } // convert pointer to device pointer if (op.name.size_bytes() > 0) { op.name = string_view(json_path.data() + (op.name.data() - h_json_path.data()), op.name.size_bytes()); } if (op.type == path_operator_type::ROOT) { CUDF_EXPECTS(h_operators.size() == 0, "Root operator ($) can only exist at the root"); } // if we have not gotten a root operator to start, and we're not empty, quietly push a // root operator now. if (h_operators.size() == 0 && op.type != path_operator_type::ROOT && op.type != path_operator_type::END) { h_operators.push_back(path_operator{path_operator_type::ROOT}); } h_operators.push_back(op); } while (op.type != path_operator_type::END); auto const is_empty = h_operators.size() == 1 && h_operators[0].type == path_operator_type::END; return is_empty ? std::pair(thrust::nullopt, 0) : std::pair(thrust::make_optional(cudf::detail::make_device_uvector_sync( h_operators, stream, rmm::mr::get_current_device_resource())), max_stack_depth); } #define PARSE_TRY(_x) \ do { \ last_result = _x; \ if (last_result == parse_result::ERROR) { return parse_result::ERROR; } \ } while (0) /** * @brief Parse a single json string using the provided command buffer * * @param j_state The incoming json string and associated parser * @param commands The command buffer to be applied to the string. Always ends with a * path_operator_type::END * @param output Buffer user to store the results of the query * @returns A result code indicating success/fail/empty. */ template <int max_command_stack_depth> __device__ parse_result parse_json_path(json_state& j_state, path_operator const* commands, json_output& output) { // manually maintained context stack in lieu of calling parse_json_path recursively. 
struct context { json_state j_state; path_operator const* commands; bool list_element; bool state_flag; }; context stack[max_command_stack_depth]; int stack_pos = 0; auto push_context = [&stack, &stack_pos](json_state const& _j_state, path_operator const* _commands, bool _list_element = false, bool _state_flag = false) { if (stack_pos == max_command_stack_depth - 1) { return false; } stack[stack_pos++] = context{_j_state, _commands, _list_element, _state_flag}; return true; }; auto pop_context = [&stack, &stack_pos](context& c) { if (stack_pos > 0) { c = stack[--stack_pos]; return true; } return false; }; push_context(j_state, commands, false); parse_result last_result = parse_result::SUCCESS; context ctx; int element_count = 0; while (pop_context(ctx)) { path_operator op = *ctx.commands; switch (op.type) { // whatever the first object is case path_operator_type::ROOT: PARSE_TRY(ctx.j_state.next_element()); push_context(ctx.j_state, ctx.commands + 1); break; // .name // ['name'] // [1] // will return a single thing case path_operator_type::CHILD: { PARSE_TRY(ctx.j_state.child_element(op.expected_type)); if (last_result == parse_result::SUCCESS) { PARSE_TRY(ctx.j_state.next_matching_element(op.name, true)); if (last_result == parse_result::SUCCESS) { push_context(ctx.j_state, ctx.commands + 1, ctx.list_element); } else if (last_result == parse_result::MISSING_FIELD) { if (ctx.list_element && element_count > 0) { output.add_output({"," DEBUG_NEWLINE, 1 + DEBUG_NEWLINE_LEN}); } output.add_output({"null", 4}); element_count++; } } } break; // .* // [*] // will return an array of things case path_operator_type::CHILD_WILDCARD: { // if we're on the first element of this wildcard if (!ctx.state_flag) { // we will only ever be returning 1 array if (!ctx.list_element) { output.add_output({"[" DEBUG_NEWLINE, 1 + DEBUG_NEWLINE_LEN}); } // step into the child element PARSE_TRY(ctx.j_state.child_element(op.expected_type)); if (last_result == parse_result::EMPTY) { if (!ctx.list_element) { output.add_output({"]" DEBUG_NEWLINE, 1 + DEBUG_NEWLINE_LEN}); } last_result = parse_result::SUCCESS; break; } // first element PARSE_TRY(ctx.j_state.next_matching_element({"*", 1}, true)); if (last_result == parse_result::EMPTY) { if (!ctx.list_element) { output.add_output({"]" DEBUG_NEWLINE, 1 + DEBUG_NEWLINE_LEN}); } last_result = parse_result::SUCCESS; break; } // re-push ourselves push_context(ctx.j_state, ctx.commands, ctx.list_element, true); // push the next command push_context(ctx.j_state, ctx.commands + 1, true); } else { // next element PARSE_TRY(ctx.j_state.next_matching_element({"*", 1}, false)); if (last_result == parse_result::EMPTY) { if (!ctx.list_element) { output.add_output({"]" DEBUG_NEWLINE, 1 + DEBUG_NEWLINE_LEN}); } last_result = parse_result::SUCCESS; break; } // re-push ourselves push_context(ctx.j_state, ctx.commands, ctx.list_element, true); // push the next command push_context(ctx.j_state, ctx.commands + 1, true); } } break; // [0] // [1] // etc // returns a single thing case path_operator_type::CHILD_INDEX: { PARSE_TRY(ctx.j_state.child_element(op.expected_type)); if (last_result == parse_result::SUCCESS) { string_view const any{"*", 1}; PARSE_TRY(ctx.j_state.next_matching_element(any, true)); if (last_result == parse_result::SUCCESS) { int idx; for (idx = 1; idx <= op.index; idx++) { PARSE_TRY(ctx.j_state.next_matching_element(any, false)); if (last_result == parse_result::EMPTY) { break; } } // if we didn't end up at the index we requested, this is an invalid index if (idx - 1 != 
op.index) { return parse_result::ERROR; } push_context(ctx.j_state, ctx.commands + 1, ctx.list_element); } } } break; // some sort of error. case path_operator_type::ERROR: return parse_result::ERROR; break; // END case default: { if (ctx.list_element && element_count > 0) { output.add_output({"," DEBUG_NEWLINE, 1 + DEBUG_NEWLINE_LEN}); } PARSE_TRY(ctx.j_state.extract_element(&output, ctx.list_element)); if (ctx.list_element && last_result != parse_result::EMPTY) { element_count++; } } break; } } return parse_result::SUCCESS; } // hardcoding this for now. to reach a stack depth of 8 would require // a JSONPath containing 7 nested wildcards so this is probably reasonable. constexpr int max_command_stack_depth = 8; /** * @brief Parse a single json string using the provided command buffer * * This function exists primarily as a shim for debugging purposes. * * @param input The incoming json string * @param input_len Size of the incoming json string * @param commands The command buffer to be applied to the string. Always ends with a * path_operator_type::END * @param out_buf Buffer user to store the results of the query (nullptr in the size computation * step) * @param out_buf_size Size of the output buffer * @param options Options controlling behavior * @returns A pair containing the result code the output buffer. */ __device__ thrust::pair<parse_result, json_output> get_json_object_single( char const* input, size_t input_len, path_operator const* const commands, char* out_buf, size_t out_buf_size, get_json_object_options options) { json_state j_state(input, input_len, options); json_output output{out_buf_size, out_buf}; auto const result = parse_json_path<max_command_stack_depth>(j_state, commands, output); return {result, output}; } /** * @brief Kernel for running the JSONPath query. * * This kernel operates in a 2-pass way. On the first pass, it computes * output sizes. On the second pass it fills in the provided output buffers * (chars and validity) * * @param col Device view of the incoming string * @param commands JSONPath command buffer * @param output_offsets Buffer used to store the string offsets for the results of the query * @param out_buf Buffer used to store the results of the query * @param out_validity Output validity buffer * @param out_valid_count Output count of # of valid bits * @param options Options controlling behavior */ template <int block_size> __launch_bounds__(block_size) __global__ void get_json_object_kernel(column_device_view col, path_operator const* const commands, size_type* output_offsets, thrust::optional<char*> out_buf, thrust::optional<bitmask_type*> out_validity, thrust::optional<size_type*> out_valid_count, get_json_object_options options) { auto tid = cudf::detail::grid_1d::global_thread_id(); auto const stride = cudf::thread_index_type{blockDim.x} * cudf::thread_index_type{gridDim.x}; size_type warp_valid_count{0}; auto active_threads = __ballot_sync(0xffff'ffffu, tid < col.size()); while (tid < col.size()) { bool is_valid = false; string_view const str = col.element<string_view>(tid); size_type output_size = 0; if (str.size_bytes() > 0) { char* dst = out_buf.has_value() ? out_buf.value() + output_offsets[tid] : nullptr; size_t const dst_size = out_buf.has_value() ? 
output_offsets[tid + 1] - output_offsets[tid] : 0; parse_result result; json_output out; thrust::tie(result, out) = get_json_object_single(str.data(), str.size_bytes(), commands, dst, dst_size, options); output_size = out.output_len.value_or(0); if (out.output_len.has_value() && result == parse_result::SUCCESS) { is_valid = true; } } // filled in only during the precompute step. during the compute step, the offsets // are fed back in so we do -not- want to write them out if (!out_buf.has_value()) { output_offsets[tid] = static_cast<size_type>(output_size); } // validity filled in only during the output step if (out_validity.has_value()) { uint32_t mask = __ballot_sync(active_threads, is_valid); // 0th lane of the warp writes the validity if (!(tid % cudf::detail::warp_size)) { out_validity.value()[cudf::word_index(tid)] = mask; warp_valid_count += __popc(mask); } } tid += stride; active_threads = __ballot_sync(active_threads, tid < col.size()); } // sum the valid counts across the whole block if (out_valid_count) { size_type block_valid_count = cudf::detail::single_lane_block_sum_reduce<block_size, 0>(warp_valid_count); if (threadIdx.x == 0) { atomicAdd(out_valid_count.value(), block_valid_count); } } } /** * @copydoc cudf::strings::detail::get_json_object */ std::unique_ptr<cudf::column> get_json_object(cudf::strings_column_view const& col, cudf::string_scalar const& json_path, get_json_object_options options, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { // preprocess the json_path into a command buffer auto preprocess = build_command_buffer(json_path, stream); CUDF_EXPECTS(std::get<1>(preprocess) <= max_command_stack_depth, "Encountered JSONPath string that is too complex"); if (col.is_empty()) return make_empty_column(type_id::STRING); // allocate output offsets buffer. 
auto offsets = cudf::make_fixed_width_column( data_type{type_id::INT32}, col.size() + 1, mask_state::UNALLOCATED, stream, mr); cudf::mutable_column_view offsets_view(*offsets); // if the query is empty, return a string column containing all nulls if (!std::get<0>(preprocess).has_value()) { return std::make_unique<column>( data_type{type_id::STRING}, col.size(), rmm::device_buffer{0, stream, mr}, // no data cudf::detail::create_null_mask(col.size(), mask_state::ALL_NULL, stream, mr), col.size()); // null count } constexpr int block_size = 512; cudf::detail::grid_1d const grid{col.size(), block_size}; auto cdv = column_device_view::create(col.parent(), stream); // preprocess sizes (returned in the offsets buffer) hipLaunchKernelGGL(( get_json_object_kernel<block_size>) , dim3(grid.num_blocks), dim3(grid.num_threads_per_block), 0, stream.value(), *cdv, std::get<0>(preprocess).value().data(), offsets_view.head<size_type>(), thrust::nullopt, thrust::nullopt, thrust::nullopt, options); // convert sizes to offsets thrust::exclusive_scan(rmm::exec_policy(stream), offsets_view.head<size_type>(), offsets_view.head<size_type>() + col.size() + 1, offsets_view.head<size_type>(), 0); size_type const output_size = cudf::detail::get_value<size_type>(offsets_view, col.size(), stream); // allocate output string column auto chars = create_chars_child_column(output_size, stream, mr); // potential optimization : if we know that all outputs are valid, we could skip creating // the validity mask altogether rmm::device_buffer validity = cudf::detail::create_null_mask(col.size(), mask_state::UNINITIALIZED, stream, mr); // compute results cudf::mutable_column_view chars_view(*chars); rmm::device_scalar<size_type> d_valid_count{0, stream}; hipLaunchKernelGGL(( get_json_object_kernel<block_size>) , dim3(grid.num_blocks), dim3(grid.num_threads_per_block), 0, stream.value(), *cdv, std::get<0>(preprocess).value().data(), offsets_view.head<size_type>(), chars_view.head<char>(), static_cast<bitmask_type*>(validity.data()), d_valid_count.data(), options); return make_strings_column(col.size(), std::move(offsets), std::move(chars), col.size() - d_valid_count.value(stream), std::move(validity)); } } // namespace } // namespace detail /** * @copydoc cudf::strings::get_json_object */ std::unique_ptr<cudf::column> get_json_object(cudf::strings_column_view const& col, cudf::string_scalar const& json_path, get_json_object_options options, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::get_json_object(col, json_path, options, cudf::get_default_stream(), mr); } } // namespace strings } // namespace cudf
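The .cu file that follows differs from the .hip file above only mechanically: the runtime header and the kernel-launch spelling. Below is a minimal, self-contained CUDA sketch of that mapping; demo_kernel and its arguments are placeholders, while the commented hipLaunchKernelGGL line mirrors the form hipify emits for the get_json_object_kernel launches above (kernel, grid dim, block dim, dynamic shared-memory bytes, stream, then the kernel's own arguments).

#include <cstdio>
#include <cuda_runtime.h>

__global__ void demo_kernel(int* out, int value) { out[threadIdx.x] = value; }

int main() {
    int* d_out = nullptr;
    cudaMalloc(&d_out, 32 * sizeof(int));

    // Native CUDA spelling (as in the .cu file below):
    demo_kernel<<<1, 32, 0, 0>>>(d_out, 42);

    // HIP spelling produced by hipify (as in the .hip file above):
    //   hipLaunchKernelGGL((demo_kernel), dim3(1), dim3(32), 0, 0, d_out, 42);

    cudaDeviceSynchronize();

    int h_out[32];
    cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
    printf("first element: %d\n", h_out[0]);
    cudaFree(d_out);
    return 0;
}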
1b4e0ae459bbe782c3e354de40e72ae45dbe1051.cu
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/detail/get_value.cuh> #include <cudf/detail/null_mask.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/scalar/scalar.hpp> #include <cudf/strings/detail/utilities.hpp> #include <cudf/strings/json.hpp> #include <cudf/strings/string_view.cuh> #include <cudf/strings/strings_column_view.hpp> #include <cudf/types.hpp> #include <cudf/utilities/bit.hpp> #include <cudf/utilities/default_stream.hpp> #include <cudf/utilities/error.hpp> #include <io/utilities/parsing_utils.cuh> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> #include <thrust/optional.h> #include <thrust/pair.h> #include <thrust/scan.h> #include <thrust/tuple.h> namespace cudf { namespace strings { namespace detail { namespace { // change to "\n" and 1 to make output more readable #define DEBUG_NEWLINE constexpr int DEBUG_NEWLINE_LEN = 0; /** * @brief Result of calling a parse function. * * The primary use of this is to distinguish between "success" and * "success but no data" return cases. For example, if you are reading the * values of an array you might call a parse function in a while loop. You * would want to continue doing this until you either encounter an error (parse_result::ERROR) * or you get nothing back (parse_result::EMPTY) */ enum class parse_result { ERROR, // failure SUCCESS, // success MISSING_FIELD, // success, but the field is missing EMPTY, // success, but no data }; /** * @brief Base parser class inherited by the (device-side) json_state class and * (host-side) path_state class. * * Contains a number of useful utility functions common to parsing json and * JSONPath strings. */ class parser { protected: CUDF_HOST_DEVICE inline parser() {} CUDF_HOST_DEVICE inline parser(char const* _input, int64_t _input_len) : input(_input), input_len(_input_len), pos(_input) { parse_whitespace(); } CUDF_HOST_DEVICE inline parser(parser const& p) : input(p.input), input_len(p.input_len), pos(p.pos) { } CUDF_HOST_DEVICE inline bool eof(char const* p) { return p - input >= input_len; } CUDF_HOST_DEVICE inline bool eof() { return eof(pos); } CUDF_HOST_DEVICE inline bool parse_whitespace() { while (!eof()) { if (is_whitespace(*pos)) { pos++; } else { return true; } } return false; } CUDF_HOST_DEVICE inline bool is_hex_digit(char c) { return (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F'); } CUDF_HOST_DEVICE inline int64_t chars_left() { return input_len - ((pos - input) + 1); } /** * @brief Parse an escape sequence. * * Must be a valid sequence as specified by the JSON format * https://www.json.org/json-en.html * * @returns True on success or false on fail. 
*/ CUDF_HOST_DEVICE inline bool parse_escape_seq() { if (*pos != '\\') { return false; } char c = *++pos; // simple case if (c == '\"' || c == '\\' || c == '/' || c == 'b' || c == 'f' || c == 'n' || c == 'r' || c == 't') { pos++; return true; } // hex digits: must be of the form uXXXX where each X is a valid hex digit if (c == 'u' && chars_left() >= 4 && is_hex_digit(pos[1]) && is_hex_digit(pos[2]) && is_hex_digit(pos[3]) && is_hex_digit(pos[4])) { pos += 5; return true; } // an illegal escape sequence. return false; } /** * @brief Parse a quote-enclosed JSON string. * * @param[out] str The resulting string. * @param can_be_empty Parameter indicating whether it is valid for the string * to not be present. * @param quote Character expected as the surrounding quotes. A value of 0 * indicates allowing either single or double quotes (but not a mixture of both). * @returns A result code indicating success, failure or other result. */ CUDF_HOST_DEVICE inline parse_result parse_string(string_view& str, bool can_be_empty, char quote) { str = string_view(nullptr, 0); if (parse_whitespace()) { // if the user specifies 0 for quote, allow either ' or ". otherwise // use the char directly if ((quote == 0 && (*pos == '\'' || *pos == '\"')) || (quote == *pos)) { quote = *pos; char const* start = ++pos; while (!eof()) { // handle escaped characters if (*pos == '\\') { if (!parse_escape_seq()) { return parse_result::ERROR; } } else if (*pos == quote) { str = string_view(start, pos - start); pos++; return parse_result::SUCCESS; } else { pos++; } } } } return can_be_empty ? parse_result::EMPTY : parse_result::ERROR; } protected: char const* input{nullptr}; int64_t input_len{0}; char const* pos{nullptr}; CUDF_HOST_DEVICE inline bool is_whitespace(char c) { return c <= ' '; } }; /** * @brief Output buffer object. Used during the preprocess/size-computation step * and the actual output step. * * There is an important distinction between two cases: * * - producing no output at all. that is, the query matched nothing in the input. * - producing empty output. the query matched something in the input, but the * value of the result is an empty string. * * The `has_output` field is the flag which indicates whether or not the output * from the query should be considered empty or null. * */ struct json_output { size_t output_max_len; char* output; thrust::optional<size_t> output_len; __device__ void add_output(char const* str, size_t len) { if (output != nullptr) { memcpy(output + output_len.value_or(0), str, len); } output_len = output_len.value_or(0) + len; } __device__ void add_output(string_view const& str) { add_output(str.data(), str.size_bytes()); } }; enum json_element_type { NONE, OBJECT, ARRAY, VALUE }; /** * @brief Parsing class that holds the current state of the json to be parse and provides * functions for navigating through it. */ class json_state : private parser { public: __device__ json_state() : parser() {} __device__ json_state(char const* _input, int64_t _input_len, get_json_object_options _options) : parser(_input, _input_len), options(_options) { } __device__ json_state(json_state const& j) : parser(j), cur_el_start(j.cur_el_start), cur_el_type(j.cur_el_type), parent_el_type(j.parent_el_type), options(j.options) { } // retrieve the entire current element into the output __device__ parse_result extract_element(json_output* output, bool list_element) { char const* start = cur_el_start; char const* end = start; // if we're a value type, do a simple value parse. 
if (cur_el_type == VALUE) { pos = cur_el_start; if (parse_value() != parse_result::SUCCESS) { return parse_result::ERROR; } end = pos; // potentially strip quotes from individually returned string values. if (options.get_strip_quotes_from_single_strings() && !list_element && is_quote(*start) && *(end - 1) == *start) { start++; end--; } } // otherwise, march through everything inside else { int obj_count = 0; int arr_count = 0; while (!eof(end)) { // parse strings explicitly so we handle all interesting corner cases (such as strings // containing {, }, [ or ] if (is_quote(*end)) { string_view str; pos = end; if (parse_string(str, false, *end) == parse_result::ERROR) { return parse_result::ERROR; } end = pos; } else { char const c = *end++; switch (c) { case '{': obj_count++; break; case '}': obj_count--; break; case '[': arr_count++; break; case ']': arr_count--; break; default: break; } } if (obj_count == 0 && arr_count == 0) { break; } } if (obj_count > 0 || arr_count > 0) { return parse_result::ERROR; } pos = end; } // parse trailing , if (parse_whitespace()) { if (*pos == ',') { pos++; } } if (output != nullptr) { output->add_output({start, static_cast<size_type>(end - start)}); } return parse_result::SUCCESS; } // skip the next element __device__ parse_result skip_element() { return extract_element(nullptr, false); } // advance to the next element __device__ parse_result next_element() { return next_element_internal(false); } // advance inside the current element __device__ parse_result child_element(json_element_type expected_type) { if (expected_type != NONE && cur_el_type != expected_type) { return parse_result::ERROR; } // if we succeed, record our parent element type. auto const prev_el_type = cur_el_type; auto const result = next_element_internal(true); if (result == parse_result::SUCCESS) { parent_el_type = prev_el_type; } return result; } // return the next element that matches the specified name. __device__ parse_result next_matching_element(string_view const& name, bool inclusive) { // if we're not including the current element, skip it if (!inclusive) { parse_result result = next_element_internal(false); if (result != parse_result::SUCCESS) { return result; } } // loop until we find a match or there's nothing left do { if (name.size_bytes() == 1 && name.data()[0] == '*') { return parse_result::SUCCESS; } else if (cur_el_name == name) { return parse_result::SUCCESS; } // next parse_result result = next_element_internal(false); if (result != parse_result::SUCCESS) { return options.get_missing_fields_as_nulls() && result == parse_result::EMPTY ? parse_result::MISSING_FIELD : result; } } while (true); return parse_result::ERROR; } /** * @brief Parse a name field for a JSON element. * * When parsing JSON objects, it is not always a requirement that the name * actually exists. For example, the outer object bounded by {} here has * no name, while the inner element "a" does. * * ``` * { * "a" : "b" * } * ``` * * The user can specify whether or not the name string must be present via * the `can_be_empty` flag. * * When a name is present, it must be followed by a colon `:` * * @param[out] name The resulting name. * @param can_be_empty Parameter indicating whether it is valid for the name * to not be present. * @returns A result code indicating success, failure or other result. */ CUDF_HOST_DEVICE inline parse_result parse_name(string_view& name, bool can_be_empty) { char const quote = options.get_allow_single_quotes() ? 
0 : '\"'; if (parse_string(name, can_be_empty, quote) == parse_result::ERROR) { return parse_result::ERROR; } // if we got a real string, the next char must be a : if (name.size_bytes() > 0) { if (!parse_whitespace()) { return parse_result::ERROR; } if (*pos == ':') { pos++; return parse_result::SUCCESS; } } return parse_result::EMPTY; } private: /** * @brief Parse a non-string JSON value. * * Non-string values include numbers, true, false, or null. This function does not * do any validation of the value. * * @param val (Output) The string containing the parsed value * @returns A result code indicating success, failure or other result. */ CUDF_HOST_DEVICE inline parse_result parse_non_string_value(string_view& val) { if (!parse_whitespace()) { return parse_result::ERROR; } // parse to the end of the value char const* start = pos; char const* end = start; while (!eof(end)) { char const c = *end; if (c == ',' || c == '}' || c == ']' || is_whitespace(c)) { break; } // illegal chars if (c == '[' || c == '{' || c == ':' || is_quote(c)) { return parse_result::ERROR; } end++; } pos = end; val = string_view(start, end - start); return parse_result::SUCCESS; } // parse a value - either a string or a number/null/bool __device__ parse_result parse_value() { if (!parse_whitespace()) { return parse_result::ERROR; } // string or number? string_view unused; return is_quote(*pos) ? parse_string(unused, false, *pos) : parse_non_string_value(unused); } __device__ parse_result next_element_internal(bool child) { // if we're not getting a child element, skip the current element. // this will leave pos as the first character -after- the close of // the current element if (!child && cur_el_start != nullptr) { if (skip_element() == parse_result::ERROR) { return parse_result::ERROR; } cur_el_start = nullptr; } // otherwise pos will be at the first character within the current element // can only get the child of an object or array. // this could theoretically be handled as an error, but the evaluators I've found // seem to treat this as "it's nothing" if (child && (cur_el_type == VALUE || cur_el_type == NONE)) { return parse_result::EMPTY; } // what's next if (!parse_whitespace()) { return parse_result::EMPTY; } // if we're closing off a parent element, we're done char const c = *pos; if (c == ']' || c == '}') { return parse_result::EMPTY; } // if we're not accessing elements of an array, check for name. 
bool const array_access = (cur_el_type == ARRAY && child) || (parent_el_type == ARRAY && !child); if (!array_access && parse_name(cur_el_name, true) == parse_result::ERROR) { return parse_result::ERROR; } // element type if (!parse_whitespace()) { return parse_result::EMPTY; } switch (*pos++) { case '[': cur_el_type = ARRAY; break; case '{': cur_el_type = OBJECT; break; case ',': case ':': return parse_result::ERROR; case '\'': if (!options.get_allow_single_quotes()) { return parse_result::ERROR; } cur_el_type = VALUE; break; // value type default: cur_el_type = VALUE; break; } // the start of the current element is always at the value, not the name cur_el_start = pos - 1; return parse_result::SUCCESS; } CUDF_HOST_DEVICE inline bool is_quote(char c) { return (c == '\"') || (options.get_allow_single_quotes() && (c == '\'')); } char const* cur_el_start{nullptr}; // pointer to the first character of the -value- of the // current element - not the name string_view cur_el_name; // name of the current element (if applicable) json_element_type cur_el_type{json_element_type::NONE}; // type of the current element json_element_type parent_el_type{json_element_type::NONE}; // parent element type get_json_object_options options; // behavior options }; enum class path_operator_type { ROOT, CHILD, CHILD_WILDCARD, CHILD_INDEX, ERROR, END }; /** * @brief A "command" operator used to query a json string. A full query is * an array of these operators applied to the incoming json string, */ struct path_operator { CUDF_HOST_DEVICE inline path_operator() {} CUDF_HOST_DEVICE inline path_operator(path_operator_type _type, json_element_type _expected_type = NONE) : type(_type), expected_type{_expected_type} { } path_operator_type type{path_operator_type::ERROR}; // operator type // the expected element type we're applying this operation to. // for example: // - you cannot retrieve a subscripted field (eg [5]) from an object. // - you cannot retrieve a field by name (eg .book) from an array. // - you -can- use .* for both arrays and objects // a value of NONE implies any type accepted json_element_type expected_type{NONE}; // the expected type of the element we're working with string_view name; // name to match against (if applicable) int index{-1}; // index for subscript operator }; /** * @brief Parsing class that holds the current state of the JSONPath string to be parsed * and provides functions for navigating through it. This is only called on the host * during the preprocess step which builds a command buffer that the gpu uses. 
*/ class path_state : private parser { public: path_state(char const* _path, size_t _path_len) : parser(_path, _path_len) {} // get the next operator in the JSONPath string path_operator get_next_operator() { if (eof()) { return {path_operator_type::END}; } switch (*pos++) { case '$': return {path_operator_type::ROOT}; case '.': { path_operator op; string_view term{".[", 2}; if (parse_path_name(op.name, term)) { // this is another potential use case for __SPARK_BEHAVIORS / configurability // Spark currently only handles the wildcard operator inside [*], it does // not handle .* if (op.name.size_bytes() == 1 && op.name.data()[0] == '*') { op.type = path_operator_type::CHILD_WILDCARD; op.expected_type = NONE; } else { op.type = path_operator_type::CHILD; op.expected_type = OBJECT; } return op; } } break; // 3 ways this can be used // indices: [0] // name: ['book'] // wildcard: [*] case '[': { path_operator op; string_view term{"]", 1}; bool const is_string = *pos == '\''; if (parse_path_name(op.name, term)) { pos++; if (op.name.size_bytes() == 1 && op.name.data()[0] == '*') { op.type = path_operator_type::CHILD_WILDCARD; op.expected_type = NONE; } else { if (is_string) { op.type = path_operator_type::CHILD; op.expected_type = OBJECT; } else { op.type = path_operator_type::CHILD_INDEX; auto const value = cudf::io::parse_numeric<int>( op.name.data(), op.name.data() + op.name.size_bytes(), json_opts); op.index = value.value_or(-1); CUDF_EXPECTS(op.index >= 0, "Invalid numeric index specified in JSONPath"); op.expected_type = ARRAY; } } return op; } } break; // wildcard operator case '*': { pos++; return path_operator{path_operator_type::CHILD_WILDCARD}; } break; default: CUDF_FAIL("Unrecognized JSONPath operator", std::invalid_argument); break; } return {path_operator_type::ERROR}; } private: cudf::io::parse_options_view json_opts{',', '\n', '\"', '.'}; bool parse_path_name(string_view& name, string_view const& terminators) { switch (*pos) { case '*': name = string_view(pos, 1); pos++; break; case '\'': if (parse_string(name, false, '\'') != parse_result::SUCCESS) { return false; } break; default: { size_t const chars_left = input_len - (pos - input); char const* end = std::find_first_of( pos, pos + chars_left, terminators.data(), terminators.data() + terminators.size_bytes()); if (end) { name = string_view(pos, end - pos); pos = end; } else { name = string_view(pos, chars_left); pos = input + input_len; } break; } } // an empty name is not valid CUDF_EXPECTS( name.size_bytes() > 0, "Invalid empty name in JSONPath query string", std::invalid_argument); return true; } }; /** * @brief Preprocess the incoming JSONPath string on the host to generate a * command buffer for use by the GPU. * * @param json_path The incoming json path * @param stream Cuda stream to perform any gpu actions on * @returns A pair containing the command buffer, and maximum stack depth required. 
*/ std::pair<thrust::optional<rmm::device_uvector<path_operator>>, int> build_command_buffer( cudf::string_scalar const& json_path, rmm::cuda_stream_view stream) { std::string h_json_path = json_path.to_string(stream); path_state p_state(h_json_path.data(), static_cast<size_type>(h_json_path.size())); std::vector<path_operator> h_operators; path_operator op; int max_stack_depth = 1; do { op = p_state.get_next_operator(); if (op.type == path_operator_type::ERROR) { CUDF_FAIL("Encountered invalid JSONPath input string"); } if (op.type == path_operator_type::CHILD_WILDCARD) { max_stack_depth++; } // convert pointer to device pointer if (op.name.size_bytes() > 0) { op.name = string_view(json_path.data() + (op.name.data() - h_json_path.data()), op.name.size_bytes()); } if (op.type == path_operator_type::ROOT) { CUDF_EXPECTS(h_operators.size() == 0, "Root operator ($) can only exist at the root"); } // if we have not gotten a root operator to start, and we're not empty, quietly push a // root operator now. if (h_operators.size() == 0 && op.type != path_operator_type::ROOT && op.type != path_operator_type::END) { h_operators.push_back(path_operator{path_operator_type::ROOT}); } h_operators.push_back(op); } while (op.type != path_operator_type::END); auto const is_empty = h_operators.size() == 1 && h_operators[0].type == path_operator_type::END; return is_empty ? std::pair(thrust::nullopt, 0) : std::pair(thrust::make_optional(cudf::detail::make_device_uvector_sync( h_operators, stream, rmm::mr::get_current_device_resource())), max_stack_depth); } #define PARSE_TRY(_x) \ do { \ last_result = _x; \ if (last_result == parse_result::ERROR) { return parse_result::ERROR; } \ } while (0) /** * @brief Parse a single json string using the provided command buffer * * @param j_state The incoming json string and associated parser * @param commands The command buffer to be applied to the string. Always ends with a * path_operator_type::END * @param output Buffer user to store the results of the query * @returns A result code indicating success/fail/empty. */ template <int max_command_stack_depth> __device__ parse_result parse_json_path(json_state& j_state, path_operator const* commands, json_output& output) { // manually maintained context stack in lieu of calling parse_json_path recursively. 
struct context { json_state j_state; path_operator const* commands; bool list_element; bool state_flag; }; context stack[max_command_stack_depth]; int stack_pos = 0; auto push_context = [&stack, &stack_pos](json_state const& _j_state, path_operator const* _commands, bool _list_element = false, bool _state_flag = false) { if (stack_pos == max_command_stack_depth - 1) { return false; } stack[stack_pos++] = context{_j_state, _commands, _list_element, _state_flag}; return true; }; auto pop_context = [&stack, &stack_pos](context& c) { if (stack_pos > 0) { c = stack[--stack_pos]; return true; } return false; }; push_context(j_state, commands, false); parse_result last_result = parse_result::SUCCESS; context ctx; int element_count = 0; while (pop_context(ctx)) { path_operator op = *ctx.commands; switch (op.type) { // whatever the first object is case path_operator_type::ROOT: PARSE_TRY(ctx.j_state.next_element()); push_context(ctx.j_state, ctx.commands + 1); break; // .name // ['name'] // [1] // will return a single thing case path_operator_type::CHILD: { PARSE_TRY(ctx.j_state.child_element(op.expected_type)); if (last_result == parse_result::SUCCESS) { PARSE_TRY(ctx.j_state.next_matching_element(op.name, true)); if (last_result == parse_result::SUCCESS) { push_context(ctx.j_state, ctx.commands + 1, ctx.list_element); } else if (last_result == parse_result::MISSING_FIELD) { if (ctx.list_element && element_count > 0) { output.add_output({"," DEBUG_NEWLINE, 1 + DEBUG_NEWLINE_LEN}); } output.add_output({"null", 4}); element_count++; } } } break; // .* // [*] // will return an array of things case path_operator_type::CHILD_WILDCARD: { // if we're on the first element of this wildcard if (!ctx.state_flag) { // we will only ever be returning 1 array if (!ctx.list_element) { output.add_output({"[" DEBUG_NEWLINE, 1 + DEBUG_NEWLINE_LEN}); } // step into the child element PARSE_TRY(ctx.j_state.child_element(op.expected_type)); if (last_result == parse_result::EMPTY) { if (!ctx.list_element) { output.add_output({"]" DEBUG_NEWLINE, 1 + DEBUG_NEWLINE_LEN}); } last_result = parse_result::SUCCESS; break; } // first element PARSE_TRY(ctx.j_state.next_matching_element({"*", 1}, true)); if (last_result == parse_result::EMPTY) { if (!ctx.list_element) { output.add_output({"]" DEBUG_NEWLINE, 1 + DEBUG_NEWLINE_LEN}); } last_result = parse_result::SUCCESS; break; } // re-push ourselves push_context(ctx.j_state, ctx.commands, ctx.list_element, true); // push the next command push_context(ctx.j_state, ctx.commands + 1, true); } else { // next element PARSE_TRY(ctx.j_state.next_matching_element({"*", 1}, false)); if (last_result == parse_result::EMPTY) { if (!ctx.list_element) { output.add_output({"]" DEBUG_NEWLINE, 1 + DEBUG_NEWLINE_LEN}); } last_result = parse_result::SUCCESS; break; } // re-push ourselves push_context(ctx.j_state, ctx.commands, ctx.list_element, true); // push the next command push_context(ctx.j_state, ctx.commands + 1, true); } } break; // [0] // [1] // etc // returns a single thing case path_operator_type::CHILD_INDEX: { PARSE_TRY(ctx.j_state.child_element(op.expected_type)); if (last_result == parse_result::SUCCESS) { string_view const any{"*", 1}; PARSE_TRY(ctx.j_state.next_matching_element(any, true)); if (last_result == parse_result::SUCCESS) { int idx; for (idx = 1; idx <= op.index; idx++) { PARSE_TRY(ctx.j_state.next_matching_element(any, false)); if (last_result == parse_result::EMPTY) { break; } } // if we didn't end up at the index we requested, this is an invalid index if (idx - 1 != 
op.index) { return parse_result::ERROR; } push_context(ctx.j_state, ctx.commands + 1, ctx.list_element); } } } break; // some sort of error. case path_operator_type::ERROR: return parse_result::ERROR; break; // END case default: { if (ctx.list_element && element_count > 0) { output.add_output({"," DEBUG_NEWLINE, 1 + DEBUG_NEWLINE_LEN}); } PARSE_TRY(ctx.j_state.extract_element(&output, ctx.list_element)); if (ctx.list_element && last_result != parse_result::EMPTY) { element_count++; } } break; } } return parse_result::SUCCESS; } // hardcoding this for now. to reach a stack depth of 8 would require // a JSONPath containing 7 nested wildcards so this is probably reasonable. constexpr int max_command_stack_depth = 8; /** * @brief Parse a single json string using the provided command buffer * * This function exists primarily as a shim for debugging purposes. * * @param input The incoming json string * @param input_len Size of the incoming json string * @param commands The command buffer to be applied to the string. Always ends with a * path_operator_type::END * @param out_buf Buffer user to store the results of the query (nullptr in the size computation * step) * @param out_buf_size Size of the output buffer * @param options Options controlling behavior * @returns A pair containing the result code the output buffer. */ __device__ thrust::pair<parse_result, json_output> get_json_object_single( char const* input, size_t input_len, path_operator const* const commands, char* out_buf, size_t out_buf_size, get_json_object_options options) { json_state j_state(input, input_len, options); json_output output{out_buf_size, out_buf}; auto const result = parse_json_path<max_command_stack_depth>(j_state, commands, output); return {result, output}; } /** * @brief Kernel for running the JSONPath query. * * This kernel operates in a 2-pass way. On the first pass, it computes * output sizes. On the second pass it fills in the provided output buffers * (chars and validity) * * @param col Device view of the incoming string * @param commands JSONPath command buffer * @param output_offsets Buffer used to store the string offsets for the results of the query * @param out_buf Buffer used to store the results of the query * @param out_validity Output validity buffer * @param out_valid_count Output count of # of valid bits * @param options Options controlling behavior */ template <int block_size> __launch_bounds__(block_size) __global__ void get_json_object_kernel(column_device_view col, path_operator const* const commands, size_type* output_offsets, thrust::optional<char*> out_buf, thrust::optional<bitmask_type*> out_validity, thrust::optional<size_type*> out_valid_count, get_json_object_options options) { auto tid = cudf::detail::grid_1d::global_thread_id(); auto const stride = cudf::thread_index_type{blockDim.x} * cudf::thread_index_type{gridDim.x}; size_type warp_valid_count{0}; auto active_threads = __ballot_sync(0xffff'ffffu, tid < col.size()); while (tid < col.size()) { bool is_valid = false; string_view const str = col.element<string_view>(tid); size_type output_size = 0; if (str.size_bytes() > 0) { char* dst = out_buf.has_value() ? out_buf.value() + output_offsets[tid] : nullptr; size_t const dst_size = out_buf.has_value() ? 
output_offsets[tid + 1] - output_offsets[tid] : 0; parse_result result; json_output out; thrust::tie(result, out) = get_json_object_single(str.data(), str.size_bytes(), commands, dst, dst_size, options); output_size = out.output_len.value_or(0); if (out.output_len.has_value() && result == parse_result::SUCCESS) { is_valid = true; } } // filled in only during the precompute step. during the compute step, the offsets // are fed back in so we do -not- want to write them out if (!out_buf.has_value()) { output_offsets[tid] = static_cast<size_type>(output_size); } // validity filled in only during the output step if (out_validity.has_value()) { uint32_t mask = __ballot_sync(active_threads, is_valid); // 0th lane of the warp writes the validity if (!(tid % cudf::detail::warp_size)) { out_validity.value()[cudf::word_index(tid)] = mask; warp_valid_count += __popc(mask); } } tid += stride; active_threads = __ballot_sync(active_threads, tid < col.size()); } // sum the valid counts across the whole block if (out_valid_count) { size_type block_valid_count = cudf::detail::single_lane_block_sum_reduce<block_size, 0>(warp_valid_count); if (threadIdx.x == 0) { atomicAdd(out_valid_count.value(), block_valid_count); } } } /** * @copydoc cudf::strings::detail::get_json_object */ std::unique_ptr<cudf::column> get_json_object(cudf::strings_column_view const& col, cudf::string_scalar const& json_path, get_json_object_options options, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { // preprocess the json_path into a command buffer auto preprocess = build_command_buffer(json_path, stream); CUDF_EXPECTS(std::get<1>(preprocess) <= max_command_stack_depth, "Encountered JSONPath string that is too complex"); if (col.is_empty()) return make_empty_column(type_id::STRING); // allocate output offsets buffer. 
auto offsets = cudf::make_fixed_width_column( data_type{type_id::INT32}, col.size() + 1, mask_state::UNALLOCATED, stream, mr); cudf::mutable_column_view offsets_view(*offsets); // if the query is empty, return a string column containing all nulls if (!std::get<0>(preprocess).has_value()) { return std::make_unique<column>( data_type{type_id::STRING}, col.size(), rmm::device_buffer{0, stream, mr}, // no data cudf::detail::create_null_mask(col.size(), mask_state::ALL_NULL, stream, mr), col.size()); // null count } constexpr int block_size = 512; cudf::detail::grid_1d const grid{col.size(), block_size}; auto cdv = column_device_view::create(col.parent(), stream); // preprocess sizes (returned in the offsets buffer) get_json_object_kernel<block_size> <<<grid.num_blocks, grid.num_threads_per_block, 0, stream.value()>>>( *cdv, std::get<0>(preprocess).value().data(), offsets_view.head<size_type>(), thrust::nullopt, thrust::nullopt, thrust::nullopt, options); // convert sizes to offsets thrust::exclusive_scan(rmm::exec_policy(stream), offsets_view.head<size_type>(), offsets_view.head<size_type>() + col.size() + 1, offsets_view.head<size_type>(), 0); size_type const output_size = cudf::detail::get_value<size_type>(offsets_view, col.size(), stream); // allocate output string column auto chars = create_chars_child_column(output_size, stream, mr); // potential optimization : if we know that all outputs are valid, we could skip creating // the validity mask altogether rmm::device_buffer validity = cudf::detail::create_null_mask(col.size(), mask_state::UNINITIALIZED, stream, mr); // compute results cudf::mutable_column_view chars_view(*chars); rmm::device_scalar<size_type> d_valid_count{0, stream}; get_json_object_kernel<block_size> <<<grid.num_blocks, grid.num_threads_per_block, 0, stream.value()>>>( *cdv, std::get<0>(preprocess).value().data(), offsets_view.head<size_type>(), chars_view.head<char>(), static_cast<bitmask_type*>(validity.data()), d_valid_count.data(), options); return make_strings_column(col.size(), std::move(offsets), std::move(chars), col.size() - d_valid_count.value(stream), std::move(validity)); } } // namespace } // namespace detail /** * @copydoc cudf::strings::get_json_object */ std::unique_ptr<cudf::column> get_json_object(cudf::strings_column_view const& col, cudf::string_scalar const& json_path, get_json_object_options options, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::get_json_object(col, json_path, options, cudf::get_default_stream(), mr); } } // namespace strings } // namespace cudf
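The two get_json_object_kernel launches above follow a size-then-fill pattern: the first pass writes per-row output sizes into the offsets buffer, an exclusive scan turns those sizes into starting offsets (with the total landing in the last slot), and the second pass fills the chars buffer. A minimal standalone sketch of just the scan step, using plain thrust rather than the cudf/rmm types above; the sizes here are invented for illustration:

#include <cstdio>
#include <vector>
#include <thrust/device_vector.h>
#include <thrust/scan.h>

int main() {
    // Pretend the size pass produced these per-row output sizes; the extra
    // trailing slot is scanned too, so the grand total ends up in the last element.
    std::vector<int> sizes = {3, 0, 5, 2, 0};   // 4 rows + 1 extra slot
    thrust::device_vector<int> offsets(sizes.begin(), sizes.end());

    // Exclusive scan in place: sizes -> starting offsets, last slot = total.
    thrust::exclusive_scan(offsets.begin(), offsets.end(), offsets.begin(), 0);

    int total = offsets.back();   // 3 + 0 + 5 + 2 = 10
    std::printf("total output bytes: %d\n", total);
    return 0;
}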
cc7218f875e268e5cefe2e519a16eb314c7269cb.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void vec_add(int* A, int* B, int* C, int size)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if(index < size)
    {
        C[index] = A[index] + B[index];
    }
}
cc7218f875e268e5cefe2e519a16eb314c7269cb.cu
#include "includes.h"

__global__ void vec_add(int* A, int* B, int* C, int size)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if(index < size)
    {
        C[index] = A[index] + B[index];
    }
}
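includes.h is not part of this pair, so purely as an illustrative sketch, here is a self-contained host driver for a kernel of this shape: it rounds the grid size up and relies on the kernel's bounds check for the last partial block.

#include <cstdio>
#include <cuda_runtime.h>

// Same element-wise kernel as above, repeated so this sketch compiles on its own.
__global__ void vec_add(int* A, int* B, int* C, int size) {
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index < size) {
        C[index] = A[index] + B[index];
    }
}

int main() {
    const int n = 1 << 20;
    const size_t bytes = n * sizeof(int);

    int *hA = new int[n], *hB = new int[n], *hC = new int[n];
    for (int i = 0; i < n; ++i) { hA[i] = i; hB[i] = 2 * i; }

    int *dA, *dB, *dC;
    cudaMalloc((void**)&dA, bytes);
    cudaMalloc((void**)&dB, bytes);
    cudaMalloc((void**)&dC, bytes);
    cudaMemcpy(dA, hA, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(dB, hB, bytes, cudaMemcpyHostToDevice);

    // Round the grid up so every element gets a thread; the kernel's bounds
    // check handles the overshoot in the last block.
    const int block = 256;
    const int grid = (n + block - 1) / block;
    vec_add<<<grid, block>>>(dA, dB, dC, n);
    cudaDeviceSynchronize();

    cudaMemcpy(hC, dC, bytes, cudaMemcpyDeviceToHost);
    std::printf("C[123] = %d (expected %d)\n", hC[123], 3 * 123);

    cudaFree(dA); cudaFree(dB); cudaFree(dC);
    delete[] hA; delete[] hB; delete[] hC;
    return 0;
}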
abd9ced3a23083ae6a06676181eb6d39d7769d41.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include "global.h" #include <hip/hip_runtime.h> #include "hip/hip_runtime_api.h" #include "gpu_trace.h" #include "timer.h" hipArray* d_pGaussWeights = NULL; texture<float, 2, hipReadModeElementType> t_gaussian_weights; // float4 -x UL - y UR - z BL - w BR -- one lookup table for each cell in the block hipArray* d_pBilinearWeights = NULL; texture<float4, 2, hipReadModeElementType> t_bilinear_weights; __global__ void d_voc_compute_block_energy(int blocks_0, int blocks_1, float* d_pHists, float* d_pNorms) { const int posx = blockDim.x * blockIdx.x + threadIdx.x; const int posy = blockDim.y * blockIdx.y + threadIdx.y; if (posx < blocks_1 && posy < blocks_0) { int o; const int pos = posx*blocks_0 + posy; float* dst = d_pNorms + pos; const int bin_step = blocks_0*blocks_1; float sum = 0.0f; for (o=0; o<9; ++o){ float* src1 = d_pHists + pos + o*bin_step; float* src2 = d_pHists + pos + (o+9)*bin_step; sum += (*src1+*src2) * (*src1+*src2); } atomicAdd(dst,sum); } } __host__ int voc_compute_block_energy(int blocks_0, int blocks_1, float* d_pHists, float* d_pNorms) { dim3 grid; grid.x = (int)ceil((blocks_1+7) / 8); grid.y = (int)ceil((blocks_0+7) / 8); dim3 threads; threads.x = 8; threads.y = 8; //printf("grid.x = %d, grid.y = %d, blocks_0 = %d, blocks_1 = %d\n", grid.x, grid.y, blocks_0, blocks_1); #ifdef DEBUG_TIME_EACH_STEP Timer tt; startTimer(&tt); #endif hipLaunchKernelGGL(( d_voc_compute_block_energy), dim3(grid) , dim3(threads) , 0, 0, blocks_0, blocks_1, d_pHists, d_pNorms); ONFAIL("compute_blocks kernel failed"); #ifdef DEBUG_TIME_EACH_STEP stopTimer(&tt); printf("time in voc_compute_block_energy = %f\n", getTimerValue(&tt)); #endif //#define DEBUG_voc_compute_block_energy #ifdef DEBUG_voc_compute_block_energy float *h_pNorm = (float*)malloc(blocks_0*blocks_1*sizeof(float)); if(!h_pNorm) { printf("h_pNorm: malloc failed \n"); return -1; } // copy results hipMemcpy(h_pNorm, d_pNorms, blocks_0*blocks_1*sizeof(float), hipMemcpyDeviceToHost); int out[3]; out[0] = max(blocks_0-2, 0); out[1] = max(blocks_1-2, 0); out[2] = 32; char filname[50]; sprintf(filname, "energy_blocks_gpu_%d_%d.txt", out[1], out[0]); // write complete output to file FILE* fp = fopen(filname, "w"); if(!fp) printf("failed to open output file: fmag\n"); int ci, cj; for (ci = 0; ci < blocks_0; ci++) { for (cj = 0; cj < blocks_1; cj++) { fprintf(fp, "(i=%d,j=%d)\n", ci, cj); //for (cb = 0; cb < 18; cb++) { fprintf(fp, "%f ", *(h_pNorm + cj * blocks_0 + ci)); } fprintf(fp, "\n"); } } fclose(fp); free(h_pNorm); #endif return 0; } __global__ void d_voc_compute_features /*__traceable__*/ (int out_0, int out_1, int blocks_0, int blocks_1, float* d_pHists, float* d_pNorms, float* d_pOut) { const int posx = blockDim.x * blockIdx.x + threadIdx.x; const int posy = blockDim.y * blockIdx.y + threadIdx.y; volatile __shared__ float s_norm[10][10]; // s_norm[blockDim.x+1][blockDim.y+1] //__trace("pos", "int", posx); //__trace("pos", "int", posy); if (posx < blocks_1 && posy < blocks_0) { s_norm[threadIdx.x][threadIdx.y] = *(d_pNorms + (posx)*blocks_0 + posy); //__syncthreads(); // !!!potential 2-bank conflicts if (threadIdx.x == blockDim.x - 1 && (posx+2) < blocks_1) { s_norm[threadIdx.x + 1][threadIdx.y] = *(d_pNorms + (posx+1)*blocks_0 + posy); s_norm[threadIdx.x + 2][threadIdx.y] = *(d_pNorms + (posx+2)*blocks_0 + posy); //__syncthreads(); } if (threadIdx.y == blockDim.y - 1 && (posy+2) < blocks_0) { s_norm[threadIdx.x][threadIdx.y + 1] = *(d_pNorms + 
(posx)*blocks_0 + posy+1); s_norm[threadIdx.x][threadIdx.y + 2] = *(d_pNorms + (posx)*blocks_0 + posy+2); //__syncthreads(); } if (threadIdx.y == blockDim.y - 1 && threadIdx.x == blockDim.x - 1 && (posy+2) < blocks_0 && (posx+2) < blocks_1) { s_norm[threadIdx.x + 1][threadIdx.y + 1] = *(d_pNorms + (posx+1)*blocks_0 + posy+1); s_norm[threadIdx.x + 1][threadIdx.y + 2] = *(d_pNorms + (posx+1)*blocks_0 + posy+2); s_norm[threadIdx.x + 2][threadIdx.y + 1] = *(d_pNorms + (posx+2)*blocks_0 + posy+1); s_norm[threadIdx.x + 2][threadIdx.y + 2] = *(d_pNorms + (posx+2)*blocks_0 + posy+2); //__syncthreads(); } __syncthreads(); } //__syncthreads(); if (posx < out_1 && posy < out_0) { float* dst = d_pOut + posx*out_0 + posy; float* src = 0; int px, py; float n1, n2, n3, n4; int bx = blockDim.x * blockIdx.x; int by = blockDim.y * blockIdx.y; float h1, h2, h3, h4; float t1 = 0; float t2 = 0; float t3 = 0; float t4 = 0; int o; px = posx - bx + 1; py = posy - by + 1; n1 = 1.0f / sqrtf(s_norm[px][py] + s_norm[px][py+1] + s_norm[px+1][py] + s_norm[px+1][py+1] + eps); /* __trace("T", "float", s_norm[px][py]); __trace("T", "float", s_norm[px][py+1]); __trace("T", "float", s_norm[px+1][py]); __trace("T", "float", s_norm[px+1][py+1]); */ px = posx - bx + 1; py = posy - by; n2 = 1.0f / sqrtf(s_norm[px][py] + s_norm[px][py+1] + s_norm[px+1][py] + s_norm[px+1][py+1] + eps); /* __trace("T", "float", s_norm[px][py]); __trace("T", "float", s_norm[px][py+1]); __trace("T", "float", s_norm[px+1][py]); __trace("T", "float", s_norm[px+1][py+1]); */ px = posx - bx; py = posy - by + 1; n3 = 1.0f / sqrtf(s_norm[px][py] + s_norm[px][py+1] + s_norm[px+1][py] + s_norm[px+1][py+1] + eps); /* __trace("T", "float", s_norm[px][py]); __trace("T", "float", s_norm[px][py+1]); __trace("T", "float", s_norm[px+1][py]); __trace("T", "float", s_norm[px+1][py+1]); */ px = posx - bx; py = posy - by; n4 = 1.0f / sqrtf(s_norm[px][py] + s_norm[px][py+1] + s_norm[px+1][py] + s_norm[px+1][py+1] + eps); /* __trace("T", "float", s_norm[px][py]); __trace("T", "float", s_norm[px][py+1]); __trace("T", "float", s_norm[px+1][py]); __trace("T", "float", s_norm[px+1][py+1]); */ src = d_pHists + (posx+1)*blocks_0 + (posy+1); for (o = 0; o < 18; o++) { float src_val = *src; h1 = (src_val * n1 <= 0.2f ? src_val * n1 : 0.2f); h2 = (src_val * n2 <= 0.2f ? src_val * n2 : 0.2f); h3 = (src_val * n3 <= 0.2f ? src_val * n3 : 0.2f); h4 = (src_val * n4 <= 0.2f ? src_val * n4 : 0.2f); *dst = 0.5f * (h1+h2+h3+h4); t1 += h1; t2 += h2; t3 += h3; t4 += h4; dst += out_0 * out_1; src += blocks_0 * blocks_1; } src = d_pHists + (posx+1)*blocks_0 + (posy+1); for (o = 0; o < 9; o++) { float sum_val = *src + *(src + 9 * blocks_0 * blocks_1); h1 = (sum_val * n1 <= 0.2f ? sum_val * n1 : 0.2f); h2 = (sum_val * n2 <= 0.2f ? sum_val * n2 : 0.2f); h3 = (sum_val * n3 <= 0.2f ? sum_val * n3 : 0.2f); h4 = (sum_val * n4 <= 0.2f ? 
sum_val * n4 : 0.2f); *dst = 0.5f * (h1+h2+h3+h4); dst += out_0 * out_1; src += blocks_0 * blocks_1; } *dst = 0.2357f * t1; dst += out_0 * out_1; *dst = 0.2357f * t2; dst += out_0 * out_1; *dst = 0.2357f * t3; dst += out_0 * out_1; *dst = 0.2357f * t4; dst += out_0 * out_1; } } __host__ int voc_compute_features(int blocks_0, int blocks_1, float* d_pHists, float* d_pNorms, float* d_pOut) { dim3 grid; grid.x = (int)ceil((blocks_1+7) / 8); grid.y = (int)ceil((blocks_0+7) / 8); dim3 threads; threads.x = 8; threads.y = 8; int out[3]; out[0] = max(blocks_0-2, 0); out[1] = max(blocks_1-2, 0); out[2] = 32; //INITIALIZE_TRACE_DATA(); #ifdef DEBUG_TIME_EACH_STEP Timer tt; startTimer(&tt); #endif hipLaunchKernelGGL(( d_voc_compute_features), dim3(grid) , dim3(threads) , 0, 0, /*__traceable_call__*/ out[0], out[1], blocks_0, blocks_1, d_pHists, d_pNorms, d_pOut); ONFAIL("compute_blocks kernel failed"); #ifdef DEBUG_TIME_EACH_STEP stopTimer(&tt); printf("time in voc_compute_features = %f\n", getTimerValue(&tt)); #endif /* hipError_t ErrorCode = hipGetLastError(); if (ErrorCode != hipSuccess) printf("*** Kernel did not launch, %s ***\n", hipGetErrorString(ErrorCode)); ErrorCode = hipDeviceSynchronize(); if (ErrorCode != hipSuccess) printf("*** Kernel exited while executing, %s ***\n", hipGetErrorString(ErrorCode)); FINALIZE_TRACE_DATA(); PRINT_TRACE_DATA(stdout); */ #define DEBUG_voc_compute_features #ifdef DEBUG_voc_compute_features float *h_pOut = (float*)malloc(out[0]*out[1]*out[2]*sizeof(float)); if(!h_pOut) { printf("h_pNorm: malloc failed \n"); return -1; } // copy results hipMemcpy(h_pOut, d_pOut, out[0]*out[1]*out[2]*sizeof(float), hipMemcpyDeviceToHost); // write complete output to file FILE* fp = fopen("cell_feats.txt", "w"); if(!fp) printf("failed to open output file: fmag\n"); int ci, cj, cb; for (ci = 0; ci < out[0]; ci++) { for (cj = 0; cj < out[1]; cj++) { fprintf(fp, "(i=%d,j=%d)\n", ci, cj); for (cb = 0; cb < 32; cb++) { fprintf(fp, "%f ", *(h_pOut + cj * out[0] + ci + cb*out[0]*out[1])); } fprintf(fp, "\n"); } } fclose(fp); free(h_pOut); #endif return 0; }
abd9ced3a23083ae6a06676181eb6d39d7769d41.cu
#include <stdio.h> #include "global.h" #include <cuda.h> #include "cuda_runtime_api.h" #include "gpu_trace.h" #include "timer.h" cudaArray* d_pGaussWeights = NULL; texture<float, 2, cudaReadModeElementType> t_gaussian_weights; // float4 -x UL - y UR - z BL - w BR -- one lookup table for each cell in the block cudaArray* d_pBilinearWeights = NULL; texture<float4, 2, cudaReadModeElementType> t_bilinear_weights; __global__ void d_voc_compute_block_energy(int blocks_0, int blocks_1, float* d_pHists, float* d_pNorms) { const int posx = blockDim.x * blockIdx.x + threadIdx.x; const int posy = blockDim.y * blockIdx.y + threadIdx.y; if (posx < blocks_1 && posy < blocks_0) { int o; const int pos = posx*blocks_0 + posy; float* dst = d_pNorms + pos; const int bin_step = blocks_0*blocks_1; float sum = 0.0f; for (o=0; o<9; ++o){ float* src1 = d_pHists + pos + o*bin_step; float* src2 = d_pHists + pos + (o+9)*bin_step; sum += (*src1+*src2) * (*src1+*src2); } atomicAdd(dst,sum); } } __host__ int voc_compute_block_energy(int blocks_0, int blocks_1, float* d_pHists, float* d_pNorms) { dim3 grid; grid.x = (int)ceil((blocks_1+7) / 8); grid.y = (int)ceil((blocks_0+7) / 8); dim3 threads; threads.x = 8; threads.y = 8; //printf("grid.x = %d, grid.y = %d, blocks_0 = %d, blocks_1 = %d\n", grid.x, grid.y, blocks_0, blocks_1); #ifdef DEBUG_TIME_EACH_STEP Timer tt; startTimer(&tt); #endif d_voc_compute_block_energy<<< grid , threads >>>(blocks_0, blocks_1, d_pHists, d_pNorms); ONFAIL("compute_blocks kernel failed"); #ifdef DEBUG_TIME_EACH_STEP stopTimer(&tt); printf("time in voc_compute_block_energy = %f\n", getTimerValue(&tt)); #endif //#define DEBUG_voc_compute_block_energy #ifdef DEBUG_voc_compute_block_energy float *h_pNorm = (float*)malloc(blocks_0*blocks_1*sizeof(float)); if(!h_pNorm) { printf("h_pNorm: malloc failed \n"); return -1; } // copy results cudaMemcpy(h_pNorm, d_pNorms, blocks_0*blocks_1*sizeof(float), cudaMemcpyDeviceToHost); int out[3]; out[0] = max(blocks_0-2, 0); out[1] = max(blocks_1-2, 0); out[2] = 32; char filname[50]; sprintf(filname, "energy_blocks_gpu_%d_%d.txt", out[1], out[0]); // write complete output to file FILE* fp = fopen(filname, "w"); if(!fp) printf("failed to open output file: fmag\n"); int ci, cj; for (ci = 0; ci < blocks_0; ci++) { for (cj = 0; cj < blocks_1; cj++) { fprintf(fp, "(i=%d,j=%d)\n", ci, cj); //for (cb = 0; cb < 18; cb++) { fprintf(fp, "%f ", *(h_pNorm + cj * blocks_0 + ci)); } fprintf(fp, "\n"); } } fclose(fp); free(h_pNorm); #endif return 0; } __global__ void d_voc_compute_features /*__traceable__*/ (int out_0, int out_1, int blocks_0, int blocks_1, float* d_pHists, float* d_pNorms, float* d_pOut) { const int posx = blockDim.x * blockIdx.x + threadIdx.x; const int posy = blockDim.y * blockIdx.y + threadIdx.y; volatile __shared__ float s_norm[10][10]; // s_norm[blockDim.x+1][blockDim.y+1] //__trace("pos", "int", posx); //__trace("pos", "int", posy); if (posx < blocks_1 && posy < blocks_0) { s_norm[threadIdx.x][threadIdx.y] = *(d_pNorms + (posx)*blocks_0 + posy); //__syncthreads(); // !!!potential 2-bank conflicts if (threadIdx.x == blockDim.x - 1 && (posx+2) < blocks_1) { s_norm[threadIdx.x + 1][threadIdx.y] = *(d_pNorms + (posx+1)*blocks_0 + posy); s_norm[threadIdx.x + 2][threadIdx.y] = *(d_pNorms + (posx+2)*blocks_0 + posy); //__syncthreads(); } if (threadIdx.y == blockDim.y - 1 && (posy+2) < blocks_0) { s_norm[threadIdx.x][threadIdx.y + 1] = *(d_pNorms + (posx)*blocks_0 + posy+1); s_norm[threadIdx.x][threadIdx.y + 2] = *(d_pNorms + (posx)*blocks_0 + posy+2); 
//__syncthreads(); } if (threadIdx.y == blockDim.y - 1 && threadIdx.x == blockDim.x - 1 && (posy+2) < blocks_0 && (posx+2) < blocks_1) { s_norm[threadIdx.x + 1][threadIdx.y + 1] = *(d_pNorms + (posx+1)*blocks_0 + posy+1); s_norm[threadIdx.x + 1][threadIdx.y + 2] = *(d_pNorms + (posx+1)*blocks_0 + posy+2); s_norm[threadIdx.x + 2][threadIdx.y + 1] = *(d_pNorms + (posx+2)*blocks_0 + posy+1); s_norm[threadIdx.x + 2][threadIdx.y + 2] = *(d_pNorms + (posx+2)*blocks_0 + posy+2); //__syncthreads(); } __syncthreads(); } //__syncthreads(); if (posx < out_1 && posy < out_0) { float* dst = d_pOut + posx*out_0 + posy; float* src = 0; int px, py; float n1, n2, n3, n4; int bx = blockDim.x * blockIdx.x; int by = blockDim.y * blockIdx.y; float h1, h2, h3, h4; float t1 = 0; float t2 = 0; float t3 = 0; float t4 = 0; int o; px = posx - bx + 1; py = posy - by + 1; n1 = 1.0f / sqrtf(s_norm[px][py] + s_norm[px][py+1] + s_norm[px+1][py] + s_norm[px+1][py+1] + eps); /* __trace("T", "float", s_norm[px][py]); __trace("T", "float", s_norm[px][py+1]); __trace("T", "float", s_norm[px+1][py]); __trace("T", "float", s_norm[px+1][py+1]); */ px = posx - bx + 1; py = posy - by; n2 = 1.0f / sqrtf(s_norm[px][py] + s_norm[px][py+1] + s_norm[px+1][py] + s_norm[px+1][py+1] + eps); /* __trace("T", "float", s_norm[px][py]); __trace("T", "float", s_norm[px][py+1]); __trace("T", "float", s_norm[px+1][py]); __trace("T", "float", s_norm[px+1][py+1]); */ px = posx - bx; py = posy - by + 1; n3 = 1.0f / sqrtf(s_norm[px][py] + s_norm[px][py+1] + s_norm[px+1][py] + s_norm[px+1][py+1] + eps); /* __trace("T", "float", s_norm[px][py]); __trace("T", "float", s_norm[px][py+1]); __trace("T", "float", s_norm[px+1][py]); __trace("T", "float", s_norm[px+1][py+1]); */ px = posx - bx; py = posy - by; n4 = 1.0f / sqrtf(s_norm[px][py] + s_norm[px][py+1] + s_norm[px+1][py] + s_norm[px+1][py+1] + eps); /* __trace("T", "float", s_norm[px][py]); __trace("T", "float", s_norm[px][py+1]); __trace("T", "float", s_norm[px+1][py]); __trace("T", "float", s_norm[px+1][py+1]); */ src = d_pHists + (posx+1)*blocks_0 + (posy+1); for (o = 0; o < 18; o++) { float src_val = *src; h1 = (src_val * n1 <= 0.2f ? src_val * n1 : 0.2f); h2 = (src_val * n2 <= 0.2f ? src_val * n2 : 0.2f); h3 = (src_val * n3 <= 0.2f ? src_val * n3 : 0.2f); h4 = (src_val * n4 <= 0.2f ? src_val * n4 : 0.2f); *dst = 0.5f * (h1+h2+h3+h4); t1 += h1; t2 += h2; t3 += h3; t4 += h4; dst += out_0 * out_1; src += blocks_0 * blocks_1; } src = d_pHists + (posx+1)*blocks_0 + (posy+1); for (o = 0; o < 9; o++) { float sum_val = *src + *(src + 9 * blocks_0 * blocks_1); h1 = (sum_val * n1 <= 0.2f ? sum_val * n1 : 0.2f); h2 = (sum_val * n2 <= 0.2f ? sum_val * n2 : 0.2f); h3 = (sum_val * n3 <= 0.2f ? sum_val * n3 : 0.2f); h4 = (sum_val * n4 <= 0.2f ? 
sum_val * n4 : 0.2f); *dst = 0.5f * (h1+h2+h3+h4); dst += out_0 * out_1; src += blocks_0 * blocks_1; } *dst = 0.2357f * t1; dst += out_0 * out_1; *dst = 0.2357f * t2; dst += out_0 * out_1; *dst = 0.2357f * t3; dst += out_0 * out_1; *dst = 0.2357f * t4; dst += out_0 * out_1; } } __host__ int voc_compute_features(int blocks_0, int blocks_1, float* d_pHists, float* d_pNorms, float* d_pOut) { dim3 grid; grid.x = (int)ceil((blocks_1+7) / 8); grid.y = (int)ceil((blocks_0+7) / 8); dim3 threads; threads.x = 8; threads.y = 8; int out[3]; out[0] = max(blocks_0-2, 0); out[1] = max(blocks_1-2, 0); out[2] = 32; //INITIALIZE_TRACE_DATA(); #ifdef DEBUG_TIME_EACH_STEP Timer tt; startTimer(&tt); #endif d_voc_compute_features<<< grid , threads >>> /*__traceable_call__*/ (out[0], out[1], blocks_0, blocks_1, d_pHists, d_pNorms, d_pOut); ONFAIL("compute_blocks kernel failed"); #ifdef DEBUG_TIME_EACH_STEP stopTimer(&tt); printf("time in voc_compute_features = %f\n", getTimerValue(&tt)); #endif /* cudaError_t ErrorCode = cudaGetLastError(); if (ErrorCode != cudaSuccess) printf("*** Kernel did not launch, %s ***\n", cudaGetErrorString(ErrorCode)); ErrorCode = cudaThreadSynchronize(); if (ErrorCode != cudaSuccess) printf("*** Kernel exited while executing, %s ***\n", cudaGetErrorString(ErrorCode)); FINALIZE_TRACE_DATA(); PRINT_TRACE_DATA(stdout); */ #define DEBUG_voc_compute_features #ifdef DEBUG_voc_compute_features float *h_pOut = (float*)malloc(out[0]*out[1]*out[2]*sizeof(float)); if(!h_pOut) { printf("h_pNorm: malloc failed \n"); return -1; } // copy results cudaMemcpy(h_pOut, d_pOut, out[0]*out[1]*out[2]*sizeof(float), cudaMemcpyDeviceToHost); // write complete output to file FILE* fp = fopen("cell_feats.txt", "w"); if(!fp) printf("failed to open output file: fmag\n"); int ci, cj, cb; for (ci = 0; ci < out[0]; ci++) { for (cj = 0; cj < out[1]; cj++) { fprintf(fp, "(i=%d,j=%d)\n", ci, cj); for (cb = 0; cb < 32; cb++) { fprintf(fp, "%f ", *(h_pOut + cj * out[0] + ci + cb*out[0]*out[1])); } fprintf(fp, "\n"); } } fclose(fp); free(h_pOut); #endif return 0; }
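The block-energy kernel above folds squared orientation sums into a per-cell norm with atomicAdd. The following standalone sketch shows the same accumulation idea with a deliberately simplified layout (one value per (cell, bin) pair; names and sizes are invented for illustration), not the actual HOG layout used by d_voc_compute_block_energy.

#include <cstdio>
#include <cuda_runtime.h>

// One thread per (cell, bin) pair; each thread atomically folds its bin's
// squared contribution into that cell's energy.
__global__ void cell_energy(const float* hist, float* norm, int cells, int bins) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= cells * bins) return;
    int cell = idx / bins;
    float v = hist[idx];
    atomicAdd(&norm[cell], v * v);
}

int main() {
    const int cells = 4, bins = 9;
    float h_hist[cells * bins];
    for (int i = 0; i < cells * bins; ++i) h_hist[i] = 1.0f;  // energy per cell = 9

    float *d_hist, *d_norm;
    cudaMalloc((void**)&d_hist, sizeof(h_hist));
    cudaMalloc((void**)&d_norm, cells * sizeof(float));
    cudaMemcpy(d_hist, h_hist, sizeof(h_hist), cudaMemcpyHostToDevice);
    cudaMemset(d_norm, 0, cells * sizeof(float));

    cell_energy<<<1, 64>>>(d_hist, d_norm, cells, bins);

    float h_norm[cells];
    cudaMemcpy(h_norm, d_norm, sizeof(h_norm), cudaMemcpyDeviceToHost);
    std::printf("cell 0 energy = %f\n", h_norm[0]);  // 9.0

    cudaFree(d_hist); cudaFree(d_norm);
    return 0;
}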
a7be75f5b7a305f9a0cd5e8b480933a1da6bfbda.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* */ #include "../include/cuSolverForMCMPC.cuh" __global__ void make_Diagonalization(float *vec, float *mat) { unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; if(threadIdx.x == blockIdx.x) { mat[id] = sqrtf(vec[threadIdx.x]); }else{ mat[id] = 0.0f; } } __global__ void calc_Var_Cov_matrix(float *d_mat,Data1 *d_Data, float *Us_dev, int Blocks) { unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; float pows; //int counter = 0; //int denominator; //denominator = 1/(Blocks - 1); //printf("hoge::id=%d %d@ =%f $ = %f\n",id,threadIdx.x,d_Data[5].Input[threadIdx.x],Us_dev[0]); for(int z = 0; z < Blocks; z++) { pows += (d_Data[z].Input[threadIdx.x] - Us_dev[threadIdx.x]) * (d_Data[z].Input[blockIdx.x] - Us_dev[blockIdx.x]); } if(threadIdx.x == blockIdx.x && pows < 0.00001f){ pows += (Blocks -1); //pows = d_mat[id]; } __syncthreads(); d_mat[id] = pows /(Blocks - 1); //unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; //float values; /*if(threadIdx.x == 0 && blockIdx.x ==0) { values[threadIdx.x] = 1.0f; }*/ /*if(threadIdx.x == blockIdx.x) { for(int z = 0; z < Blocks; z++) { d_mat[id] += (d_Data[z].Input[threadIdx.x] - Us_dev[threadIdx.x]) * (d_Data[z].Input[blockIdx.x] - Us_dev[blockIdx.x]); } //d_mat[id] = (d_Data[z].Input[threadIdx.x] - Us_dev[threadIdx.x]) * (d_Data[z].Input[blockIdx.x] - Us_dev[blockIdx.x]);; //values[threadIdx.x] = 1.0f; }else{ d_mat[id] = 0.0f; //values[threadIdx.x] = 0.0f; } __syncthreads(); d_mat[id] = d_mat[id] /(Blocks - 1); /*if(threadIdx.x == 0) { for(int i =0; i < blockDim.x; i++) Mat[id] = values[i]; } */ } // A * B B __global__ void pwr_matrix_answerB(float *A, float *B) { unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; int row_index, column_index; float pows; if(blockIdx.x == 0) { row_index = (int)blockDim.x * blockIdx.x; }else{ row_index = ((int)blockDim.x * blockIdx.x) -1; } if(threadIdx.x == 0) { column_index = (int)blockDim.x * threadIdx.x; }else{ column_index = ((int)blockDim.x * threadIdx.x) -1; } for(int k = 0; k < HORIZON; k++){ //row[id] += A[column_index + k] * B[row_index + k]; pows += A[column_index + k] * B[row_index + k]; } __syncthreads(); B[id] = pows; /*if(threadIdx.x == 0) { B[id] = row[id]; }*/ } // A * B A __global__ void pwr_matrix_answerA(float *A, float *B) { unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; int row_index, column_index; float pows; if(blockIdx.x == 0) { row_index = (int)blockDim.x * blockIdx.x; }else{ row_index = ((int)blockDim.x * blockIdx.x) -1; } if(threadIdx.x == 0) { column_index = (int)blockDim.x * threadIdx.x; }else{ column_index = ((int)blockDim.x * threadIdx.x) -1; } for(int k = 0; k < HORIZON; k++){ //row[id] += A[column_index + k] * B[row_index + k]; pows += A[column_index + k] * B[row_index + k]; } __syncthreads(); A[id] = pows; /*if(threadIdx.x == 0) { A[id] = row[id]; }*/ } __global__ void tanspose(float *Out, float *In) { unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; int In_index = blockIdx.x + blockDim.x * threadIdx.x; Out[id] = In[In_index]; __syncthreads(); } void get_eigen_values(float *A, float *D) { hipsolverDnHandle_t cusolverH = NULL; cusolverStatus_t cusolver_status = CUSOLVER_STATUS_SUCCESS; hipError_t cudaStat1 = hipSuccess; hipError_t cudaStat2 = hipSuccess; hipError_t cudaStat3 = hipSuccess; const int m = HORIZON; const int lda = m; float eig_vec[m]; float *d_A = NULL; float *d_W = NULL; int *devInfo = NULL; float *d_work = NULL; int lwork = 0; int info_gpu = 0; 
cusolver_status = hipsolverDnCreate(&cusolverH); assert(CUSOLVER_STATUS_SUCCESS == cusolver_status); cudaStat1 = hipMalloc ((void**)&d_A, sizeof(float) * lda * m); cudaStat2 = hipMalloc ((void**)&d_W, sizeof(double) * m); cudaStat3 = hipMalloc ((void**)&devInfo, sizeof(int)); assert(hipSuccess == cudaStat1); assert(hipSuccess == cudaStat2); assert(hipSuccess == cudaStat3); cudaStat1 = hipMemcpy(d_A, A, sizeof(double) * lda * m, hipMemcpyHostToDevice); assert(hipSuccess == cudaStat1); hipsolverEigMode_t jobz = HIPSOLVER_EIG_MODE_VECTOR; // compute eigenvalues and eigenvectors. hipblasFillMode_t uplo = HIPBLAS_FILL_MODE_LOWER; cusolver_status = hipsolverDnSsyevd_bufferSize( cusolverH, jobz, uplo, m, d_A, lda, d_W, &lwork); assert (cusolver_status == CUSOLVER_STATUS_SUCCESS); cudaStat1 = hipMalloc((void**)&d_work, sizeof(double)*lwork); assert(hipSuccess == cudaStat1); cusolver_status = hipsolverDnSsyevd( cusolverH, jobz, uplo, m, d_A, lda, d_W, d_work, lwork, devInfo); cudaStat1 = hipDeviceSynchronize(); assert(CUSOLVER_STATUS_SUCCESS == cusolver_status); assert(hipSuccess == cudaStat1); cudaStat1 = hipMemcpy(eig_vec, d_W, sizeof(double)*m, hipMemcpyDeviceToHost); cudaStat2 = hipMemcpy(D, d_A, sizeof(double)*lda*m, hipMemcpyDeviceToHost); cudaStat3 = hipMemcpy(&info_gpu, devInfo, sizeof(int), hipMemcpyDeviceToHost); assert(hipSuccess == cudaStat1); assert(hipSuccess == cudaStat2); assert(hipSuccess == cudaStat3); hipLaunchKernelGGL(( make_Diagonalization), dim3(HORIZON),dim3(HORIZON), 0, 0, d_W, d_A); hipMemcpy(A, d_A, sizeof(float)*lda*m, hipMemcpyDeviceToHost); if (d_A ) hipFree(d_A); if (d_W ) hipFree(d_W); if (devInfo) hipFree(devInfo); if (d_work ) hipFree(d_work); if (cusolverH) hipsolverDnDestroy(cusolverH); }
a7be75f5b7a305f9a0cd5e8b480933a1da6bfbda.cu
/* */ #include "../include/cuSolverForMCMPC.cuh" __global__ void make_Diagonalization(float *vec, float *mat) { unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; if(threadIdx.x == blockIdx.x) { mat[id] = sqrtf(vec[threadIdx.x]); }else{ mat[id] = 0.0f; } } __global__ void calc_Var_Cov_matrix(float *d_mat,Data1 *d_Data, float *Us_dev, int Blocks) { unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; float pows; //int counter = 0; //int denominator; //denominator = 1/(Blocks - 1); //printf("hoge::id=%d %d@ =%f $ = %f\n",id,threadIdx.x,d_Data[5].Input[threadIdx.x],Us_dev[0]); for(int z = 0; z < Blocks; z++) { pows += (d_Data[z].Input[threadIdx.x] - Us_dev[threadIdx.x]) * (d_Data[z].Input[blockIdx.x] - Us_dev[blockIdx.x]); } if(threadIdx.x == blockIdx.x && pows < 0.00001f){ pows += (Blocks -1); //pows = d_mat[id]; } __syncthreads(); d_mat[id] = pows /(Blocks - 1); //unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; //float values; /*if(threadIdx.x == 0 && blockIdx.x ==0) { values[threadIdx.x] = 1.0f; }*/ /*if(threadIdx.x == blockIdx.x) { for(int z = 0; z < Blocks; z++) { d_mat[id] += (d_Data[z].Input[threadIdx.x] - Us_dev[threadIdx.x]) * (d_Data[z].Input[blockIdx.x] - Us_dev[blockIdx.x]); } //d_mat[id] = (d_Data[z].Input[threadIdx.x] - Us_dev[threadIdx.x]) * (d_Data[z].Input[blockIdx.x] - Us_dev[blockIdx.x]);; //values[threadIdx.x] = 1.0f; }else{ d_mat[id] = 0.0f; //values[threadIdx.x] = 0.0f; } __syncthreads(); d_mat[id] = d_mat[id] /(Blocks - 1); /*if(threadIdx.x == 0) { for(int i =0; i < blockDim.x; i++) Mat[id] = values[i]; } */ } // A * B → B __global__ void pwr_matrix_answerB(float *A, float *B) { unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; int row_index, column_index; float pows; if(blockIdx.x == 0) { row_index = (int)blockDim.x * blockIdx.x; }else{ row_index = ((int)blockDim.x * blockIdx.x) -1; } if(threadIdx.x == 0) { column_index = (int)blockDim.x * threadIdx.x; }else{ column_index = ((int)blockDim.x * threadIdx.x) -1; } for(int k = 0; k < HORIZON; k++){ //row[id] += A[column_index + k] * B[row_index + k]; pows += A[column_index + k] * B[row_index + k]; } __syncthreads(); B[id] = pows; /*if(threadIdx.x == 0) { B[id] = row[id]; }*/ } // A * B → A __global__ void pwr_matrix_answerA(float *A, float *B) { unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; int row_index, column_index; float pows; if(blockIdx.x == 0) { row_index = (int)blockDim.x * blockIdx.x; }else{ row_index = ((int)blockDim.x * blockIdx.x) -1; } if(threadIdx.x == 0) { column_index = (int)blockDim.x * threadIdx.x; }else{ column_index = ((int)blockDim.x * threadIdx.x) -1; } for(int k = 0; k < HORIZON; k++){ //row[id] += A[column_index + k] * B[row_index + k]; pows += A[column_index + k] * B[row_index + k]; } __syncthreads(); A[id] = pows; /*if(threadIdx.x == 0) { A[id] = row[id]; }*/ } __global__ void tanspose(float *Out, float *In) { unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; int In_index = blockIdx.x + blockDim.x * threadIdx.x; Out[id] = In[In_index]; __syncthreads(); } void get_eigen_values(float *A, float *D) { cusolverDnHandle_t cusolverH = NULL; cusolverStatus_t cusolver_status = CUSOLVER_STATUS_SUCCESS; cudaError_t cudaStat1 = cudaSuccess; cudaError_t cudaStat2 = cudaSuccess; cudaError_t cudaStat3 = cudaSuccess; const int m = HORIZON; const int lda = m; float eig_vec[m]; float *d_A = NULL; float *d_W = NULL; int *devInfo = NULL; float *d_work = NULL; int lwork = 0; int info_gpu = 0; cusolver_status = cusolverDnCreate(&cusolverH); assert(CUSOLVER_STATUS_SUCCESS == 
cusolver_status); cudaStat1 = cudaMalloc ((void**)&d_A, sizeof(float) * lda * m); cudaStat2 = cudaMalloc ((void**)&d_W, sizeof(double) * m); cudaStat3 = cudaMalloc ((void**)&devInfo, sizeof(int)); assert(cudaSuccess == cudaStat1); assert(cudaSuccess == cudaStat2); assert(cudaSuccess == cudaStat3); cudaStat1 = cudaMemcpy(d_A, A, sizeof(double) * lda * m, cudaMemcpyHostToDevice); assert(cudaSuccess == cudaStat1); cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_VECTOR; // compute eigenvalues and eigenvectors. cublasFillMode_t uplo = CUBLAS_FILL_MODE_LOWER; cusolver_status = cusolverDnSsyevd_bufferSize( cusolverH, jobz, uplo, m, d_A, lda, d_W, &lwork); assert (cusolver_status == CUSOLVER_STATUS_SUCCESS); cudaStat1 = cudaMalloc((void**)&d_work, sizeof(double)*lwork); assert(cudaSuccess == cudaStat1); cusolver_status = cusolverDnSsyevd( cusolverH, jobz, uplo, m, d_A, lda, d_W, d_work, lwork, devInfo); cudaStat1 = cudaDeviceSynchronize(); assert(CUSOLVER_STATUS_SUCCESS == cusolver_status); assert(cudaSuccess == cudaStat1); cudaStat1 = cudaMemcpy(eig_vec, d_W, sizeof(double)*m, cudaMemcpyDeviceToHost); cudaStat2 = cudaMemcpy(D, d_A, sizeof(double)*lda*m, cudaMemcpyDeviceToHost); cudaStat3 = cudaMemcpy(&info_gpu, devInfo, sizeof(int), cudaMemcpyDeviceToHost); assert(cudaSuccess == cudaStat1); assert(cudaSuccess == cudaStat2); assert(cudaSuccess == cudaStat3); make_Diagonalization<<<HORIZON,HORIZON>>>(d_W, d_A); cudaMemcpy(A, d_A, sizeof(float)*lda*m, cudaMemcpyDeviceToHost); if (d_A ) cudaFree(d_A); if (d_W ) cudaFree(d_W); if (devInfo) cudaFree(devInfo); if (d_work ) cudaFree(d_work); if (cusolverH) cusolverDnDestroy(cusolverH); }
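get_eigen_values above drives the single-precision solver cusolverDnSsyevd, yet several allocations and copies are sized with sizeof(double) (the d_W buffer, the workspace, and the d_A upload), so the copies move twice as many bytes as the float buffers hold. As a sketch only, assuming cuSOLVER is available and using a made-up 3x3 test matrix, here is the same Ssyevd workflow with every size expressed in sizeof(float):

#include <cstdio>
#include <cuda_runtime.h>
#include <cusolverDn.h>

int main() {
    const int m = 3, lda = m;
    // Column-major symmetric matrix: diag(1, 2, 3), so the eigenvalues are known.
    float A[lda * m] = {1, 0, 0,  0, 2, 0,  0, 0, 3};
    float W[m];

    cusolverDnHandle_t handle;
    cusolverDnCreate(&handle);

    float *d_A, *d_W, *d_work;
    int *devInfo, lwork = 0, info = 0;
    cudaMalloc((void**)&d_A, sizeof(float) * lda * m);
    cudaMalloc((void**)&d_W, sizeof(float) * m);
    cudaMalloc((void**)&devInfo, sizeof(int));
    cudaMemcpy(d_A, A, sizeof(float) * lda * m, cudaMemcpyHostToDevice);

    cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_VECTOR;  // eigenvalues + eigenvectors
    cublasFillMode_t uplo = CUBLAS_FILL_MODE_LOWER;

    cusolverDnSsyevd_bufferSize(handle, jobz, uplo, m, d_A, lda, d_W, &lwork);
    cudaMalloc((void**)&d_work, sizeof(float) * lwork);

    cusolverDnSsyevd(handle, jobz, uplo, m, d_A, lda, d_W, d_work, lwork, devInfo);
    cudaDeviceSynchronize();

    cudaMemcpy(W, d_W, sizeof(float) * m, cudaMemcpyDeviceToHost);
    cudaMemcpy(&info, devInfo, sizeof(int), cudaMemcpyDeviceToHost);
    std::printf("info = %d, eigenvalues = %f %f %f\n", info, W[0], W[1], W[2]);

    cudaFree(d_A); cudaFree(d_W); cudaFree(d_work); cudaFree(devInfo);
    cusolverDnDestroy(handle);
    return 0;
}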
6e89790b04c5fcbec8ae0631f5a9b94d27c1dd4f.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "common.h" #include "efficient.h" namespace StreamCompaction { namespace Efficient { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } // Kernels for efficient prefix scan __global__ void kernUpScan(int n, int *data, const int offset, const int offset2) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= n) return; if (index % offset2 != 0) return; data[index + offset2 - 1] += data[index + offset - 1]; } __global__ void kernDownScan(int n, int *data, const int offset, const int offset2) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= n) return; if (index % offset2 != 0) return; int temp = data[index + offset - 1]; data[index + offset - 1] = data[index + offset2 - 1]; data[index + offset2 - 1] += temp; } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scanNoTimer(int n, int *odata, const int *idata) { int *dev_data; hipMalloc((void**)&dev_data, n * sizeof(int)); hipMemcpy(dev_data, idata, n * sizeof(int), hipMemcpyHostToDevice); const int blockSize = 256; dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize); for (int d = 0; d < ilog2ceil(n); ++d) { int offset = pow(2, d); kernUpScan << <fullBlocksPerGrid, blockSize >> > (n, dev_data, offset, offset * 2); } hipMemset(dev_data + n - 1, 0, 1); for (int d = ilog2ceil(n); d >= 0; --d) { int offset = pow(2, d); kernDownScan << <fullBlocksPerGrid, blockSize >> > (n, dev_data, offset, offset * 2); } hipMemcpy(odata, dev_data, n * sizeof(int), hipMemcpyDeviceToHost); hipFree(dev_data); } void scan(int n, int *odata, const int *idata) { timer().startGpuTimer(); scanNoTimer(n, odata, idata); timer().endGpuTimer(); } /** * Performs stream compaction on idata, storing the result into odata. * All zeroes are discarded. * * @param n The number of elements in idata. * @param odata The array into which to store elements. * @param idata The array of elements to compact. * @returns The number of elements remaining after compaction. */ int compact(int n, int *odata, const int *idata) { int *dev_idata, *dev_odata, *dev_mapped, *dev_scanned; hipMalloc((void**)&dev_idata, n * sizeof(int)); hipMalloc((void**)&dev_odata, n * sizeof(int)); hipMalloc((void**)&dev_mapped, n * sizeof(int)); hipMalloc((void**)&dev_scanned, n * sizeof(int)); hipMemcpy(dev_idata, idata, n * sizeof(int), hipMemcpyHostToDevice); const int blockSize = 256; dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize); timer().startGpuTimer(); Common::kernMapToBoolean << <fullBlocksPerGrid, blockSize >> > (n, dev_mapped, dev_idata); scanNoTimer(n, dev_scanned, dev_mapped); Common::kernScatter << <fullBlocksPerGrid, blockSize >> > (n, dev_odata, dev_idata, dev_mapped, dev_scanned); timer().endGpuTimer(); int count, lastbool; hipMemcpy(&count, dev_scanned + n - 1, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(&lastbool, dev_mapped + n - 1, sizeof(int), hipMemcpyDeviceToHost); count += lastbool; hipMemcpy(odata, dev_odata, n * sizeof(int), hipMemcpyDeviceToHost); hipFree(dev_idata); hipFree(dev_odata); hipFree(dev_mapped); hipFree(dev_scanned); return count; } } }
6e89790b04c5fcbec8ae0631f5a9b94d27c1dd4f.cu
#include <cuda.h> #include <cuda_runtime.h> #include "common.h" #include "efficient.h" namespace StreamCompaction { namespace Efficient { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } // Kernels for efficient prefix scan __global__ void kernUpScan(int n, int *data, const int offset, const int offset2) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= n) return; if (index % offset2 != 0) return; data[index + offset2 - 1] += data[index + offset - 1]; } __global__ void kernDownScan(int n, int *data, const int offset, const int offset2) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= n) return; if (index % offset2 != 0) return; int temp = data[index + offset - 1]; data[index + offset - 1] = data[index + offset2 - 1]; data[index + offset2 - 1] += temp; } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scanNoTimer(int n, int *odata, const int *idata) { int *dev_data; cudaMalloc((void**)&dev_data, n * sizeof(int)); cudaMemcpy(dev_data, idata, n * sizeof(int), cudaMemcpyHostToDevice); const int blockSize = 256; dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize); for (int d = 0; d < ilog2ceil(n); ++d) { int offset = pow(2, d); kernUpScan << <fullBlocksPerGrid, blockSize >> > (n, dev_data, offset, offset * 2); } cudaMemset(dev_data + n - 1, 0, 1); for (int d = ilog2ceil(n); d >= 0; --d) { int offset = pow(2, d); kernDownScan << <fullBlocksPerGrid, blockSize >> > (n, dev_data, offset, offset * 2); } cudaMemcpy(odata, dev_data, n * sizeof(int), cudaMemcpyDeviceToHost); cudaFree(dev_data); } void scan(int n, int *odata, const int *idata) { timer().startGpuTimer(); scanNoTimer(n, odata, idata); timer().endGpuTimer(); } /** * Performs stream compaction on idata, storing the result into odata. * All zeroes are discarded. * * @param n The number of elements in idata. * @param odata The array into which to store elements. * @param idata The array of elements to compact. * @returns The number of elements remaining after compaction. */ int compact(int n, int *odata, const int *idata) { int *dev_idata, *dev_odata, *dev_mapped, *dev_scanned; cudaMalloc((void**)&dev_idata, n * sizeof(int)); cudaMalloc((void**)&dev_odata, n * sizeof(int)); cudaMalloc((void**)&dev_mapped, n * sizeof(int)); cudaMalloc((void**)&dev_scanned, n * sizeof(int)); cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice); const int blockSize = 256; dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize); timer().startGpuTimer(); Common::kernMapToBoolean << <fullBlocksPerGrid, blockSize >> > (n, dev_mapped, dev_idata); scanNoTimer(n, dev_scanned, dev_mapped); Common::kernScatter << <fullBlocksPerGrid, blockSize >> > (n, dev_odata, dev_idata, dev_mapped, dev_scanned); timer().endGpuTimer(); int count, lastbool; cudaMemcpy(&count, dev_scanned + n - 1, sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(&lastbool, dev_mapped + n - 1, sizeof(int), cudaMemcpyDeviceToHost); count += lastbool; cudaMemcpy(odata, dev_odata, n * sizeof(int), cudaMemcpyDeviceToHost); cudaFree(dev_idata); cudaFree(dev_odata); cudaFree(dev_mapped); cudaFree(dev_scanned); return count; } } }
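Two notes on the scan above: cudaMemset counts bytes, so clearing the whole last element before the down-sweep takes sizeof(int) rather than 1; and when changing code like this it helps to diff the device result against a CPU prefix sum. A standalone sketch of such a check, using thrust::exclusive_scan as a stand-in for the kernels above, since common.h and efficient.h are not available here:

#include <cstdio>
#include <vector>
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <thrust/copy.h>

int main() {
    const int n = 1000;
    std::vector<int> in(n), cpu(n);
    for (int i = 0; i < n; ++i) in[i] = (i * 37) % 11;

    // CPU reference: exclusive prefix sum.
    int running = 0;
    for (int i = 0; i < n; ++i) { cpu[i] = running; running += in[i]; }

    // Device scan (thrust standing in for the work-efficient kernels).
    thrust::device_vector<int> d(in.begin(), in.end());
    thrust::exclusive_scan(d.begin(), d.end(), d.begin());

    std::vector<int> gpu(n);
    thrust::copy(d.begin(), d.end(), gpu.begin());

    int mismatches = 0;
    for (int i = 0; i < n; ++i) mismatches += (cpu[i] != gpu[i]);
    std::printf("mismatches: %d\n", mismatches);  // expect 0
    return 0;
}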
b927bf874902b31242d36c289d7ee46e8c5b5c81.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>

__global__ void sum (int a, int b)
{
    printf ("sum of a + b = %d\n", a + b);
}

int main ()
{
    int a, b;
    std::cout << "Enter num a:";
    std::cin >> a;
    std::cout << "Enter num b:";
    std::cin >> b;

    hipLaunchKernelGGL(( sum) , dim3(1), dim3(1), 0, 0, a, b);

    getchar ();
    return 0;
}
b927bf874902b31242d36c289d7ee46e8c5b5c81.cu
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>

__global__ void sum (int a, int b)
{
    printf ("sum of a + b = %d\n", a + b);
}

int main ()
{
    int a, b;
    std::cout << "Enter num a:";
    std::cin >> a;
    std::cout << "Enter num b:";
    std::cin >> b;

    sum <<<1, 1>>>(a, b);

    getchar ();
    return 0;
}
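Output from a device-side printf is buffered and flushed at synchronization points, so with the launch above the text may only appear once the program synchronizes or exits. A sketch of the same program with an explicit cudaDeviceSynchronize() before blocking on input (an illustrative variant, not a change to the file above):

#include <cstdio>
#include <iostream>
#include <cuda_runtime.h>

__global__ void sum(int a, int b) {
    printf("sum of a + b = %d\n", a + b);
}

int main() {
    int a, b;
    std::cout << "Enter num a:";
    std::cin >> a;
    std::cout << "Enter num b:";
    std::cin >> b;

    sum<<<1, 1>>>(a, b);
    // Device printf output is flushed at synchronization points, so wait for
    // the kernel before pausing on user input.
    cudaDeviceSynchronize();

    getchar();
    return 0;
}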
734324e6e1d5359abd188c09f9d504bea2f13e02.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stdio.h"

#define COLUMNS 4
#define ROWS 3

__global__ void add(int *a, int *b)
{
    int x = threadIdx.x;
    int sum =0;
    for (unsigned int i=0; i < ROWS; i++){
        sum += a[i*COLUMNS + x];
    }
    b[x] = sum;
}

int main()
{
    int a[ROWS][COLUMNS], b[COLUMNS];
    int *dev_a;
    int *dev_b;
    int sum =0;
    int cudSum =0;

    hipMalloc((void **)&dev_a, ROWS*COLUMNS*sizeof(int));
    hipMalloc((void **)&dev_b, COLUMNS*sizeof(int));

    for(int y=0; y<ROWS; y++) {
        for(int x=0; x<COLUMNS; x++){
            a[y][x] = x;
            sum+= x;
        }
    }
    printf("Sum is: %d \n", sum);

    hipMemcpy(dev_a, a, ROWS*COLUMNS*sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(dev_b, b, COLUMNS*sizeof(int), hipMemcpyHostToDevice);

    dim3 thread(COLUMNS,ROWS);
    hipLaunchKernelGGL(( add), dim3(1),dim3(COLUMNS), 0, 0, dev_a,dev_b);

    hipMemcpy(b,dev_b, COLUMNS*sizeof(int),hipMemcpyDeviceToHost);

    for (int i=0; i<COLUMNS; i++) {
        cudSum+= b[i];
    }
    printf("cuda sum is: %d \n", cudSum);

    hipFree(dev_a);
    hipFree(dev_b);
    return 0;
}
734324e6e1d5359abd188c09f9d504bea2f13e02.cu
#include "stdio.h"

#define COLUMNS 4
#define ROWS 3

__global__ void add(int *a, int *b)
{
    int x = threadIdx.x;
    int sum =0;
    for (unsigned int i=0; i < ROWS; i++){
        sum += a[i*COLUMNS + x];
    }
    b[x] = sum;
}

int main()
{
    int a[ROWS][COLUMNS], b[COLUMNS];
    int *dev_a;
    int *dev_b;
    int sum =0;
    int cudSum =0;

    cudaMalloc((void **)&dev_a, ROWS*COLUMNS*sizeof(int));
    cudaMalloc((void **)&dev_b, COLUMNS*sizeof(int));

    for(int y=0; y<ROWS; y++) {
        for(int x=0; x<COLUMNS; x++){
            a[y][x] = x;
            sum+= x;
        }
    }
    printf("Sum is: %d \n", sum);

    cudaMemcpy(dev_a, a, ROWS*COLUMNS*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, COLUMNS*sizeof(int), cudaMemcpyHostToDevice);

    dim3 thread(COLUMNS,ROWS);
    add<<<1,COLUMNS>>>(dev_a,dev_b);

    cudaMemcpy(b,dev_b, COLUMNS*sizeof(int),cudaMemcpyDeviceToHost);

    for (int i=0; i<COLUMNS; i++) {
        cudSum+= b[i];
    }
    printf("cuda sum is: %d \n", cudSum);

    cudaFree(dev_a);
    cudaFree(dev_b);
    return 0;
}
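The launch add<<<1,COLUMNS>>> above runs everything in a single block, which caps COLUMNS at the 1024-thread block limit. A hedged sketch of a grid-stride variant of the same column sum that scales past that limit; the constants and names here are illustrative only:

#include <cstdio>
#include <cuda_runtime.h>

#define COLUMNS 4000   // more columns than fit in one block
#define ROWS    3

// Grid-stride variant of the column-sum kernel above, so the launch is not
// limited to a single block of threads.
__global__ void col_sum(const int* a, int* b, int columns, int rows) {
    for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < columns;
         x += gridDim.x * blockDim.x) {
        int sum = 0;
        for (int i = 0; i < rows; ++i) sum += a[i * columns + x];
        b[x] = sum;
    }
}

int main() {
    static int a[ROWS][COLUMNS], b[COLUMNS];
    for (int y = 0; y < ROWS; ++y)
        for (int x = 0; x < COLUMNS; ++x) a[y][x] = x;

    int *dev_a, *dev_b;
    cudaMalloc((void **)&dev_a, ROWS * COLUMNS * sizeof(int));
    cudaMalloc((void **)&dev_b, COLUMNS * sizeof(int));
    cudaMemcpy(dev_a, a, ROWS * COLUMNS * sizeof(int), cudaMemcpyHostToDevice);

    const int block = 256;
    const int grid = (COLUMNS + block - 1) / block;
    col_sum<<<grid, block>>>(dev_a, dev_b, COLUMNS, ROWS);
    cudaMemcpy(b, dev_b, COLUMNS * sizeof(int), cudaMemcpyDeviceToHost);

    long long total = 0;
    for (int i = 0; i < COLUMNS; ++i) total += b[i];
    std::printf("cuda sum is: %lld\n", total);   // ROWS * sum(0..COLUMNS-1)

    cudaFree(dev_a);
    cudaFree(dev_b);
    return 0;
}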
f49a5be50c2b88a8c5e648ad6182c6aed270a1d5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit #include <thrust/device_vector.h> #include <thrust/functional.h> // thrust::plus #include <thrust/reduce.h> #include <cmath> #include <cstdlib> #include <cstring> #include "caffe/common.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <> void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y) { hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N; CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const double alpha, const double* A, const double* x, const double beta, double* y) { hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? 
HIPBLAS_OP_T : HIPBLAS_OP_N; CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X, float* Y) { CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } template <> void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X, double* Y) { CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) { if (X != Y) { CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault)); // NOLINT(caffe/alt_fn) } } template <> void caffe_gpu_scal<float>(const int N, const float alpha, float *X) { CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal<double>(const int N, const double alpha, double *X) { CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X, const float beta, float* Y) { caffe_gpu_scal<float>(N, beta, Y); caffe_gpu_axpy<float>(N, alpha, X, Y); } template <> void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X, const double beta, double* Y) { caffe_gpu_scal<double>(N, beta, Y); caffe_gpu_axpy<double>(N, alpha, X, Y); } template <> void caffe_gpu_dot<float>(const int n, const float* x, const float* y, float* out) { CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_dot<double>(const int n, const double* x, const double* y, double * out) { CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_asum<float>(const int n, const float* x, float* y) { CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_asum<double>(const int n, const double* x, double* y) { CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_scale<float>(const int n, const float alpha, const float *x, float* y) { CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <> void caffe_gpu_scale<double>(const int n, const double alpha, const double *x, double* y) { CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <typename Dtype> __global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } template <typename Dtype> void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) { if (alpha == 0) { CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn) return; } // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template void caffe_gpu_set<int>(const int N, const int alpha, int* Y); template void caffe_gpu_set<float>(const int N, const float alpha, float* Y); template void caffe_gpu_set<double>(const int N, const double alpha, double* Y); template <typename Dtype> __global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] += alpha; } } template <> void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), 
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template <> void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template <typename Dtype> __global__ void add_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] + b[index]; } } template <> void caffe_gpu_add<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_add<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void sub_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] - b[index]; } } template <> void caffe_gpu_sub<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_sub<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void mul_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * b[index]; } } template <> void caffe_gpu_mul<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_mul<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void div_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] / b[index]; } } template <> void caffe_gpu_div<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_div<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = abs(a[index]); } } template <> void caffe_gpu_abs<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_abs<double>(const int N, const double* a, double* y) { // 
NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <typename Dtype> __global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = exp(a[index]); } } template <> void caffe_gpu_exp<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( exp_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_exp<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <typename Dtype> __global__ void log_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = log(a[index]); } } template <> void caffe_gpu_log<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( log_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_log<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( log_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <typename Dtype> __global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = pow(a[index], alpha); } } template <> void caffe_gpu_powx<float>(const int N, const float* a, const float alpha, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, alpha, y); } template <> void caffe_gpu_powx<double>(const int N, const double* a, const double alpha, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, alpha, y); } template <typename Dtype> __global__ void sqrt_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = sqrt(a[index]); } } template <> void caffe_gpu_sqrt<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sqrt_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_sqrt<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sqrt_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index]) - (x[index] < Dtype(0))); DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index])); __global__ void popc_kernel(const int n, const float* a, const float* b, uint8_t* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = __popc(static_cast<uint32_t>(a[index]) ^ static_cast<uint32_t>(b[index])); } } __global__ void popcll_kernel(const int n, const double* a, const double* b, uint8_t* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = __popcll(static_cast<uint64_t>(a[index]) ^ static_cast<uint64_t>(b[index])); } } template <> uint32_t caffe_gpu_hamming_distance<float>(const int n, const float* x, const float* y) { // TODO: Fix caffe_gpu_hamming_distance (see failing unit test // 
TestHammingDistanceGPU in test_math_functions.cpp). NOT_IMPLEMENTED; thrust::device_vector<uint8_t> popcounts(n); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( popc_kernel), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, n, x, y, thrust::raw_pointer_cast(popcounts.data())); return thrust::reduce(popcounts.begin(), popcounts.end(), (uint32_t) 0, thrust::plus<uint32_t>()); } template <> uint32_t caffe_gpu_hamming_distance<double>(const int n, const double* x, const double* y) { // TODO: Fix caffe_gpu_hamming_distance (see failing unit test // TestHammingDistanceGPU in test_math_functions.cpp). NOT_IMPLEMENTED; thrust::device_vector<uint8_t> popcounts(n); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( popcll_kernel), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, n, x, y, thrust::raw_pointer_cast(popcounts.data())); return thrust::reduce(popcounts.begin(), popcounts.end(), /* NOLINT_NEXT_LINE(build/include_what_you_use) */ (uint32_t) 0, thrust::plus<uint32_t>()); } void caffe_gpu_rng_uniform(const int n, unsigned int* r) { CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n)); } template <> void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b, float* r) { CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n)); const float range = b - a; if (range != static_cast<float>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<float>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b, double* r) { CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n)); const double range = b - a; if (range != static_cast<double>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<double>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma, float* r) { CURAND_CHECK( hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma)); } template <> void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma, double* r) { CURAND_CHECK( hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma)); } } // namespace caffe
f49a5be50c2b88a8c5e648ad6182c6aed270a1d5.cu
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit #include <thrust/device_vector.h> #include <thrust/functional.h> // thrust::plus #include <thrust/reduce.h> #include <cmath> #include <cstdlib> #include <cstring> #include "caffe/common.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <> void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const double alpha, const double* A, const double* x, const double beta, double* y) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? 
CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X, float* Y) { CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } template <> void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X, double* Y) { CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) { if (X != Y) { CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn) } } template <> void caffe_gpu_scal<float>(const int N, const float alpha, float *X) { CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal<double>(const int N, const double alpha, double *X) { CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X, const float beta, float* Y) { caffe_gpu_scal<float>(N, beta, Y); caffe_gpu_axpy<float>(N, alpha, X, Y); } template <> void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X, const double beta, double* Y) { caffe_gpu_scal<double>(N, beta, Y); caffe_gpu_axpy<double>(N, alpha, X, Y); } template <> void caffe_gpu_dot<float>(const int n, const float* x, const float* y, float* out) { CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_dot<double>(const int n, const double* x, const double* y, double * out) { CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_asum<float>(const int n, const float* x, float* y) { CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_asum<double>(const int n, const double* x, double* y) { CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_scale<float>(const int n, const float alpha, const float *x, float* y) { CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <> void caffe_gpu_scale<double>(const int n, const double alpha, const double *x, double* y) { CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <typename Dtype> __global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } template <typename Dtype> void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) { if (alpha == 0) { CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn) return; } // NOLINT_NEXT_LINE(whitespace/operators) set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template void caffe_gpu_set<int>(const int N, const int alpha, int* Y); template void caffe_gpu_set<float>(const int N, const float alpha, float* Y); template void caffe_gpu_set<double>(const int N, const double alpha, double* Y); template <typename Dtype> __global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] += alpha; } } template <> void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) { // NOLINT_NEXT_LINE(whitespace/operators) add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template <> void 
caffe_gpu_add_scalar(const int N, const double alpha, double* Y) { // NOLINT_NEXT_LINE(whitespace/operators) add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template <typename Dtype> __global__ void add_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] + b[index]; } } template <> void caffe_gpu_add<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_add<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void sub_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] - b[index]; } } template <> void caffe_gpu_sub<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_sub<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void mul_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * b[index]; } } template <> void caffe_gpu_mul<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_mul<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void div_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] / b[index]; } } template <> void caffe_gpu_div<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_div<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = abs(a[index]); } } template <> void caffe_gpu_abs<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_abs<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = exp(a[index]); } } template <> void caffe_gpu_exp<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) exp_kernel<float><<<CAFFE_GET_BLOCKS(N), 
CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_exp<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void log_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = log(a[index]); } } template <> void caffe_gpu_log<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_log<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = pow(a[index], alpha); } } template <> void caffe_gpu_powx<float>(const int N, const float* a, const float alpha, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, alpha, y); } template <> void caffe_gpu_powx<double>(const int N, const double* a, const double alpha, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, alpha, y); } template <typename Dtype> __global__ void sqrt_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = sqrt(a[index]); } } template <> void caffe_gpu_sqrt<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) sqrt_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_sqrt<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) sqrt_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index]) - (x[index] < Dtype(0))); DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index])); __global__ void popc_kernel(const int n, const float* a, const float* b, uint8_t* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = __popc(static_cast<uint32_t>(a[index]) ^ static_cast<uint32_t>(b[index])); } } __global__ void popcll_kernel(const int n, const double* a, const double* b, uint8_t* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = __popcll(static_cast<uint64_t>(a[index]) ^ static_cast<uint64_t>(b[index])); } } template <> uint32_t caffe_gpu_hamming_distance<float>(const int n, const float* x, const float* y) { // TODO: Fix caffe_gpu_hamming_distance (see failing unit test // TestHammingDistanceGPU in test_math_functions.cpp). NOT_IMPLEMENTED; thrust::device_vector<uint8_t> popcounts(n); // NOLINT_NEXT_LINE(whitespace/operators) popc_kernel<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>( n, x, y, thrust::raw_pointer_cast(popcounts.data())); return thrust::reduce(popcounts.begin(), popcounts.end(), (uint32_t) 0, thrust::plus<uint32_t>()); } template <> uint32_t caffe_gpu_hamming_distance<double>(const int n, const double* x, const double* y) { // TODO: Fix caffe_gpu_hamming_distance (see failing unit test // TestHammingDistanceGPU in test_math_functions.cpp). 
NOT_IMPLEMENTED; thrust::device_vector<uint8_t> popcounts(n); // NOLINT_NEXT_LINE(whitespace/operators) popcll_kernel<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>( n, x, y, thrust::raw_pointer_cast(popcounts.data())); return thrust::reduce(popcounts.begin(), popcounts.end(), /* NOLINT_NEXT_LINE(build/include_what_you_use) */ (uint32_t) 0, thrust::plus<uint32_t>()); } void caffe_gpu_rng_uniform(const int n, unsigned int* r) { CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n)); } template <> void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b, float* r) { CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n)); const float range = b - a; if (range != static_cast<float>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<float>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b, double* r) { CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n)); const double range = b - a; if (range != static_cast<double>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<double>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma, float* r) { CURAND_CHECK( curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma)); } template <> void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma, double* r) { CURAND_CHECK( curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma)); } } // namespace caffe
475ce11e64b0d2a31fc50ad271699de01f583529.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @author Tingxing Dong @author Azzam Haidar @generated from magmablas/ztrsv.cu normal z -> c, Tue Feb 9 16:05:35 2016 */ #include "magma_internal.h" #include "magma_templates.h" #define PRECISION_c #define NB 256 //NB is the 1st level blocking in recursive blocking, NUM_THREADS is the 2ed level, NB=256, NUM_THREADS=64 is optimal for batched #define NUM_THREADS 128 //64 //128 #define BLOCK_SIZE_N 128 #define DIM_X_N 128 #define DIM_Y_N 1 #define BLOCK_SIZE_T 32 #define DIM_X_T 16 #define DIM_Y_T 8 #include "ctrsv_template_device.cuh" #define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column extern __shared__ magmaFloatComplex shared_data[]; //============================================================================== template< const int BLOCK_SIZE, const int DIM_X, const int DIM_Y, const int TILE_SIZE, const int flag, const magma_uplo_t uplo, const magma_trans_t trans, const magma_diag_t diag> __global__ void ctrsv_notrans_kernel_outplace( int n, const magmaFloatComplex * __restrict__ A, int lda, magmaFloatComplex *b, int incb, magmaFloatComplex *x) { ctrsv_notrans_device< BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag >( n, A, lda, b, incb, x); } //============================================================================== template<const int BLOCK_SIZE, const int DIM_X, const int DIM_Y, const int TILE_SIZE, const int flag, const magma_uplo_t uplo, const magma_trans_t trans, const magma_diag_t diag> __global__ void ctrsv_trans_kernel_outplace( int n, const magmaFloatComplex * __restrict__ A, int lda, magmaFloatComplex *b, int incb, magmaFloatComplex *x) { ctrsv_trans_device< BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag >( n, A, lda, b, incb, x); } //============================================================================== extern "C" void magmablas_ctrsv_outofplace( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t n, magmaFloatComplex_const_ptr A, magma_int_t lda, magmaFloatComplex_ptr b, magma_int_t incb, magmaFloatComplex_ptr x, magma_queue_t queue, magma_int_t flag=0) { /* Check arguments */ magma_int_t info = 0; if ( uplo != MagmaUpper && uplo != MagmaLower ) { info = -1; } else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) { info = -2; } else if ( diag != MagmaUnit && diag != MagmaNonUnit ) { info = -3; } else if (n < 0) { info = -5; } else if (lda < max(1,n)) { info = -8; } if (info != 0) { magma_xerbla( __func__, -(info) ); return; } // quick return if possible. 
if (n == 0) return; dim3 threads( NUM_THREADS ); dim3 blocks( 1, 1, 1 ); size_t shmem = n * sizeof(magmaFloatComplex); if (trans == MagmaNoTrans) { if (uplo == MagmaUpper) { if (diag == MagmaNonUnit) { if (flag == 0) { hipLaunchKernelGGL(( ctrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaUpper, MagmaNoTrans, MagmaNonUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } else { hipLaunchKernelGGL(( ctrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaUpper, MagmaNoTrans, MagmaNonUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } } else if (diag == MagmaUnit) { if (flag == 0) { hipLaunchKernelGGL(( ctrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaUpper, MagmaNoTrans, MagmaUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } else { hipLaunchKernelGGL(( ctrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaUpper, MagmaNoTrans, MagmaUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } } } else //Lower { if (diag == MagmaNonUnit) { if (flag == 0) { hipLaunchKernelGGL(( ctrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaLower, MagmaNoTrans, MagmaNonUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } else { hipLaunchKernelGGL(( ctrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaLower, MagmaNoTrans, MagmaNonUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } } else if (diag == MagmaUnit) { if (flag == 0) { hipLaunchKernelGGL(( ctrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaLower, MagmaNoTrans, MagmaUnit>) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } else { hipLaunchKernelGGL(( ctrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaLower, MagmaNoTrans, MagmaUnit>) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } } } } else if (trans == MagmaTrans) { if (uplo == MagmaUpper) { if (diag == MagmaNonUnit) { if (flag == 0) { hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaTrans, MagmaNonUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } else { hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaTrans, MagmaNonUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } } else if (diag == MagmaUnit) { if (flag == 0) { hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaTrans, MagmaUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } else { hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaTrans, MagmaUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } } } else { if (diag == MagmaNonUnit) { if (flag == 0) { hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaTrans, MagmaNonUnit >) , 
dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } else { hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaTrans, MagmaNonUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } } else if (diag == MagmaUnit) { if (flag == 0) { hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaTrans, MagmaUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } else { hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaTrans, MagmaUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } } } } else if (trans == MagmaConjTrans) { if (uplo == MagmaUpper) { if (diag == MagmaNonUnit) { if (flag == 0) { hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaConjTrans, MagmaNonUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } else { hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaConjTrans, MagmaNonUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } } else if (diag == MagmaUnit) { if (flag == 0) { hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaConjTrans, MagmaUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } else { hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaConjTrans, MagmaUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } } } else { if (diag == MagmaNonUnit) { if (flag == 0) { hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaConjTrans, MagmaNonUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } else { hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaConjTrans, MagmaNonUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } } else if (diag == MagmaUnit) { if (flag == 0) { hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaConjTrans, MagmaUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } else { hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaConjTrans, MagmaUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } } } } } /* README: flag decides if the ctrsv_outplace see an updated x or not. 
0: No; other: Yes In recursive, flag must be nonzero except the 1st call */ extern "C" void magmablas_ctrsv_recursive_outofplace( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t n, magmaFloatComplex_const_ptr A, magma_int_t lda, magmaFloatComplex_ptr b, magma_int_t incb, magmaFloatComplex_ptr x, magma_queue_t queue) { /* Check arguments */ magma_int_t info = 0; if ( uplo != MagmaUpper && uplo != MagmaLower ) { info = -1; } else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) { info = -2; } else if ( diag != MagmaUnit && diag != MagmaNonUnit ) { info = -3; } else if (n < 0) { info = -5; } else if (lda < max(1,n)) { info = -8; } if (info != 0) { magma_xerbla( __func__, -(info) ); return; } // quick return if possible. if (n == 0) return; //Init x with zero //magmablas_claset( MagmaFull, n, incb, MAGMA_C_ZERO, MAGMA_C_ZERO, x, n, queue ); magma_int_t col = n; if (trans == MagmaNoTrans) { for (magma_int_t i=0; i < n; i+= NB) { magma_int_t jb = min(NB, n-i); if (uplo == MagmaUpper) { col -= jb; //assume x_array contains zero elements, magmablas_cgemv will cause slow down magma_cgemv( MagmaNoTrans, jb, i, MAGMA_C_ONE, A(col, col+jb), lda, x+col+jb, 1, MAGMA_C_ONE, x+col, 1, queue ); } else { col = i; magma_cgemv( MagmaNoTrans, jb, i, MAGMA_C_ONE, A(col, 0), lda, x, 1, MAGMA_C_ONE, x+col, 1, queue ); } magmablas_ctrsv_outofplace( uplo, trans, diag, jb, A(col, col), lda, b+col, incb, x+col, queue, i ); } } else { for (magma_int_t i=0; i < n; i += NB) { magma_int_t jb = min(NB, n-i); if (uplo == MagmaLower) { col -= jb; magma_cgemv( MagmaConjTrans, i, jb, MAGMA_C_ONE, A(col+jb, col), lda, x+col+jb, 1, MAGMA_C_ONE, x+col, 1, queue ); } else { col = i; magma_cgemv( MagmaConjTrans, i, jb, MAGMA_C_ONE, A(0, col), lda, x, 1, MAGMA_C_ONE, x+col, 1, queue ); } magmablas_ctrsv_outofplace( uplo, trans, diag, jb, A(col, col), lda, b+col, incb, x+col, queue, i ); } } } //============================================================================== /** Purpose ------- ctrsv solves one of the matrix equations on gpu op(A)*x = B, or x*op(A) = B, where alpha is a scalar, X and B are vectors, A is a unit, or non-unit, upper or lower triangular matrix and op(A) is one of op(A) = A, or op(A) = A^T, or op(A) = A^H. The vector x is overwritten on b. Arguments ---------- @param[in] uplo magma_uplo_t. On entry, uplo specifies whether the matrix A is an upper or lower triangular matrix as follows: - = MagmaUpper: A is an upper triangular matrix. - = MagmaLower: A is a lower triangular matrix. @param[in] trans magma_trans_t. On entry, trans specifies the form of op(A) to be used in the matrix multiplication as follows: - = MagmaNoTrans: op(A) = A. - = MagmaTrans: op(A) = A^T. - = MagmaConjTrans: op(A) = A^H. @param[in] diag magma_diag_t. On entry, diag specifies whether or not A is unit triangular as follows: - = MagmaUnit: A is assumed to be unit triangular. - = MagmaNonUnit: A is not assumed to be unit triangular. @param[in] n INTEGER. On entry, n N specifies the order of the matrix A. n >= 0. @param[in] dA COMPLEX array of dimension ( lda, n ) Before entry with uplo = MagmaUpper, the leading n by n upper triangular part of the array A must contain the upper triangular matrix and the strictly lower triangular part of A is not referenced. Before entry with uplo = MagmaLower, the leading n by n lower triangular part of the array A must contain the lower triangular matrix and the strictly upper triangular part of A is not referenced. 
Note that when diag = MagmaUnit, the diagonal elements of A are not referenced either, but are assumed to be unity. @param[in] ldda INTEGER. On entry, lda specifies the first dimension of A. lda >= max( 1, n ). @param[in] db COMPLEX array of dimension n On exit, b is overwritten with the solution vector X. @param[in] incb INTEGER. On entry, incb specifies the increment for the elements of b. incb must not be zero. Unchanged on exit. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_cblas2 ********************************************************************/ extern "C" void magmablas_ctrsv( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t n, magmaFloatComplex_const_ptr dA, magma_int_t ldda, magmaFloatComplex_ptr db, magma_int_t incb, magma_queue_t queue) { magma_int_t size_x = n * incb; magmaFloatComplex_ptr dx=NULL; magma_cmalloc( &dx, size_x ); magmablas_claset( MagmaFull, n, 1, MAGMA_C_ZERO, MAGMA_C_ZERO, dx, n, queue ); magmablas_ctrsv_recursive_outofplace( uplo, trans, diag, n, dA, ldda, db, incb, dx, queue ); magmablas_clacpy( MagmaFull, n, 1, dx, n, db, n, queue ); magma_free( dx ); }
475ce11e64b0d2a31fc50ad271699de01f583529.cu
/* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @author Tingxing Dong @author Azzam Haidar @generated from magmablas/ztrsv.cu normal z -> c, Tue Feb 9 16:05:35 2016 */ #include "magma_internal.h" #include "magma_templates.h" #define PRECISION_c #define NB 256 //NB is the 1st level blocking in recursive blocking, NUM_THREADS is the 2ed level, NB=256, NUM_THREADS=64 is optimal for batched #define NUM_THREADS 128 //64 //128 #define BLOCK_SIZE_N 128 #define DIM_X_N 128 #define DIM_Y_N 1 #define BLOCK_SIZE_T 32 #define DIM_X_T 16 #define DIM_Y_T 8 #include "ctrsv_template_device.cuh" #define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column extern __shared__ magmaFloatComplex shared_data[]; //============================================================================== template< const int BLOCK_SIZE, const int DIM_X, const int DIM_Y, const int TILE_SIZE, const int flag, const magma_uplo_t uplo, const magma_trans_t trans, const magma_diag_t diag> __global__ void ctrsv_notrans_kernel_outplace( int n, const magmaFloatComplex * __restrict__ A, int lda, magmaFloatComplex *b, int incb, magmaFloatComplex *x) { ctrsv_notrans_device< BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag >( n, A, lda, b, incb, x); } //============================================================================== template<const int BLOCK_SIZE, const int DIM_X, const int DIM_Y, const int TILE_SIZE, const int flag, const magma_uplo_t uplo, const magma_trans_t trans, const magma_diag_t diag> __global__ void ctrsv_trans_kernel_outplace( int n, const magmaFloatComplex * __restrict__ A, int lda, magmaFloatComplex *b, int incb, magmaFloatComplex *x) { ctrsv_trans_device< BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag >( n, A, lda, b, incb, x); } //============================================================================== extern "C" void magmablas_ctrsv_outofplace( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t n, magmaFloatComplex_const_ptr A, magma_int_t lda, magmaFloatComplex_ptr b, magma_int_t incb, magmaFloatComplex_ptr x, magma_queue_t queue, magma_int_t flag=0) { /* Check arguments */ magma_int_t info = 0; if ( uplo != MagmaUpper && uplo != MagmaLower ) { info = -1; } else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) { info = -2; } else if ( diag != MagmaUnit && diag != MagmaNonUnit ) { info = -3; } else if (n < 0) { info = -5; } else if (lda < max(1,n)) { info = -8; } if (info != 0) { magma_xerbla( __func__, -(info) ); return; } // quick return if possible. 
if (n == 0) return; dim3 threads( NUM_THREADS ); dim3 blocks( 1, 1, 1 ); size_t shmem = n * sizeof(magmaFloatComplex); if (trans == MagmaNoTrans) { if (uplo == MagmaUpper) { if (diag == MagmaNonUnit) { if (flag == 0) { ctrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaUpper, MagmaNoTrans, MagmaNonUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } else { ctrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaUpper, MagmaNoTrans, MagmaNonUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } } else if (diag == MagmaUnit) { if (flag == 0) { ctrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaUpper, MagmaNoTrans, MagmaUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } else { ctrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaUpper, MagmaNoTrans, MagmaUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } } } else //Lower { if (diag == MagmaNonUnit) { if (flag == 0) { ctrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaLower, MagmaNoTrans, MagmaNonUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } else { ctrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaLower, MagmaNoTrans, MagmaNonUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } } else if (diag == MagmaUnit) { if (flag == 0) { ctrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaLower, MagmaNoTrans, MagmaUnit> <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } else { ctrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaLower, MagmaNoTrans, MagmaUnit> <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } } } } else if (trans == MagmaTrans) { if (uplo == MagmaUpper) { if (diag == MagmaNonUnit) { if (flag == 0) { ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaTrans, MagmaNonUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } else { ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaTrans, MagmaNonUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } } else if (diag == MagmaUnit) { if (flag == 0) { ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaTrans, MagmaUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } else { ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaTrans, MagmaUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } } } else { if (diag == MagmaNonUnit) { if (flag == 0) { ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaTrans, MagmaNonUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } else { ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaTrans, MagmaNonUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } } else if (diag == MagmaUnit) { if (flag == 0) { ctrsv_trans_kernel_outplace< 
BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaTrans, MagmaUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } else { ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaTrans, MagmaUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } } } } else if (trans == MagmaConjTrans) { if (uplo == MagmaUpper) { if (diag == MagmaNonUnit) { if (flag == 0) { ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaConjTrans, MagmaNonUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } else { ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaConjTrans, MagmaNonUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } } else if (diag == MagmaUnit) { if (flag == 0) { ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaConjTrans, MagmaUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } else { ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaConjTrans, MagmaUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } } } else { if (diag == MagmaNonUnit) { if (flag == 0) { ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaConjTrans, MagmaNonUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } else { ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaConjTrans, MagmaNonUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } } else if (diag == MagmaUnit) { if (flag == 0) { ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaConjTrans, MagmaUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } else { ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaConjTrans, MagmaUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } } } } } /* README: flag decides if the ctrsv_outplace see an updated x or not. 0: No; other: Yes In recursive, flag must be nonzero except the 1st call */ extern "C" void magmablas_ctrsv_recursive_outofplace( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t n, magmaFloatComplex_const_ptr A, magma_int_t lda, magmaFloatComplex_ptr b, magma_int_t incb, magmaFloatComplex_ptr x, magma_queue_t queue) { /* Check arguments */ magma_int_t info = 0; if ( uplo != MagmaUpper && uplo != MagmaLower ) { info = -1; } else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) { info = -2; } else if ( diag != MagmaUnit && diag != MagmaNonUnit ) { info = -3; } else if (n < 0) { info = -5; } else if (lda < max(1,n)) { info = -8; } if (info != 0) { magma_xerbla( __func__, -(info) ); return; } // quick return if possible. 
if (n == 0) return; //Init x with zero //magmablas_claset( MagmaFull, n, incb, MAGMA_C_ZERO, MAGMA_C_ZERO, x, n, queue ); magma_int_t col = n; if (trans == MagmaNoTrans) { for (magma_int_t i=0; i < n; i+= NB) { magma_int_t jb = min(NB, n-i); if (uplo == MagmaUpper) { col -= jb; //assume x_array contains zero elements, magmablas_cgemv will cause slow down magma_cgemv( MagmaNoTrans, jb, i, MAGMA_C_ONE, A(col, col+jb), lda, x+col+jb, 1, MAGMA_C_ONE, x+col, 1, queue ); } else { col = i; magma_cgemv( MagmaNoTrans, jb, i, MAGMA_C_ONE, A(col, 0), lda, x, 1, MAGMA_C_ONE, x+col, 1, queue ); } magmablas_ctrsv_outofplace( uplo, trans, diag, jb, A(col, col), lda, b+col, incb, x+col, queue, i ); } } else { for (magma_int_t i=0; i < n; i += NB) { magma_int_t jb = min(NB, n-i); if (uplo == MagmaLower) { col -= jb; magma_cgemv( MagmaConjTrans, i, jb, MAGMA_C_ONE, A(col+jb, col), lda, x+col+jb, 1, MAGMA_C_ONE, x+col, 1, queue ); } else { col = i; magma_cgemv( MagmaConjTrans, i, jb, MAGMA_C_ONE, A(0, col), lda, x, 1, MAGMA_C_ONE, x+col, 1, queue ); } magmablas_ctrsv_outofplace( uplo, trans, diag, jb, A(col, col), lda, b+col, incb, x+col, queue, i ); } } } //============================================================================== /** Purpose ------- ctrsv solves one of the matrix equations on gpu op(A)*x = B, or x*op(A) = B, where alpha is a scalar, X and B are vectors, A is a unit, or non-unit, upper or lower triangular matrix and op(A) is one of op(A) = A, or op(A) = A^T, or op(A) = A^H. The vector x is overwritten on b. Arguments ---------- @param[in] uplo magma_uplo_t. On entry, uplo specifies whether the matrix A is an upper or lower triangular matrix as follows: - = MagmaUpper: A is an upper triangular matrix. - = MagmaLower: A is a lower triangular matrix. @param[in] trans magma_trans_t. On entry, trans specifies the form of op(A) to be used in the matrix multiplication as follows: - = MagmaNoTrans: op(A) = A. - = MagmaTrans: op(A) = A^T. - = MagmaConjTrans: op(A) = A^H. @param[in] diag magma_diag_t. On entry, diag specifies whether or not A is unit triangular as follows: - = MagmaUnit: A is assumed to be unit triangular. - = MagmaNonUnit: A is not assumed to be unit triangular. @param[in] n INTEGER. On entry, n N specifies the order of the matrix A. n >= 0. @param[in] dA COMPLEX array of dimension ( lda, n ) Before entry with uplo = MagmaUpper, the leading n by n upper triangular part of the array A must contain the upper triangular matrix and the strictly lower triangular part of A is not referenced. Before entry with uplo = MagmaLower, the leading n by n lower triangular part of the array A must contain the lower triangular matrix and the strictly upper triangular part of A is not referenced. Note that when diag = MagmaUnit, the diagonal elements of A are not referenced either, but are assumed to be unity. @param[in] ldda INTEGER. On entry, lda specifies the first dimension of A. lda >= max( 1, n ). @param[in] db COMPLEX array of dimension n On exit, b is overwritten with the solution vector X. @param[in] incb INTEGER. On entry, incb specifies the increment for the elements of b. incb must not be zero. Unchanged on exit. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_cblas2 ********************************************************************/ extern "C" void magmablas_ctrsv( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t n, magmaFloatComplex_const_ptr dA, magma_int_t ldda, magmaFloatComplex_ptr db, magma_int_t incb, magma_queue_t queue) { magma_int_t size_x = n * incb; magmaFloatComplex_ptr dx=NULL; magma_cmalloc( &dx, size_x ); magmablas_claset( MagmaFull, n, 1, MAGMA_C_ZERO, MAGMA_C_ZERO, dx, n, queue ); magmablas_ctrsv_recursive_outofplace( uplo, trans, diag, n, dA, ldda, db, incb, dx, queue ); magmablas_clacpy( MagmaFull, n, 1, dx, n, db, n, queue ); magma_free( dx ); }
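The documentation block in the pair above describes the magmablas_ctrsv interface, but neither file contains a host-side driver. The sketch below shows one plausible call sequence for the upper-triangular, no-transpose case; it is editorial, not part of the recorded pair, and it assumes the standard MAGMA 2.x host helpers (magma_init, magma_queue_create, magma_cmalloc, magma_csetmatrix, magma_csetvector, magma_cgetvector). The names hA, hb, and lda are placeholders.

// Hypothetical driver for magmablas_ctrsv: solve A*x = b for an n-by-n
// upper-triangular single-precision complex matrix, copying data to and
// from the GPU. Error checking is omitted for brevity.
#include "magma_v2.h"

void solve_upper_triangular(magma_int_t n,
                            const magmaFloatComplex *hA, magma_int_t lda,
                            magmaFloatComplex *hb)
{
    magma_init();

    magma_queue_t queue;
    magma_queue_create(0 /* device */, &queue);

    magmaFloatComplex_ptr dA, db;
    magma_cmalloc(&dA, (size_t)lda * n);   // device copy of A (column-major)
    magma_cmalloc(&db, n);                 // device copy of b, incb == 1

    magma_csetmatrix(n, n, hA, lda, dA, lda, queue);
    magma_csetvector(n, hb, 1, db, 1, queue);

    // db is overwritten with the solution x, as documented above.
    magmablas_ctrsv(MagmaUpper, MagmaNoTrans, MagmaNonUnit,
                    n, dA, lda, db, 1, queue);

    magma_cgetvector(n, db, 1, hb, 1, queue);

    magma_free(dA);
    magma_free(db);
    magma_queue_destroy(queue);
    magma_finalize();
}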
042ba4b42d4b4505ba0b86445a8bdfc374a11be1.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "/home/ywang15/Documents/cuda/cuda_by_examples/common/book.h"
#include "/home/ywang15/Documents/cuda/cuda_by_examples/common/cpu_bitmap.h"

#define len 200

struct hipComplex{
    float r;
    float i;
    __device__ hipComplex(float a, float b) : r(a), i(b) {}
    __device__ hipComplex operator*(const hipComplex &c){
        float real = r*c.r - i*c.i;
        float imag = i*c.r + r*c.i;
        return hipComplex(real, imag);
    }
    __device__ hipComplex operator+(const hipComplex &c){
        return hipComplex(r+c.r, i+c.i);
    }
    __device__ float magnitude2( void ){
        return r*r + i*i;
    }
};

__device__ int julia(int x, int y){
    const float scale = 1.5;
    const float tmp = (float)(len) / 2.0;
    float ix = scale * (tmp-x) / tmp;
    float iy = scale * (tmp-y) / tmp;
    hipComplex a(ix,iy), c(-0.8, 0.156);
    for (int i=0; i<200; i++){
        a = a*a + c;
    }
    if (a.magnitude2() > 1000.0){
        return 0;
    }
    return 1;
}

__global__ void kernal(unsigned char *ptr){
    // one thread block per pixel
    int idx = blockIdx.x;
    int idy = blockIdx.y;
    int id = idx + idy * gridDim.x;
    int value = julia(idx, idy);
    ptr[id*4 + 0] = 255 * value;
    ptr[id*4 + 1] = 0;
    ptr[id*4 + 2] = 0;
    ptr[id*4 + 3] = 255;
}

int main( void ){
    CPUBitmap bitmap(len, len);
    unsigned char *d_bmp;
    HANDLE_ERROR( hipMalloc( (void**) &d_bmp, bitmap.image_size() ) );
    dim3 grid(len, len);
    hipLaunchKernelGGL(( kernal), dim3(grid), dim3(1), 0, 0, d_bmp);
    HANDLE_ERROR( hipMemcpy( bitmap.get_ptr(), d_bmp, bitmap.image_size(), hipMemcpyDeviceToHost ) );
    hipFree(d_bmp);
    bitmap.display_and_exit();
    return 0;
}
042ba4b42d4b4505ba0b86445a8bdfc374a11be1.cu
#include "/home/ywang15/Documents/cuda/cuda_by_examples/common/book.h" #include "/home/ywang15/Documents/cuda/cuda_by_examples/common/cpu_bitmap.h" #define len 200 struct cuComplex{ float r; float i; cuComplex(float a, float b) : r(a), i(b) {} __device__ cuComplex operator*(cuComplex &c){ real = r*a - i*b; imag = a*i + r*b; return cuComplex(real, imag); } __device__ cuComplx operator+(cuComplex &c){ return cuComplex(r+c.r, i+c.i); } __device__ float magnitude2( void ){ return r*r + i*i; } } __device__ int julia(int x, int y){ const float scale = 1.5; const float tmp = (float)(len) / 2.0; float ix = scale * (tmp-x) / tmp; float iy = scale * (tmp-y) / tmp; cuComplex a(ix,iy), c(-0.8, 0.156); for (int i=0; i<200; i++){ a = a*a + c; } if (a.magnitude2() > 1000.0){ return 0; } return 1; } __global__ void kernal(unsigned char *ptr){ int idx = blockIdx.x; int idy = blockIdx.y; int id = idx + idy * gridDim.x; int value = julia(idx, idy); ptr[id*4 + 0] = 255 * value; ptr[id*4 + 1] = 0; ptr[id*4 + 2] = 0; ptr[id*4 + 3] = 255; } int main( void ){ CPUBitMap bitmap(len, len); unsigned char *d_bmp; HANDLE_ERROR( cudaMalloc( (void**) &d_map, bitmap.image_size() ) ); dims3 grid(len, len); kernal<<<dims, 1>>>(d_bmp); HANDLE_ERROR( cudaMemcpy( bitmap.get_ptr(), d_map, bitmap.image_size(), cudaMemcpyDeviceToHost ) ); cudaFree(d_map); bitmap.display_and_exit(); return 0; }
7ffa3789e89543a9fd1cbf717b5c881da4884403.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <sys/time.h>
#include <stdio.h>
#include <math.h>

#define Row 1024
#define Col 1024

__global__ void matrix_mul_gpu(int *M, int* N, int* P, int width)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    int j = threadIdx.y + blockDim.y * blockIdx.y;

    int sum = 0;
    for(int k=0;k<width;k++)
    {
        int a = M[j*width+k];
        int b = N[k*width+i];
        sum += a*b;
    }
    P[j*width+i] = sum;
}

int main()
{
    struct timeval start, end;
    gettimeofday( &start, NULL );

    int *A = (int *)malloc(sizeof(int) * Row * Col);
    int *B = (int *)malloc(sizeof(int) * Row * Col);
    int *C = (int *)malloc(sizeof(int) * Row * Col);
    //malloc device memory
    int *d_dataA, *d_dataB, *d_dataC;
    hipMalloc((void**)&d_dataA, sizeof(int) *Row*Col);
    hipMalloc((void**)&d_dataB, sizeof(int) *Row*Col);
    hipMalloc((void**)&d_dataC, sizeof(int) *Row*Col);
    //set value
    for (int i = 0; i < Row*Col; i++) {
        A[i] = 90;
        B[i] = 10;
    }

    hipMemcpy(d_dataA, A, sizeof(int) * Row * Col, hipMemcpyHostToDevice);
    hipMemcpy(d_dataB, B, sizeof(int) * Row * Col, hipMemcpyHostToDevice);

    dim3 threadPerBlock(16, 16);
    dim3 blockNumber((Col+threadPerBlock.x-1)/ threadPerBlock.x, (Row+threadPerBlock.y-1)/ threadPerBlock.y );
    printf("Block(%d,%d) Grid(%d,%d).\n", threadPerBlock.x, threadPerBlock.y, blockNumber.x, blockNumber.y);
    matrix_mul_gpu << <blockNumber, threadPerBlock >> > (d_dataA, d_dataB, d_dataC, Col);
    // copy the result back to the host (first-level data pointer)
    hipMemcpy(C, d_dataC, sizeof(int) * Row * Col, hipMemcpyDeviceToHost);

    // free memory
    free(A);
    free(B);
    free(C);
    hipFree(d_dataA);
    hipFree(d_dataB);
    hipFree(d_dataC);

    gettimeofday( &end, NULL );
    int timeuse = 1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec;
    printf("total time is %d ms\n", timeuse/1000);

    return 0;
}
7ffa3789e89543a9fd1cbf717b5c881da4884403.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <sys/time.h> #include <stdio.h> #include <math.h> #define Row 1024 #define Col 1024 __global__ void matrix_mul_gpu(int *M, int* N, int* P, int width) { int i = threadIdx.x + blockDim.x * blockIdx.x; int j = threadIdx.y + blockDim.y * blockIdx.y; int sum = 0; for(int k=0;k<width;k++) { int a = M[j*width+k]; int b = N[k*width+i]; sum += a*b; } P[j*width+i] = sum; } int main() { struct timeval start, end; gettimeofday( &start, NULL ); int *A = (int *)malloc(sizeof(int) * Row * Col); int *B = (int *)malloc(sizeof(int) * Row * Col); int *C = (int *)malloc(sizeof(int) * Row * Col); //malloc device memory int *d_dataA, *d_dataB, *d_dataC; cudaMalloc((void**)&d_dataA, sizeof(int) *Row*Col); cudaMalloc((void**)&d_dataB, sizeof(int) *Row*Col); cudaMalloc((void**)&d_dataC, sizeof(int) *Row*Col); //set value for (int i = 0; i < Row*Col; i++) { A[i] = 90; B[i] = 10; } cudaMemcpy(d_dataA, A, sizeof(int) * Row * Col, cudaMemcpyHostToDevice); cudaMemcpy(d_dataB, B, sizeof(int) * Row * Col, cudaMemcpyHostToDevice); dim3 threadPerBlock(16, 16); dim3 blockNumber((Col+threadPerBlock.x-1)/ threadPerBlock.x, (Row+threadPerBlock.y-1)/ threadPerBlock.y ); printf("Block(%d,%d) Grid(%d,%d).\n", threadPerBlock.x, threadPerBlock.y, blockNumber.x, blockNumber.y); matrix_mul_gpu << <blockNumber, threadPerBlock >> > (d_dataA, d_dataB, d_dataC, Col); //拷贝计算数据-一级数据指针 cudaMemcpy(C, d_dataC, sizeof(int) * Row * Col, cudaMemcpyDeviceToHost); //释放内存 free(A); free(B); free(C); cudaFree(d_dataA); cudaFree(d_dataB); cudaFree(d_dataC); gettimeofday( &end, NULL ); int timeuse = 1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec; printf("total time is %d ms\n", timeuse/1000); return 0; }
a814b0faa904c44f7c2e96d63ac2e583b0816ac5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ __global__ void reduce2(int *g_idata, int *g_odata, unsigned int n) { __shared__ int sdata[n]; // load shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) { sdata[tid] = g_idata[i]; } // do reduction in shared mem for (unsigned int s=2; s<blockDim.x; s*=2) { int si = blockDim.x / s; if (tid < si) { sdata[tid] += sdata[tid + si]; } } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; }
a814b0faa904c44f7c2e96d63ac2e583b0816ac5.cu
/* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ __global__ void reduce2(int *g_idata, int *g_odata, unsigned int n) { __shared__ int sdata[n]; // load shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) { sdata[tid] = g_idata[i]; } // do reduction in shared mem for (unsigned int s=2; s<blockDim.x; s*=2) { int si = blockDim.x / s; if (tid < si) { sdata[tid] += sdata[tid + si]; } } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; }
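As recorded, reduce2 in both files of this pair neither compiles nor reduces correctly: __shared__ int sdata[n] needs a compile-time bound, the tree loop has no __syncthreads(), and the final fold stops at a stride of 2 so sdata[1] is never added into sdata[0]. The kernel below is an editorial sketch of a working variant, not part of the recorded pair; it assumes a power-of-two block size and a launch that passes blockDim.x * sizeof(int) bytes of dynamic shared memory.

// Corrected shared-memory tree reduction (sketch).
// Launch as: reduce2_fixed<<<blocks, threads, threads * sizeof(int)>>>(in, out, n);
__global__ void reduce2_fixed(const int *g_idata, int *g_odata, unsigned int n)
{
    extern __shared__ int sdata[];   // sized at launch time

    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;

    // load shared mem, padding the tail with zeros
    sdata[tid] = (i < n) ? g_idata[i] : 0;
    __syncthreads();

    // fold the upper half onto the lower half until one value remains
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s) {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }

    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}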
290585182350058305e5e6672237c06646055e19.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <assert.h> #include <hip/hip_runtime.h> __global__ void reverse (int* d, const int len) { __shared__ int s[256]; int t = threadIdx.x; int tr = len-t-1; s[t] = d[t]; __syncthreads(); d[t] = s[tr]; } int main() { const int len = 256; const int iteration = 1 << 20; int d[len]; for (int i = 0; i < len; i++) d[i] = i; int *dd; hipMalloc((void**)&dd, sizeof(int)*len); hipMemcpy(dd, d, sizeof(int)*len, hipMemcpyHostToDevice); for (int i = 0; i <= iteration; i++) hipLaunchKernelGGL(( reverse), dim3(1), dim3(256), 0, 0, dd, len); hipMemcpy(d, dd, sizeof(int)*len, hipMemcpyDeviceToHost); hipFree(dd); for (int i = 0; i < len; i++) assert(d[i] == len-i-1); printf("PASS\n"); return 0; }
290585182350058305e5e6672237c06646055e19.cu
#include <stdio.h> #include <assert.h> #include <cuda.h> __global__ void reverse (int* d, const int len) { __shared__ int s[256]; int t = threadIdx.x; int tr = len-t-1; s[t] = d[t]; __syncthreads(); d[t] = s[tr]; } int main() { const int len = 256; const int iteration = 1 << 20; int d[len]; for (int i = 0; i < len; i++) d[i] = i; int *dd; cudaMalloc((void**)&dd, sizeof(int)*len); cudaMemcpy(dd, d, sizeof(int)*len, cudaMemcpyHostToDevice); for (int i = 0; i <= iteration; i++) reverse<<<1, 256>>> (dd, len); cudaMemcpy(d, dd, sizeof(int)*len, cudaMemcpyDeviceToHost); cudaFree(dd); for (int i = 0; i < len; i++) assert(d[i] == len-i-1); printf("PASS\n"); return 0; }
a7a7683d127f85fa3c4d58fd50cb8175f1d72443.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cfloat> #include <vector> #include "caffe/layers/eltwise_adaptive_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void MaxForward(const int nthreads, const Dtype* bottom_data_a, const Dtype* bottom_data_b, const int blob_idx, Dtype* top_data, int* mask) { CUDA_KERNEL_LOOP(index, nthreads) { Dtype maxval = -FLT_MAX; int maxidx = -1; if (bottom_data_a[index] > bottom_data_b[index]) { // only update for very first bottom_data blob (blob_idx == 0) if (blob_idx == 0) { maxval = bottom_data_a[index]; top_data[index] = maxval; maxidx = blob_idx; mask[index] = maxidx; } } else { maxval = bottom_data_b[index]; top_data[index] = maxval; maxidx = blob_idx + 1; mask[index] = maxidx; } } } template <typename Dtype> void EltwiseAdaptiveLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int* mask = NULL; const int count = top[0]->count(); Dtype* top_data = top[0]->mutable_gpu_data(); switch (op_) { case EltwiseParameter_EltwiseOp_PROD: caffe_gpu_mul(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), top_data); for (int i = 2; i < bottom.size(); ++i) { caffe_gpu_mul(count, top_data, bottom[i]->gpu_data(), top_data); } break; case EltwiseParameter_EltwiseOp_SUM: caffe_gpu_set(count, Dtype(0.), top_data); // TODO(shelhamer) does cuBLAS optimize to sum for coeff = 1? crop_layer_->Forward(crop_bottom_vec_, crop_top_vec_); caffe_gpu_axpy(count, coeffs_[0], crop_top_blob_.gpu_data(), top_data); //bottom[0] = &crop_top_blob_; //for (int i = 0; i < bottom.size(); ++i) { //caffe_gpu_axpy(count, coeffs_[i], bottom[i]->gpu_data(), top_data); //} break; case EltwiseParameter_EltwiseOp_MAX: mask = max_idx_.mutable_gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MaxForward<Dtype>) , dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), 0, top_data, mask); for (int i = 2; i < bottom.size(); ++i) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MaxForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_data, bottom[i]->gpu_data(), i-1, top_data, mask); } break; default: LOG(FATAL) << "Unknown elementwise operation."; } } template <typename Dtype> __global__ void MaxBackward(const int nthreads, const Dtype* top_diff, const int blob_idx, const int* mask, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { Dtype gradient = 0; if (mask[index] == blob_idx) { gradient += top_diff[index]; } bottom_diff[index] = gradient; } } template <typename Dtype> void EltwiseAdaptiveLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const int* mask = NULL; const int count = top[0]->count(); const Dtype* top_data = top[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); for (int i = 0; i < bottom.size(); ++i) { if (propagate_down[i]) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); switch (op_) { case EltwiseParameter_EltwiseOp_PROD: if (stable_prod_grad_) { bool initialized = false; for (int j = 0; j < bottom.size(); ++j) { if (i == j) { continue; } if (!initialized) { caffe_copy(count, bottom[j]->gpu_data(), bottom_diff); initialized = true; } else { caffe_gpu_mul(count, bottom[j]->gpu_data(), bottom_diff, bottom_diff); } } } else { 
caffe_gpu_div(count, top_data, bottom_data, bottom_diff); } caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff); break; case EltwiseParameter_EltwiseOp_SUM: if (coeffs_[i] == Dtype(1.)) { crop_layer_->Backward(crop_top_vec_,propagate_down,crop_bottom_vec_); const Dtype* top_diff = crop_bottom_vec_[0]->gpu_diff(); caffe_copy(count, top_diff, bottom_diff); } else { caffe_gpu_scale(count, coeffs_[i], top_diff, bottom_diff); } break; case EltwiseParameter_EltwiseOp_MAX: mask = max_idx_.gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MaxBackward<Dtype>) , dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, i, mask, bottom_diff); break; default: LOG(FATAL) << "Unknown elementwise operation."; } } } INSTANTIATE_LAYER_GPU_FUNCS(EltwiseAdaptiveLayer); } // namespace caffe
a7a7683d127f85fa3c4d58fd50cb8175f1d72443.cu
#include <cfloat> #include <vector> #include "caffe/layers/eltwise_adaptive_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void MaxForward(const int nthreads, const Dtype* bottom_data_a, const Dtype* bottom_data_b, const int blob_idx, Dtype* top_data, int* mask) { CUDA_KERNEL_LOOP(index, nthreads) { Dtype maxval = -FLT_MAX; int maxidx = -1; if (bottom_data_a[index] > bottom_data_b[index]) { // only update for very first bottom_data blob (blob_idx == 0) if (blob_idx == 0) { maxval = bottom_data_a[index]; top_data[index] = maxval; maxidx = blob_idx; mask[index] = maxidx; } } else { maxval = bottom_data_b[index]; top_data[index] = maxval; maxidx = blob_idx + 1; mask[index] = maxidx; } } } template <typename Dtype> void EltwiseAdaptiveLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int* mask = NULL; const int count = top[0]->count(); Dtype* top_data = top[0]->mutable_gpu_data(); switch (op_) { case EltwiseParameter_EltwiseOp_PROD: caffe_gpu_mul(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), top_data); for (int i = 2; i < bottom.size(); ++i) { caffe_gpu_mul(count, top_data, bottom[i]->gpu_data(), top_data); } break; case EltwiseParameter_EltwiseOp_SUM: caffe_gpu_set(count, Dtype(0.), top_data); // TODO(shelhamer) does cuBLAS optimize to sum for coeff = 1? crop_layer_->Forward(crop_bottom_vec_, crop_top_vec_); caffe_gpu_axpy(count, coeffs_[0], crop_top_blob_.gpu_data(), top_data); //bottom[0] = &crop_top_blob_; //for (int i = 0; i < bottom.size(); ++i) { //caffe_gpu_axpy(count, coeffs_[i], bottom[i]->gpu_data(), top_data); //} break; case EltwiseParameter_EltwiseOp_MAX: mask = max_idx_.mutable_gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) MaxForward<Dtype> <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), 0, top_data, mask); for (int i = 2; i < bottom.size(); ++i) { // NOLINT_NEXT_LINE(whitespace/operators) MaxForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_data, bottom[i]->gpu_data(), i-1, top_data, mask); } break; default: LOG(FATAL) << "Unknown elementwise operation."; } } template <typename Dtype> __global__ void MaxBackward(const int nthreads, const Dtype* top_diff, const int blob_idx, const int* mask, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { Dtype gradient = 0; if (mask[index] == blob_idx) { gradient += top_diff[index]; } bottom_diff[index] = gradient; } } template <typename Dtype> void EltwiseAdaptiveLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const int* mask = NULL; const int count = top[0]->count(); const Dtype* top_data = top[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); for (int i = 0; i < bottom.size(); ++i) { if (propagate_down[i]) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); switch (op_) { case EltwiseParameter_EltwiseOp_PROD: if (stable_prod_grad_) { bool initialized = false; for (int j = 0; j < bottom.size(); ++j) { if (i == j) { continue; } if (!initialized) { caffe_copy(count, bottom[j]->gpu_data(), bottom_diff); initialized = true; } else { caffe_gpu_mul(count, bottom[j]->gpu_data(), bottom_diff, bottom_diff); } } } else { caffe_gpu_div(count, top_data, bottom_data, bottom_diff); } caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff); break; case EltwiseParameter_EltwiseOp_SUM: if 
(coeffs_[i] == Dtype(1.)) { crop_layer_->Backward(crop_top_vec_,propagate_down,crop_bottom_vec_); const Dtype* top_diff = crop_bottom_vec_[0]->gpu_diff(); caffe_copy(count, top_diff, bottom_diff); } else { caffe_gpu_scale(count, coeffs_[i], top_diff, bottom_diff); } break; case EltwiseParameter_EltwiseOp_MAX: mask = max_idx_.gpu_data(); MaxBackward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, i, mask, bottom_diff); break; default: LOG(FATAL) << "Unknown elementwise operation."; } } } } INSTANTIATE_LAYER_GPU_FUNCS(EltwiseAdaptiveLayer); } // namespace caffe
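Aside: the MAX branch in the eltwise pair above relies on a mask written in the forward pass and read back in the backward pass — MaxForward records which input blob won each element-wise max, and MaxBackward routes the gradient only to that winner. A minimal host-side C++ sketch of just that bookkeeping follows; it is not Caffe code, and the array sizes and values are invented for illustration.

#include <cstdio>

int main() {
    const int count = 4;
    float a[count] = {1.f, 5.f, 2.f, 0.f};   // stand-in for bottom[0]
    float b[count] = {3.f, 4.f, 2.f, 7.f};   // stand-in for bottom[1]
    float top[count];
    int mask[count];

    // Forward: element-wise max, remembering which blob supplied the value
    // (ties go to b, matching the kernel's else-branch).
    for (int i = 0; i < count; ++i) {
        if (a[i] > b[i]) { top[i] = a[i]; mask[i] = 0; }
        else             { top[i] = b[i]; mask[i] = 1; }
    }

    // Backward: each input receives the top gradient only where it won.
    float top_diff[count] = {1.f, 1.f, 1.f, 1.f};
    float a_diff[count], b_diff[count];
    for (int i = 0; i < count; ++i) {
        a_diff[i] = (mask[i] == 0) ? top_diff[i] : 0.f;
        b_diff[i] = (mask[i] == 1) ? top_diff[i] : 0.f;
    }

    for (int i = 0; i < count; ++i)
        printf("i=%d top=%.0f mask=%d a_diff=%.0f b_diff=%.0f\n",
               i, top[i], mask[i], a_diff[i], b_diff[i]);
    return 0;
}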
6504cd5c776ef0eadd8cf7a90cb3fb201477fcf1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" /* * Perfom a reduction from data of length 'size' to result, where length of result will be 'number of blocks'. */ extern "C" __global__ void invert(int n, float *a, float *result) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i<n) { result[i] = 1.0f / a[i]; } }
6504cd5c776ef0eadd8cf7a90cb3fb201477fcf1.cu
#include "includes.h" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" /* * Perfom a reduction from data of length 'size' to result, where length of result will be 'number of blocks'. */ extern "C" __global__ void invert(int n, float *a, float *result) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i<n) { result[i] = 1.0f / a[i]; } }
263090fdaeda49f687f419664488e9b1d4a7e246.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2011 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * NVIDIA Corporation is strictly prohibited. * * Please refer to the applicable NVIDIA end user license agreement (EULA) * associated with this source code for terms and conditions that govern * your use of this NVIDIA software. * */ #include "common.h" /////////////////////////////////////////////////////////////////////////////// /// texture references /////////////////////////////////////////////////////////////////////////////// /// image to warp texture<float, 2, hipReadModeElementType> texToWarp; /////////////////////////////////////////////////////////////////////////////// /// \brief warp image with a given displacement field, CUDA kernel. /// \param[in] width image width /// \param[in] height image height /// \param[in] stride image stride /// \param[in] u horizontal displacement /// \param[in] v vertical displacement /// \param[out] out result /////////////////////////////////////////////////////////////////////////////// __global__ void WarpingKernel(int width, int height, int stride, const float *u, const float *v, float *out) { const int ix = threadIdx.x + blockIdx.x * blockDim.x; const int iy = threadIdx.y + blockIdx.y * blockDim.y; const int pos = ix + iy * stride; if (ix >= width || iy >= height) return; float x = ((float)ix + u[pos] + 0.5f) / (float)width; float y = ((float)iy + v[pos] + 0.5f) / (float)height; out[pos] = tex2D(texToWarp, x, y); } /////////////////////////////////////////////////////////////////////////////// /// \brief warp image with provided vector field, CUDA kernel wrapper. /// /// For each output pixel there is a vector which tells which pixel /// from a source image should be mapped to this particular output /// pixel. /// It is assumed that images and the vector field have the same stride and /// resolution. /// \param[in] src source image /// \param[in] w width /// \param[in] h height /// \param[in] s stride /// \param[in] u horizontal displacement /// \param[in] v vertical displacement /// \param[out] out warped image /////////////////////////////////////////////////////////////////////////////// static void WarpImage(const float *src, int w, int h, int s, const float *u, const float *v, float *out) { dim3 threads(32, 6); dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y)); // mirror if a coordinate value is out-of-range texToWarp.addressMode[0] = hipAddressModeMirror; texToWarp.addressMode[1] = hipAddressModeMirror; texToWarp.filterMode = hipFilterModeLinear; texToWarp.normalized = true; hipChannelFormatDesc desc = hipCreateChannelDesc<float>(); hipBindTexture2D(0, texToWarp, src, w, h, s * sizeof(float)); hipLaunchKernelGGL(( WarpingKernel), dim3(blocks), dim3(threads), 0, 0, w, h, s, u, v, out); }
263090fdaeda49f687f419664488e9b1d4a7e246.cu
/* * Copyright 1993-2011 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * NVIDIA Corporation is strictly prohibited. * * Please refer to the applicable NVIDIA end user license agreement (EULA) * associated with this source code for terms and conditions that govern * your use of this NVIDIA software. * */ #include "common.h" /////////////////////////////////////////////////////////////////////////////// /// texture references /////////////////////////////////////////////////////////////////////////////// /// image to warp texture<float, 2, cudaReadModeElementType> texToWarp; /////////////////////////////////////////////////////////////////////////////// /// \brief warp image with a given displacement field, CUDA kernel. /// \param[in] width image width /// \param[in] height image height /// \param[in] stride image stride /// \param[in] u horizontal displacement /// \param[in] v vertical displacement /// \param[out] out result /////////////////////////////////////////////////////////////////////////////// __global__ void WarpingKernel(int width, int height, int stride, const float *u, const float *v, float *out) { const int ix = threadIdx.x + blockIdx.x * blockDim.x; const int iy = threadIdx.y + blockIdx.y * blockDim.y; const int pos = ix + iy * stride; if (ix >= width || iy >= height) return; float x = ((float)ix + u[pos] + 0.5f) / (float)width; float y = ((float)iy + v[pos] + 0.5f) / (float)height; out[pos] = tex2D(texToWarp, x, y); } /////////////////////////////////////////////////////////////////////////////// /// \brief warp image with provided vector field, CUDA kernel wrapper. /// /// For each output pixel there is a vector which tells which pixel /// from a source image should be mapped to this particular output /// pixel. /// It is assumed that images and the vector field have the same stride and /// resolution. /// \param[in] src source image /// \param[in] w width /// \param[in] h height /// \param[in] s stride /// \param[in] u horizontal displacement /// \param[in] v vertical displacement /// \param[out] out warped image /////////////////////////////////////////////////////////////////////////////// static void WarpImage(const float *src, int w, int h, int s, const float *u, const float *v, float *out) { dim3 threads(32, 6); dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y)); // mirror if a coordinate value is out-of-range texToWarp.addressMode[0] = cudaAddressModeMirror; texToWarp.addressMode[1] = cudaAddressModeMirror; texToWarp.filterMode = cudaFilterModeLinear; texToWarp.normalized = true; cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>(); cudaBindTexture2D(0, texToWarp, src, w, h, s * sizeof(float)); WarpingKernel<<<blocks, threads>>>(w, h, s, u, v, out); }
60d019dc7c8d3379a4bc5c78f2304cdfc566f15a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../common/book.h" __global__ void kernel(void) { } int main(void) { hipLaunchKernelGGL(( kernel), dim3(1),dim3(1), 0, 0, ); printf("Hello, World!\n"); return 0; }
60d019dc7c8d3379a4bc5c78f2304cdfc566f15a.cu
#include "../common/book.h" __global__ void kernel(void) { } int main(void) { kernel<<<1,1>>>(); printf("Hello, World!\n"); return 0; }
a524ae7803776611c253dd86ee751967349f7d53.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdbool.h> #include "idas_parallel_host.h" /* stack implementation */ __device__ __shared__ static struct dir_stack_tag { uchar i; uchar buf[STACK_BUF_BYTES]; } stack[WARP_SIZE]; #define stack_byte(i) (stack[threadIdx.x].buf[(i) >> STACK_DIR_BITS]) #define stack_ofs(i) ((i & STACK_DIR_MASK) << 1) #define stack_get(i) \ ((stack_byte(i) & (STACK_DIR_MASK << stack_ofs(i))) >> stack_ofs(i)) __device__ static inline void stack_init(void) { stack[threadIdx.x].i = 0; } __device__ static inline void stack_put(DirDev dir) { stack_byte(stack[threadIdx.x].i) &= ~(STACK_DIR_MASK << stack_ofs(stack[threadIdx.x].i)); stack_byte(stack[threadIdx.x].i) |= dir << stack_ofs(stack[threadIdx.x].i); ++stack[threadIdx.x].i; } __device__ static inline bool stack_is_empty(void) { return stack[threadIdx.x].i == 0; /* how about !stack[threadIdx.x].i */ } __device__ static inline DirDev stack_pop(void) { --stack[threadIdx.x].i; return stack_get(stack[threadIdx.x].i); } __device__ static inline DirDev stack_peak(void) { return stack_get(stack[threadIdx.x].i - 1); } /* state implementation */ #define STATE_EMPTY 0 #define STATE_TILE_BITS 4 #define STATE_TILE_MASK ((1ull << STATE_TILE_BITS) - 1) /* * goal: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] */ __device__ __shared__ static struct state_tag { unsigned long long tile; /* packed representation label(4bit)*16pos */ uchar empty; uchar h_value; /* ub of h_value is 6*16 */ } state[WARP_SIZE]; #define state_tile_ofs(i) (i << 2) #define state_tile_get(i) \ ((state[threadIdx.x].tile & (STATE_TILE_MASK << state_tile_ofs(i))) >> \ state_tile_ofs(i)) #define state_tile_set(i, val) \ do \ { \ state[threadIdx.x].tile &= ~((STATE_TILE_MASK) << state_tile_ofs(i)); \ state[threadIdx.x].tile |= ((unsigned long long) val) \ << state_tile_ofs(i); \ } while (0) __device__ static uchar inline distance(uchar i, uchar j) { return i > j ? i - j : j - i; } #define H_DIFF(opponent, empty, empty_dir) \ h_diff_table_shared[opponent][empty][empty_dir] __device__ __shared__ static signed char h_diff_table_shared[STATE_N][STATE_N] [DIR_N]; __device__ static void state_init_hvalue(void) { uchar from_x[STATE_N], from_y[STATE_N]; int tid = threadIdx.x; state[tid].h_value = 0; for (int i = 0; i < STATE_N; ++i) { from_x[state_tile_get(i)] = POS_X(i); from_y[state_tile_get(i)] = POS_Y(i); } for (int i = 1; i < STATE_N; ++i) { state[tid].h_value += distance(from_x[i], POS_X(i)); state[tid].h_value += distance(from_y[i], POS_Y(i)); } } __device__ static void state_tile_fill(const uchar v_list[STATE_WIDTH * STATE_WIDTH]) { for (int i = 0; i < STATE_N; ++i) { if (v_list[i] == STATE_EMPTY) state[threadIdx.x].empty = i; state_tile_set(i, v_list[i]); } } __device__ static inline bool state_is_goal(void) { return state[threadIdx.x].h_value == 0; } __device__ static char assert_direction2 [DIR_UP == 0 && DIR_RIGHT == 1 && DIR_LEFT == 2 && DIR_DOWN == 3 ? 1 : -1]; __device__ __shared__ static bool movable_table_shared[STATE_N][DIR_N]; __device__ static inline bool state_movable(DirDev dir) { return movable_table_shared[state[threadIdx.x].empty][dir]; } __device__ static char assert_direction [DIR_UP == 0 && DIR_RIGHT == 1 && DIR_LEFT == 2 && DIR_DOWN == 3 ? 
1 : -1]; __device__ __constant__ const static int pos_diff_table[DIR_N] = { -STATE_WIDTH, 1, -1, +STATE_WIDTH}; __device__ static inline bool state_move_with_limit(DirDev dir, unsigned int f_limit) { int new_empty = state[threadIdx.x].empty + pos_diff_table[dir]; int opponent = state_tile_get(new_empty); int new_h_value = state[threadIdx.x].h_value + H_DIFF(opponent, new_empty, dir); if (stack[threadIdx.x].i + 1 + new_h_value > f_limit) return false; state[threadIdx.x].h_value = new_h_value; state_tile_set(state[threadIdx.x].empty, opponent); state[threadIdx.x].empty = new_empty; return true; } __device__ static inline void state_move(DirDev dir) { int new_empty = state[threadIdx.x].empty + pos_diff_table[dir]; int opponent = state_tile_get(new_empty); state[threadIdx.x].h_value += H_DIFF(opponent, new_empty, dir); state_tile_set(state[threadIdx.x].empty, opponent); state[threadIdx.x].empty = new_empty; } /* * solver implementation */ __device__ static bool idas_internal(uchar f_limit) { uchar dir = 0; for (;;) { if (state_is_goal()) return true; if ((stack_is_empty() || stack_peak() != dir_reverse_dev(dir)) && state_movable(dir)) { if (state_move_with_limit(dir, f_limit)) { stack_put(dir); dir = 0; continue; } } while (++dir == DIR_N) { if (stack_is_empty()) return false; dir = stack_pop(); state_move(dir_reverse_dev(dir)); } } } __global__ void idas_kernel(uchar *input, signed char *plan, int f_limit, signed char *h_diff_table, bool *movable_table) { int tid = threadIdx.x; int bid = blockIdx.x; int id = tid + bid * blockDim.x; for (int dir = 0; dir < DIR_N; ++dir) if (tid < STATE_N) movable_table_shared[tid][dir] = movable_table[tid * DIR_N + dir]; for (int i = 0; i < STATE_N * DIR_N; ++i) if (tid < STATE_N) h_diff_table_shared[tid][i / DIR_N][i % DIR_N] = h_diff_table[tid * STATE_N * DIR_N + i]; __syncthreads(); stack_init(); state_tile_fill(input + id * STATE_N); state_init_hvalue(); if (idas_internal(f_limit)) { plan[id * PLAN_LEN_MAX] = (signed char) stack[tid].i; /* len of plan */ for (uchar i = 0; i < stack[tid].i; ++i) plan[i + 1 + id * PLAN_LEN_MAX] = stack_get(i); } else plan[id * PLAN_LEN_MAX] = NOT_SOLVED; } void idas_parallel_main(uchar *input, signed char *plan, int f_limit, signed char *h_diff_table, bool *movable_table) { (void) assert_direction[0]; (void) assert_direction2[0]; hipLaunchKernelGGL(( idas_kernel), dim3(N_BLOCK), dim3(N_CORE / N_BLOCK), 0, 0, input, plan, f_limit, h_diff_table, movable_table); }
a524ae7803776611c253dd86ee751967349f7d53.cu
#include <stdbool.h> #include "idas_parallel_host.h" /* stack implementation */ __device__ __shared__ static struct dir_stack_tag { uchar i; uchar buf[STACK_BUF_BYTES]; } stack[WARP_SIZE]; #define stack_byte(i) (stack[threadIdx.x].buf[(i) >> STACK_DIR_BITS]) #define stack_ofs(i) ((i & STACK_DIR_MASK) << 1) #define stack_get(i) \ ((stack_byte(i) & (STACK_DIR_MASK << stack_ofs(i))) >> stack_ofs(i)) __device__ static inline void stack_init(void) { stack[threadIdx.x].i = 0; } __device__ static inline void stack_put(DirDev dir) { stack_byte(stack[threadIdx.x].i) &= ~(STACK_DIR_MASK << stack_ofs(stack[threadIdx.x].i)); stack_byte(stack[threadIdx.x].i) |= dir << stack_ofs(stack[threadIdx.x].i); ++stack[threadIdx.x].i; } __device__ static inline bool stack_is_empty(void) { return stack[threadIdx.x].i == 0; /* how about !stack[threadIdx.x].i */ } __device__ static inline DirDev stack_pop(void) { --stack[threadIdx.x].i; return stack_get(stack[threadIdx.x].i); } __device__ static inline DirDev stack_peak(void) { return stack_get(stack[threadIdx.x].i - 1); } /* state implementation */ #define STATE_EMPTY 0 #define STATE_TILE_BITS 4 #define STATE_TILE_MASK ((1ull << STATE_TILE_BITS) - 1) /* * goal: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] */ __device__ __shared__ static struct state_tag { unsigned long long tile; /* packed representation label(4bit)*16pos */ uchar empty; uchar h_value; /* ub of h_value is 6*16 */ } state[WARP_SIZE]; #define state_tile_ofs(i) (i << 2) #define state_tile_get(i) \ ((state[threadIdx.x].tile & (STATE_TILE_MASK << state_tile_ofs(i))) >> \ state_tile_ofs(i)) #define state_tile_set(i, val) \ do \ { \ state[threadIdx.x].tile &= ~((STATE_TILE_MASK) << state_tile_ofs(i)); \ state[threadIdx.x].tile |= ((unsigned long long) val) \ << state_tile_ofs(i); \ } while (0) __device__ static uchar inline distance(uchar i, uchar j) { return i > j ? i - j : j - i; } #define H_DIFF(opponent, empty, empty_dir) \ h_diff_table_shared[opponent][empty][empty_dir] __device__ __shared__ static signed char h_diff_table_shared[STATE_N][STATE_N] [DIR_N]; __device__ static void state_init_hvalue(void) { uchar from_x[STATE_N], from_y[STATE_N]; int tid = threadIdx.x; state[tid].h_value = 0; for (int i = 0; i < STATE_N; ++i) { from_x[state_tile_get(i)] = POS_X(i); from_y[state_tile_get(i)] = POS_Y(i); } for (int i = 1; i < STATE_N; ++i) { state[tid].h_value += distance(from_x[i], POS_X(i)); state[tid].h_value += distance(from_y[i], POS_Y(i)); } } __device__ static void state_tile_fill(const uchar v_list[STATE_WIDTH * STATE_WIDTH]) { for (int i = 0; i < STATE_N; ++i) { if (v_list[i] == STATE_EMPTY) state[threadIdx.x].empty = i; state_tile_set(i, v_list[i]); } } __device__ static inline bool state_is_goal(void) { return state[threadIdx.x].h_value == 0; } __device__ static char assert_direction2 [DIR_UP == 0 && DIR_RIGHT == 1 && DIR_LEFT == 2 && DIR_DOWN == 3 ? 1 : -1]; __device__ __shared__ static bool movable_table_shared[STATE_N][DIR_N]; __device__ static inline bool state_movable(DirDev dir) { return movable_table_shared[state[threadIdx.x].empty][dir]; } __device__ static char assert_direction [DIR_UP == 0 && DIR_RIGHT == 1 && DIR_LEFT == 2 && DIR_DOWN == 3 ? 
1 : -1]; __device__ __constant__ const static int pos_diff_table[DIR_N] = { -STATE_WIDTH, 1, -1, +STATE_WIDTH}; __device__ static inline bool state_move_with_limit(DirDev dir, unsigned int f_limit) { int new_empty = state[threadIdx.x].empty + pos_diff_table[dir]; int opponent = state_tile_get(new_empty); int new_h_value = state[threadIdx.x].h_value + H_DIFF(opponent, new_empty, dir); if (stack[threadIdx.x].i + 1 + new_h_value > f_limit) return false; state[threadIdx.x].h_value = new_h_value; state_tile_set(state[threadIdx.x].empty, opponent); state[threadIdx.x].empty = new_empty; return true; } __device__ static inline void state_move(DirDev dir) { int new_empty = state[threadIdx.x].empty + pos_diff_table[dir]; int opponent = state_tile_get(new_empty); state[threadIdx.x].h_value += H_DIFF(opponent, new_empty, dir); state_tile_set(state[threadIdx.x].empty, opponent); state[threadIdx.x].empty = new_empty; } /* * solver implementation */ __device__ static bool idas_internal(uchar f_limit) { uchar dir = 0; for (;;) { if (state_is_goal()) return true; if ((stack_is_empty() || stack_peak() != dir_reverse_dev(dir)) && state_movable(dir)) { if (state_move_with_limit(dir, f_limit)) { stack_put(dir); dir = 0; continue; } } while (++dir == DIR_N) { if (stack_is_empty()) return false; dir = stack_pop(); state_move(dir_reverse_dev(dir)); } } } __global__ void idas_kernel(uchar *input, signed char *plan, int f_limit, signed char *h_diff_table, bool *movable_table) { int tid = threadIdx.x; int bid = blockIdx.x; int id = tid + bid * blockDim.x; for (int dir = 0; dir < DIR_N; ++dir) if (tid < STATE_N) movable_table_shared[tid][dir] = movable_table[tid * DIR_N + dir]; for (int i = 0; i < STATE_N * DIR_N; ++i) if (tid < STATE_N) h_diff_table_shared[tid][i / DIR_N][i % DIR_N] = h_diff_table[tid * STATE_N * DIR_N + i]; __syncthreads(); stack_init(); state_tile_fill(input + id * STATE_N); state_init_hvalue(); if (idas_internal(f_limit)) { plan[id * PLAN_LEN_MAX] = (signed char) stack[tid].i; /* len of plan */ for (uchar i = 0; i < stack[tid].i; ++i) plan[i + 1 + id * PLAN_LEN_MAX] = stack_get(i); } else plan[id * PLAN_LEN_MAX] = NOT_SOLVED; } void idas_parallel_main(uchar *input, signed char *plan, int f_limit, signed char *h_diff_table, bool *movable_table) { (void) assert_direction[0]; (void) assert_direction2[0]; idas_kernel<<<N_BLOCK, N_CORE / N_BLOCK>>>(input, plan, f_limit, h_diff_table, movable_table); }
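The IDA* solver pair above stores an entire 15-puzzle board in one unsigned long long, four bits per tile position (see STATE_TILE_MASK, state_tile_ofs, state_tile_get, state_tile_set). The stand-alone C++ sketch below reimplements just that packing with hypothetical helper names, to make the bit arithmetic easier to follow; it is not part of the dataset files.

#include <cstdio>

// 16 tile labels (0..15), 4 bits each, packed into a 64-bit word.
static unsigned long long tile_set(unsigned long long state, int i, unsigned v) {
    const unsigned long long mask = 0xFull;   // same role as STATE_TILE_MASK
    int ofs = i << 2;                         // same role as state_tile_ofs(i)
    state &= ~(mask << ofs);                  // clear the 4-bit slot
    state |= (unsigned long long)v << ofs;    // write the new label
    return state;
}

static unsigned tile_get(unsigned long long state, int i) {
    int ofs = i << 2;
    return (unsigned)((state >> ofs) & 0xFull);
}

int main() {
    unsigned long long state = 0;
    for (int i = 0; i < 16; ++i)              // goal board 0..15
        state = tile_set(state, i, (unsigned)i);
    printf("tile 0 = %u, tile 10 = %u, tile 15 = %u\n",
           tile_get(state, 0), tile_get(state, 10), tile_get(state, 15));
    return 0;
}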
e2830a5b8d485c64348d4ad7e5922e0c283b394a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @generated from magmablas/ztranspose.cu, normal z -> c, Mon Jun 25 18:24:13 2018 @author Stan Tomov @author Mark Gates */ #include "magma_internal.h" #define PRECISION_c #if defined(PRECISION_z) #define NX 16 #else #define NX 32 #endif #define NB 32 #define NY 8 // tile M-by-N matrix with ceil(M/NB) by ceil(N/NB) tiles sized NB-by-NB. // uses NX-by-NY threads, where NB/NX, NB/NY, NX/NY evenly. // subtile each NB-by-NB tile with (NB/NX) subtiles sized NX-by-NB // for each subtile // load NX-by-NB subtile transposed from A into sA, as (NB/NY) blocks sized NX-by-NY // save NB-by-NX subtile from sA into AT, as (NB/NX)*(NX/NY) blocks sized NX-by-NY // A += NX // AT += NX*ldat // // e.g., with NB=32, NX=32, NY=8 ([sdc] precisions) // load 32x32 subtile as 4 blocks of 32x8 columns: (A11 A12 A13 A14 ) // save 32x32 subtile as 1*4 blocks of 32x8 columns: (AT11 AT12 AT13 AT14) // // e.g., with NB=32, NX=16, NY=8 (z precision) // load 16x32 subtile as 4 blocks of 16x8 columns: (A11 A12 A13 A14) // save 32x16 subtile as 2*2 blocks of 16x8 columns: (AT11 AT12) // (AT21 AT22) __device__ void ctranspose_device( int m, int n, const magmaFloatComplex *A, int lda, magmaFloatComplex *AT, int ldat) { __shared__ magmaFloatComplex sA[NB][NX+1]; int tx = threadIdx.x; int ty = threadIdx.y; int ibx = blockIdx.x*NB; int iby = blockIdx.y*NB; int i, j; A += ibx + tx + (iby + ty)*lda; AT += iby + tx + (ibx + ty)*ldat; #pragma unroll for( int tile=0; tile < NB/NX; ++tile ) { // load NX-by-NB subtile transposed from A into sA i = ibx + tx + tile*NX; j = iby + ty; if (i < m) { #pragma unroll for( int j2=0; j2 < NB; j2 += NY ) { if (j + j2 < n) { sA[ty + j2][tx] = A[j2*lda]; } } } __syncthreads(); // save NB-by-NX subtile from sA into AT i = iby + tx; j = ibx + ty + tile*NX; #pragma unroll for( int i2=0; i2 < NB; i2 += NX ) { if (i + i2 < n) { #pragma unroll for( int j2=0; j2 < NX; j2 += NY ) { if (j + j2 < m) { AT[i2 + j2*ldat] = sA[tx + i2][ty + j2]; } } } } __syncthreads(); // move to next subtile A += NX; AT += NX*ldat; } } /* kernel wrapper to call the device function. */ __global__ void ctranspose_kernel( int m, int n, const magmaFloatComplex *A, int lda, magmaFloatComplex *AT, int ldat) { ctranspose_device(m, n, A, lda, AT, ldat); } __global__ void ctranspose_kernel_batched( int m, int n, magmaFloatComplex **dA_array, int lda, magmaFloatComplex **dAT_array, int ldat) { int batchid = blockIdx.z; ctranspose_device(m, n, dA_array[batchid], lda, dAT_array[batchid], ldat); } /***************************************************************************//** Purpose ------- ctranspose copies and transposes a matrix dA to matrix dAT. Same as ctranspose, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA COMPLEX array, dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT COMPLEX array, dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_transpose *******************************************************************************/ extern "C" void magmablas_ctranspose( magma_int_t m, magma_int_t n, magmaFloatComplex_const_ptr dA, magma_int_t ldda, magmaFloatComplex_ptr dAT, magma_int_t lddat, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY ); dim3 grid( magma_ceildiv( m, NB ), magma_ceildiv( n, NB ) ); hipLaunchKernelGGL(( ctranspose_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dA, ldda, dAT, lddat ); } /***************************************************************************//** Purpose ------- ctranspose_batched copies and transposes a matrix dA_array[i] to matrix dAT_array[i]. Same as ctranspose_batched, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA_array COMPLEX* array, dimension (batchCount) array of pointers to the matrices dA, where each dA is of dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT_array COMPLEX* array, dimension (batchCount) array of pointers to the matrices dAT, where each dAT is of dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. @param[in] batchCount Number of matrices in dA_array and dAT_array @ingroup magma_transpose_batched *******************************************************************************/ extern "C" void magmablas_ctranspose_batched( magma_int_t m, magma_int_t n, magmaFloatComplex **dA_array, magma_int_t ldda, magmaFloatComplex **dAT_array, magma_int_t lddat, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY, 1 ); dim3 grid( magma_ceildiv( m, NB ), magma_ceildiv( n, NB ), batchCount ); hipLaunchKernelGGL(( ctranspose_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dA_array, ldda, dAT_array, lddat ); }
e2830a5b8d485c64348d4ad7e5922e0c283b394a.cu
/* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @generated from magmablas/ztranspose.cu, normal z -> c, Mon Jun 25 18:24:13 2018 @author Stan Tomov @author Mark Gates */ #include "magma_internal.h" #define PRECISION_c #if defined(PRECISION_z) #define NX 16 #else #define NX 32 #endif #define NB 32 #define NY 8 // tile M-by-N matrix with ceil(M/NB) by ceil(N/NB) tiles sized NB-by-NB. // uses NX-by-NY threads, where NB/NX, NB/NY, NX/NY evenly. // subtile each NB-by-NB tile with (NB/NX) subtiles sized NX-by-NB // for each subtile // load NX-by-NB subtile transposed from A into sA, as (NB/NY) blocks sized NX-by-NY // save NB-by-NX subtile from sA into AT, as (NB/NX)*(NX/NY) blocks sized NX-by-NY // A += NX // AT += NX*ldat // // e.g., with NB=32, NX=32, NY=8 ([sdc] precisions) // load 32x32 subtile as 4 blocks of 32x8 columns: (A11 A12 A13 A14 ) // save 32x32 subtile as 1*4 blocks of 32x8 columns: (AT11 AT12 AT13 AT14) // // e.g., with NB=32, NX=16, NY=8 (z precision) // load 16x32 subtile as 4 blocks of 16x8 columns: (A11 A12 A13 A14) // save 32x16 subtile as 2*2 blocks of 16x8 columns: (AT11 AT12) // (AT21 AT22) __device__ void ctranspose_device( int m, int n, const magmaFloatComplex *A, int lda, magmaFloatComplex *AT, int ldat) { __shared__ magmaFloatComplex sA[NB][NX+1]; int tx = threadIdx.x; int ty = threadIdx.y; int ibx = blockIdx.x*NB; int iby = blockIdx.y*NB; int i, j; A += ibx + tx + (iby + ty)*lda; AT += iby + tx + (ibx + ty)*ldat; #pragma unroll for( int tile=0; tile < NB/NX; ++tile ) { // load NX-by-NB subtile transposed from A into sA i = ibx + tx + tile*NX; j = iby + ty; if (i < m) { #pragma unroll for( int j2=0; j2 < NB; j2 += NY ) { if (j + j2 < n) { sA[ty + j2][tx] = A[j2*lda]; } } } __syncthreads(); // save NB-by-NX subtile from sA into AT i = iby + tx; j = ibx + ty + tile*NX; #pragma unroll for( int i2=0; i2 < NB; i2 += NX ) { if (i + i2 < n) { #pragma unroll for( int j2=0; j2 < NX; j2 += NY ) { if (j + j2 < m) { AT[i2 + j2*ldat] = sA[tx + i2][ty + j2]; } } } } __syncthreads(); // move to next subtile A += NX; AT += NX*ldat; } } /* kernel wrapper to call the device function. */ __global__ void ctranspose_kernel( int m, int n, const magmaFloatComplex *A, int lda, magmaFloatComplex *AT, int ldat) { ctranspose_device(m, n, A, lda, AT, ldat); } __global__ void ctranspose_kernel_batched( int m, int n, magmaFloatComplex **dA_array, int lda, magmaFloatComplex **dAT_array, int ldat) { int batchid = blockIdx.z; ctranspose_device(m, n, dA_array[batchid], lda, dAT_array[batchid], ldat); } /***************************************************************************//** Purpose ------- ctranspose copies and transposes a matrix dA to matrix dAT. Same as ctranspose, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA COMPLEX array, dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT COMPLEX array, dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_transpose *******************************************************************************/ extern "C" void magmablas_ctranspose( magma_int_t m, magma_int_t n, magmaFloatComplex_const_ptr dA, magma_int_t ldda, magmaFloatComplex_ptr dAT, magma_int_t lddat, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY ); dim3 grid( magma_ceildiv( m, NB ), magma_ceildiv( n, NB ) ); ctranspose_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, dA, ldda, dAT, lddat ); } /***************************************************************************//** Purpose ------- ctranspose_batched copies and transposes a matrix dA_array[i] to matrix dAT_array[i]. Same as ctranspose_batched, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA_array COMPLEX* array, dimension (batchCount) array of pointers to the matrices dA, where each dA is of dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT_array COMPLEX* array, dimension (batchCount) array of pointers to the matrices dAT, where each dAT is of dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. @param[in] batchCount Number of matrices in dA_array and dAT_array @ingroup magma_transpose_batched *******************************************************************************/ extern "C" void magmablas_ctranspose_batched( magma_int_t m, magma_int_t n, magmaFloatComplex **dA_array, magma_int_t ldda, magmaFloatComplex **dAT_array, magma_int_t lddat, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY, 1 ); dim3 grid( magma_ceildiv( m, NB ), magma_ceildiv( n, NB ), batchCount ); ctranspose_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, dA_array, ldda, dAT_array, lddat ); }
6e2662a42f64f13e8baf2709bddd7909ecf45bd2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //---------------------------------*-CUDA-*----------------------------------// // Copyright 2020 UT-Battelle, LLC, and other Celeritas developers. // See the top-level COPYRIGHT file for details. // SPDX-License-Identifier: (Apache-2.0 OR MIT) //---------------------------------------------------------------------------// //! \file DetectorUtils.cu //---------------------------------------------------------------------------// #include "DetectorUtils.hh" #include "base/Atomics.hh" #include "base/KernelParamCalculator.cuda.hh" #include "base/StackAllocatorView.hh" namespace celeritas { namespace detail { namespace { //---------------------------------------------------------------------------// // KERNELS //---------------------------------------------------------------------------// __global__ void bin_buffer_kernel(DetectorPointers const detector) { auto hits = StackAllocatorView<Hit>(detector.hit_buffer).get(); UniformGrid grid(detector.tally_grid); size_type thread_idx = KernelParamCalculator::thread_id().get(); if (thread_idx < hits.size()) { // Find bin const Hit& hit = hits[thread_idx]; real_type z_pos = hit.pos[2]; size_type bin; if (z_pos <= grid.front()) bin = 0; else if (z_pos >= grid.back()) bin = grid.size() - 1; else bin = grid.find(z_pos); // Add energy deposition (NOTE: very slow on arch 600) atomic_add(&detector.tally_deposition[bin], hit.energy_deposited.value()); } } //---------------------------------------------------------------------------// __global__ void normalize_kernel(DetectorPointers const detector, real_type norm) { size_type thread_idx = KernelParamCalculator::thread_id().get(); if (thread_idx < detector.tally_deposition.size()) { detector.tally_deposition[thread_idx] *= norm; } } //---------------------------------------------------------------------------// } // namespace //---------------------------------------------------------------------------// // KERNEL INTERFACES //---------------------------------------------------------------------------// /*! * Bin the buffer into the tally grid. * * The caller will have to clear the buffer after calling this. No * normalization is performed. */ void bin_buffer(const DetectorPointers& detector) { auto params = KernelParamCalculator()(detector.capacity()); hipLaunchKernelGGL(( bin_buffer_kernel), dim3(params.grid_size), dim3(params.block_size), 0, 0, detector); } //---------------------------------------------------------------------------// /*! * Multiply the binned data by the given normalization. */ void normalize(const DetectorPointers& device_ptrs, real_type norm) { auto params = KernelParamCalculator()(device_ptrs.tally_deposition.size()); hipLaunchKernelGGL(( normalize_kernel), dim3(params.grid_size), dim3(params.block_size), 0, 0, device_ptrs, norm); } //---------------------------------------------------------------------------// } // namespace detail } // namespace celeritas
6e2662a42f64f13e8baf2709bddd7909ecf45bd2.cu
//---------------------------------*-CUDA-*----------------------------------// // Copyright 2020 UT-Battelle, LLC, and other Celeritas developers. // See the top-level COPYRIGHT file for details. // SPDX-License-Identifier: (Apache-2.0 OR MIT) //---------------------------------------------------------------------------// //! \file DetectorUtils.cu //---------------------------------------------------------------------------// #include "DetectorUtils.hh" #include "base/Atomics.hh" #include "base/KernelParamCalculator.cuda.hh" #include "base/StackAllocatorView.hh" namespace celeritas { namespace detail { namespace { //---------------------------------------------------------------------------// // KERNELS //---------------------------------------------------------------------------// __global__ void bin_buffer_kernel(DetectorPointers const detector) { auto hits = StackAllocatorView<Hit>(detector.hit_buffer).get(); UniformGrid grid(detector.tally_grid); size_type thread_idx = KernelParamCalculator::thread_id().get(); if (thread_idx < hits.size()) { // Find bin const Hit& hit = hits[thread_idx]; real_type z_pos = hit.pos[2]; size_type bin; if (z_pos <= grid.front()) bin = 0; else if (z_pos >= grid.back()) bin = grid.size() - 1; else bin = grid.find(z_pos); // Add energy deposition (NOTE: very slow on arch 600) atomic_add(&detector.tally_deposition[bin], hit.energy_deposited.value()); } } //---------------------------------------------------------------------------// __global__ void normalize_kernel(DetectorPointers const detector, real_type norm) { size_type thread_idx = KernelParamCalculator::thread_id().get(); if (thread_idx < detector.tally_deposition.size()) { detector.tally_deposition[thread_idx] *= norm; } } //---------------------------------------------------------------------------// } // namespace //---------------------------------------------------------------------------// // KERNEL INTERFACES //---------------------------------------------------------------------------// /*! * Bin the buffer into the tally grid. * * The caller will have to clear the buffer after calling this. No * normalization is performed. */ void bin_buffer(const DetectorPointers& detector) { auto params = KernelParamCalculator()(detector.capacity()); bin_buffer_kernel<<<params.grid_size, params.block_size>>>(detector); } //---------------------------------------------------------------------------// /*! * Multiply the binned data by the given normalization. */ void normalize(const DetectorPointers& device_ptrs, real_type norm) { auto params = KernelParamCalculator()(device_ptrs.tally_deposition.size()); normalize_kernel<<<params.grid_size, params.block_size>>>(device_ptrs, norm); } //---------------------------------------------------------------------------// } // namespace detail } // namespace celeritas
1f5bf7488d8a99dd03f65d634c52eefff38e0d73.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> template <typename T> __global__ void cropAndResizeKernel(const int nthreads, const T* image_ptr, const float* boxes_ptr, int num_boxes, int batch, int image_height, int image_width, int crop_height, int crop_width, int depth, float extrapolation_value, float* crops_ptr) { for (int out_idx = threadIdx.x + blockIdx.x * blockDim.x ; out_idx < nthreads; out_idx += blockDim.x * gridDim.x) { int idx = out_idx; const int x = idx % crop_width; idx /= crop_width; const int y = idx % crop_height; idx /= crop_height; const int d = idx % depth; const int b = idx / depth; const float y1 = boxes_ptr[b * 4]; const float x1 = boxes_ptr[b * 4 + 1]; const float y2 = boxes_ptr[b * 4 + 2]; const float x2 = boxes_ptr[b * 4 + 3]; //each image has num_boxes of boxes, so we simply divide to get the box index. const int b_in = b / num_boxes; if (b_in < 0 || b_in >= batch) { continue; } const float height_scale = (crop_height > 1) ? (y2 - y1) * (image_height - 1) / (crop_height - 1) : 0; const float width_scale = (crop_width > 1) ? (x2 - x1) * (image_width - 1) / (crop_width - 1) : 0; const float in_y = (crop_height > 1) ? y1 * (image_height - 1) + y * height_scale : 0.5 * (y1 + y2) * (image_height - 1); if (in_y < 0 || in_y > image_height - 1) { crops_ptr[out_idx] = extrapolation_value; continue; } const float in_x = (crop_width > 1) ? 
x1 * (image_width - 1) + x * width_scale : 0.5 * (x1 + x2) * (image_width - 1); if (in_x < 0 || in_x > image_width - 1) { crops_ptr[out_idx] = extrapolation_value; continue; } const int top_y_index = floorf(in_y); const int bottom_y_index = ceilf(in_y); const float y_lerp = in_y - top_y_index; const int left_x_index = floorf(in_x); const int right_x_index = ceilf(in_x); const float x_lerp = in_x - left_x_index; const float top_left(static_cast<float>( image_ptr[((b_in * depth + d) * image_height + top_y_index) * image_width + left_x_index])); const float top_right(static_cast<float>( image_ptr[((b_in * depth + d) * image_height + top_y_index) * image_width + right_x_index])); const float bottom_left(static_cast<float>( image_ptr[((b_in * depth + d) * image_height + bottom_y_index) * image_width + left_x_index])); const float bottom_right(static_cast<float>( image_ptr[((b_in * depth + d) * image_height + bottom_y_index) * image_width + right_x_index])); const float top = top_left + (top_right - top_left) * x_lerp; const float bottom = bottom_left + (bottom_right - bottom_left) * x_lerp; crops_ptr[out_idx] = top + (bottom - top) * y_lerp; } } int cropAndResizeInference( hipStream_t stream, int n, const void* image, const void* rois, int batch_size, int input_height, int input_width, int num_boxes, int crop_height, int crop_width, int depth, void* output) { int output_volume = batch_size * num_boxes * crop_height * crop_width * depth; int block_size = 1024; int grid_size = (output_volume + block_size - 1 ) / block_size; hipLaunchKernelGGL(( cropAndResizeKernel<float>) , dim3(grid_size), dim3(block_size), 0, stream, output_volume, static_cast<const float*>(image), static_cast<const float*>(rois), num_boxes, batch_size, input_height, input_width, crop_height, crop_width, depth, 0.0f, static_cast<float*>(output)); return 0; }
1f5bf7488d8a99dd03f65d634c52eefff38e0d73.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuda.h> #include <cuda_runtime.h> template <typename T> __global__ void cropAndResizeKernel(const int nthreads, const T* image_ptr, const float* boxes_ptr, int num_boxes, int batch, int image_height, int image_width, int crop_height, int crop_width, int depth, float extrapolation_value, float* crops_ptr) { for (int out_idx = threadIdx.x + blockIdx.x * blockDim.x ; out_idx < nthreads; out_idx += blockDim.x * gridDim.x) { int idx = out_idx; const int x = idx % crop_width; idx /= crop_width; const int y = idx % crop_height; idx /= crop_height; const int d = idx % depth; const int b = idx / depth; const float y1 = boxes_ptr[b * 4]; const float x1 = boxes_ptr[b * 4 + 1]; const float y2 = boxes_ptr[b * 4 + 2]; const float x2 = boxes_ptr[b * 4 + 3]; //each image has num_boxes of boxes, so we simply divide to get the box index. const int b_in = b / num_boxes; if (b_in < 0 || b_in >= batch) { continue; } const float height_scale = (crop_height > 1) ? (y2 - y1) * (image_height - 1) / (crop_height - 1) : 0; const float width_scale = (crop_width > 1) ? (x2 - x1) * (image_width - 1) / (crop_width - 1) : 0; const float in_y = (crop_height > 1) ? y1 * (image_height - 1) + y * height_scale : 0.5 * (y1 + y2) * (image_height - 1); if (in_y < 0 || in_y > image_height - 1) { crops_ptr[out_idx] = extrapolation_value; continue; } const float in_x = (crop_width > 1) ? 
x1 * (image_width - 1) + x * width_scale : 0.5 * (x1 + x2) * (image_width - 1); if (in_x < 0 || in_x > image_width - 1) { crops_ptr[out_idx] = extrapolation_value; continue; } const int top_y_index = floorf(in_y); const int bottom_y_index = ceilf(in_y); const float y_lerp = in_y - top_y_index; const int left_x_index = floorf(in_x); const int right_x_index = ceilf(in_x); const float x_lerp = in_x - left_x_index; const float top_left(static_cast<float>( image_ptr[((b_in * depth + d) * image_height + top_y_index) * image_width + left_x_index])); const float top_right(static_cast<float>( image_ptr[((b_in * depth + d) * image_height + top_y_index) * image_width + right_x_index])); const float bottom_left(static_cast<float>( image_ptr[((b_in * depth + d) * image_height + bottom_y_index) * image_width + left_x_index])); const float bottom_right(static_cast<float>( image_ptr[((b_in * depth + d) * image_height + bottom_y_index) * image_width + right_x_index])); const float top = top_left + (top_right - top_left) * x_lerp; const float bottom = bottom_left + (bottom_right - bottom_left) * x_lerp; crops_ptr[out_idx] = top + (bottom - top) * y_lerp; } } int cropAndResizeInference( cudaStream_t stream, int n, const void* image, const void* rois, int batch_size, int input_height, int input_width, int num_boxes, int crop_height, int crop_width, int depth, void* output) { int output_volume = batch_size * num_boxes * crop_height * crop_width * depth; int block_size = 1024; int grid_size = (output_volume + block_size - 1 ) / block_size; cropAndResizeKernel<float> <<< grid_size, block_size, 0, stream>>>(output_volume, static_cast<const float*>(image), static_cast<const float*>(rois), num_boxes, batch_size, input_height, input_width, crop_height, crop_width, depth, 0.0f, static_cast<float*>(output)); return 0; }
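The crop-and-resize pair above computes every output pixel with a two-stage bilinear interpolation: a horizontal lerp across the top and bottom neighbour pairs using x_lerp, then a vertical lerp between those two results using y_lerp. The host-side sketch below reproduces that arithmetic for one sample point; all numeric values are invented for illustration.

#include <cmath>
#include <cstdio>

int main() {
    // Four neighbouring pixel values around the sample point.
    float top_left = 1.0f, top_right = 3.0f;
    float bottom_left = 5.0f, bottom_right = 7.0f;

    // Sample position; the fractional parts play the role of x_lerp / y_lerp.
    float in_x = 2.25f, in_y = 6.75f;
    float x_lerp = in_x - std::floor(in_x);   // 0.25
    float y_lerp = in_y - std::floor(in_y);   // 0.75

    // Same two-stage lerp as the kernel: horizontal, then vertical.
    float top    = top_left    + (top_right    - top_left)    * x_lerp;  // 1.5
    float bottom = bottom_left + (bottom_right - bottom_left) * x_lerp;  // 5.5
    float value  = top + (bottom - top) * y_lerp;                        // 4.5

    printf("interpolated value = %f\n", value);
    return 0;
}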
0a9d3474e757d18df0a976f741effb12d8f50f0a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "segment_csr_cuda.h" #include <ATen/hip/HIPContext.h> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/detail/TensorInfo.cuh> #include "index_info.cuh" #include "reducer.cuh" #include "utils.cuh" #define THREADS 256 #define BLOCKS(TB, N) (TB * N + THREADS - 1) / THREADS #define FULL_MASK 0xffffffff template<typename T> __device__ __forceinline__ T ldg(const T* ptr) { #if __CUDA_ARCH__ >= 350 return __ldg(ptr); #else return *ptr; #endif } template <typename scalar_t, SegmentReductionType REDUCE, int TB> __global__ void segment_csr_kernel(const scalar_t *src_data, const at::cuda::detail::TensorInfo<int64_t, int> indptr_info, scalar_t *out_data, int64_t *arg_out_data, size_t N, size_t E) { // Each warp processes exactly `32/TB` rows and aggregates all row values // via a parallel reduction. int thread_idx = blockIdx.x * blockDim.x + threadIdx.x; int row_idx = thread_idx / TB; int lane_idx = thread_idx & (TB - 1); if (row_idx < N) { int offset = IndexPtrToOffset<int64_t>::get(row_idx, indptr_info); int64_t row_start = ldg(indptr_info.data + offset); int64_t row_end = ldg(indptr_info.data + offset + indptr_info.strides[indptr_info.dims - 1]); scalar_t val = Reducer<scalar_t, REDUCE>::init(); int64_t arg, arg_tmp; offset = (row_idx / (indptr_info.sizes[indptr_info.dims - 1] - 1)) * E; for (int64_t src_idx = row_start + lane_idx; src_idx < row_end; src_idx += TB) { Reducer<scalar_t, REDUCE>::update(&val, src_data[offset + src_idx], &arg, src_idx); } #pragma unroll for (int i = TB / 2; i > 0; i /= 2) { // Parallel reduction inside a single warp. if (REDUCE == MIN || REDUCE == MAX) arg_tmp = __shfl_down_sync(FULL_MASK, arg, i); Reducer<scalar_t, REDUCE>::update( &val, __shfl_down_sync(FULL_MASK, val, i), &arg, arg_tmp); } if (lane_idx == 0) { Reducer<scalar_t, REDUCE>::write(out_data + row_idx, val, arg_out_data + row_idx, arg, row_end - row_start); } } } template <typename scalar_t, SegmentReductionType REDUCE> __global__ void segment_csr_broadcast_kernel( const scalar_t *src_data, const at::cuda::detail::TensorInfo<int64_t, int> indptr_info, scalar_t *out_data, int64_t *arg_out_data, size_t N, size_t K, size_t E) { // Each thread processes exactly one row. It turned out that is more // efficient than using shared memory due to avoiding synchronization // barriers. 
int thread_idx = blockIdx.x * blockDim.x + threadIdx.x; int row_idx = thread_idx / K; int lane_idx = thread_idx % K; if (thread_idx < N * K) { int offset = IndexPtrToOffset<int64_t>::get(row_idx, indptr_info); int64_t row_start = ldg(indptr_info.data + offset); int64_t row_end = ldg(indptr_info.data + offset + indptr_info.strides[indptr_info.dims - 1]); scalar_t val = Reducer<scalar_t, REDUCE>::init(); int64_t arg; offset = (row_idx / (indptr_info.sizes[indptr_info.dims - 1] - 1)) * E * K; for (int64_t src_idx = row_start; src_idx < row_end; src_idx++) { Reducer<scalar_t, REDUCE>::update( &val, src_data[offset + K * src_idx + lane_idx], &arg, src_idx); } Reducer<scalar_t, REDUCE>::write(out_data + thread_idx, val, arg_out_data + thread_idx, arg, row_end - row_start); } } std::tuple<torch::Tensor, torch::optional<torch::Tensor>> segment_csr_cuda(torch::Tensor src, torch::Tensor indptr, torch::optional<torch::Tensor> optional_out, std::string reduce) { CHECK_CUDA(src); CHECK_CUDA(indptr); if (optional_out.has_value()) CHECK_CUDA(optional_out.value()); hipSetDevice(src.get_device()); CHECK_INPUT(src.dim() >= indptr.dim()); auto sizes = indptr.sizes().vec(); for (auto i = 0; i < indptr.dim() - 1; i++) sizes[i] = src.size(i); indptr = indptr.expand(sizes); auto dim = indptr.dim() - 1; src = src.contiguous(); torch::Tensor out; if (optional_out.has_value()) { out = optional_out.value().contiguous(); for (int i = 0; i < out.dim(); i++) if (i != dim) CHECK_INPUT(src.size(i) == out.size(i)); CHECK_INPUT(src.numel() == 0 || out.size(dim) == indptr.size(dim) - 1); } else { sizes = src.sizes().vec(); sizes[dim] = std::max<int64_t>(indptr.size(dim) - 1, 0); out = torch::empty(sizes, src.options()); } torch::optional<torch::Tensor> arg_out = torch::nullopt; int64_t *arg_out_data = nullptr; if (reduce2REDUCE.at(reduce) == MIN || reduce2REDUCE.at(reduce) == MAX) { arg_out = torch::full(out.sizes(), src.size(dim), indptr.options()); arg_out_data = arg_out.value().data_ptr<int64_t>(); } if (src.numel() == 0) { if (!optional_out.has_value()) out.fill_(0); return std::make_tuple(out, arg_out); } auto N = out.size(dim) * (indptr.numel() / indptr.size(-1)); auto K = out.numel() / N; auto E = src.size(dim); auto indptr_info = at::cuda::detail::getTensorInfo<int64_t, int>(indptr); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_ALL_TYPES(src.scalar_type(), "_", [&] { auto src_data = src.data_ptr<scalar_t>(); auto out_data = out.data_ptr<scalar_t>(); AT_DISPATCH_REDUCTION_TYPES(reduce, [&] { if (K == 1) { hipLaunchKernelGGL(( segment_csr_kernel<scalar_t, REDUCE, 1>) , dim3(BLOCKS(32, N)), dim3(THREADS), 0, stream, src_data, indptr_info, out_data, arg_out_data, N, E); } else { hipLaunchKernelGGL(( segment_csr_broadcast_kernel<scalar_t, REDUCE>) , dim3(BLOCKS(1, N * K)), dim3(THREADS), 0, stream, src_data, indptr_info, out_data, arg_out_data, N, K, E); } }); }); return std::make_tuple(out, arg_out); } template <typename scalar_t, int TB> __global__ void gather_csr_kernel(const scalar_t *src_data, const at::cuda::detail::TensorInfo<int64_t, int> indptr_info, scalar_t *out_data, size_t N, size_t E) { int thread_idx = blockIdx.x * blockDim.x + threadIdx.x; int row_idx = thread_idx / TB; int lane_idx = thread_idx % TB; if (row_idx < N) { int offset = IndexPtrToOffset<int64_t>::get(row_idx, indptr_info); int row_start = ldg(indptr_info.data + offset); int row_end = ldg(indptr_info.data + offset + indptr_info.strides[indptr_info.dims - 1]); scalar_t val = ldg(src_data + row_idx); offset = 
(row_idx / (indptr_info.sizes[indptr_info.dims - 1] - 1)) * E; for (int out_idx = row_start + lane_idx; out_idx < row_end; out_idx += TB) { out_data[offset + out_idx] = val; // "Mostly" coalesced. } } } template <typename scalar_t> __global__ void gather_csr_broadcast_kernel( const scalar_t *src_data, const at::cuda::detail::TensorInfo<int64_t, int> indptr_info, scalar_t *out_data, size_t N, size_t K, size_t E) { int thread_idx = blockIdx.x * blockDim.x + threadIdx.x; int row_idx = thread_idx / K; int lane_idx = thread_idx % K; if (thread_idx < N * K) { int offset = IndexPtrToOffset<int64_t>::get(row_idx, indptr_info); int row_start = ldg(indptr_info.data + offset); int row_end = ldg(indptr_info.data + offset + indptr_info.strides[indptr_info.dims - 1]); scalar_t val = src_data[thread_idx]; // Coalesced. offset = (row_idx / (indptr_info.sizes[indptr_info.dims - 1] - 1)) * E * K; for (int out_idx = row_start; out_idx < row_end; out_idx++) { out_data[offset + K * out_idx + lane_idx] = val; // "Mostly" coalesced. } } } torch::Tensor gather_csr_cuda(torch::Tensor src, torch::Tensor indptr, torch::optional<torch::Tensor> optional_out) { CHECK_CUDA(src); CHECK_CUDA(indptr); if (optional_out.has_value()) CHECK_CUDA(optional_out.value()); hipSetDevice(src.get_device()); CHECK_INPUT(src.dim() >= indptr.dim()); auto sizes = indptr.sizes().vec(); for (auto i = 0; i < indptr.dim() - 1; i++) sizes[i] = src.size(i); indptr = indptr.expand(sizes); auto dim = indptr.dim() - 1; CHECK_INPUT(src.size(dim) == 0 || src.size(dim) == indptr.size(dim) - 1); src = src.contiguous(); torch::Tensor out; if (optional_out.has_value()) { out = optional_out.value().contiguous(); for (auto i = 0; i < out.dim(); i++) if (i != dim) CHECK_INPUT(src.size(i) == out.size(i)); } else { auto sizes = src.sizes().vec(); if (src.numel() > 0) { sizes[dim] = indptr.flatten()[-1].cpu().data_ptr<int64_t>()[0]; } else { sizes[dim] = 0; } out = torch::empty(sizes, src.options()); } if (src.numel() == 0) { if (!optional_out.has_value()) out.fill_(0); return out; } auto N = src.size(dim) * (indptr.numel() / indptr.size(-1)); auto K = src.numel() / N; auto E = out.size(dim); auto indptr_info = at::cuda::detail::getTensorInfo<int64_t, int>(indptr); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_ALL_TYPES(src.scalar_type(), "_", [&] { auto src_data = src.data_ptr<scalar_t>(); auto out_data = out.data_ptr<scalar_t>(); if (K == 1) hipLaunchKernelGGL(( gather_csr_kernel<scalar_t, 4>), dim3(BLOCKS(1, 4 * N)), dim3(THREADS), 0, stream, src_data, indptr_info, out_data, N, E); else hipLaunchKernelGGL(( gather_csr_broadcast_kernel<scalar_t>) , dim3(BLOCKS(1, N * K)), dim3(THREADS), 0, stream, src_data, indptr_info, out_data, N, K, E); }); return out; }
0a9d3474e757d18df0a976f741effb12d8f50f0a.cu
#include "segment_csr_cuda.h" #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/detail/TensorInfo.cuh> #include "index_info.cuh" #include "reducer.cuh" #include "utils.cuh" #define THREADS 256 #define BLOCKS(TB, N) (TB * N + THREADS - 1) / THREADS #define FULL_MASK 0xffffffff template<typename T> __device__ __forceinline__ T ldg(const T* ptr) { #if __CUDA_ARCH__ >= 350 return __ldg(ptr); #else return *ptr; #endif } template <typename scalar_t, SegmentReductionType REDUCE, int TB> __global__ void segment_csr_kernel(const scalar_t *src_data, const at::cuda::detail::TensorInfo<int64_t, int> indptr_info, scalar_t *out_data, int64_t *arg_out_data, size_t N, size_t E) { // Each warp processes exactly `32/TB` rows and aggregates all row values // via a parallel reduction. int thread_idx = blockIdx.x * blockDim.x + threadIdx.x; int row_idx = thread_idx / TB; int lane_idx = thread_idx & (TB - 1); if (row_idx < N) { int offset = IndexPtrToOffset<int64_t>::get(row_idx, indptr_info); int64_t row_start = ldg(indptr_info.data + offset); int64_t row_end = ldg(indptr_info.data + offset + indptr_info.strides[indptr_info.dims - 1]); scalar_t val = Reducer<scalar_t, REDUCE>::init(); int64_t arg, arg_tmp; offset = (row_idx / (indptr_info.sizes[indptr_info.dims - 1] - 1)) * E; for (int64_t src_idx = row_start + lane_idx; src_idx < row_end; src_idx += TB) { Reducer<scalar_t, REDUCE>::update(&val, src_data[offset + src_idx], &arg, src_idx); } #pragma unroll for (int i = TB / 2; i > 0; i /= 2) { // Parallel reduction inside a single warp. if (REDUCE == MIN || REDUCE == MAX) arg_tmp = __shfl_down_sync(FULL_MASK, arg, i); Reducer<scalar_t, REDUCE>::update( &val, __shfl_down_sync(FULL_MASK, val, i), &arg, arg_tmp); } if (lane_idx == 0) { Reducer<scalar_t, REDUCE>::write(out_data + row_idx, val, arg_out_data + row_idx, arg, row_end - row_start); } } } template <typename scalar_t, SegmentReductionType REDUCE> __global__ void segment_csr_broadcast_kernel( const scalar_t *src_data, const at::cuda::detail::TensorInfo<int64_t, int> indptr_info, scalar_t *out_data, int64_t *arg_out_data, size_t N, size_t K, size_t E) { // Each thread processes exactly one row. It turned out that is more // efficient than using shared memory due to avoiding synchronization // barriers. 
int thread_idx = blockIdx.x * blockDim.x + threadIdx.x; int row_idx = thread_idx / K; int lane_idx = thread_idx % K; if (thread_idx < N * K) { int offset = IndexPtrToOffset<int64_t>::get(row_idx, indptr_info); int64_t row_start = ldg(indptr_info.data + offset); int64_t row_end = ldg(indptr_info.data + offset + indptr_info.strides[indptr_info.dims - 1]); scalar_t val = Reducer<scalar_t, REDUCE>::init(); int64_t arg; offset = (row_idx / (indptr_info.sizes[indptr_info.dims - 1] - 1)) * E * K; for (int64_t src_idx = row_start; src_idx < row_end; src_idx++) { Reducer<scalar_t, REDUCE>::update( &val, src_data[offset + K * src_idx + lane_idx], &arg, src_idx); } Reducer<scalar_t, REDUCE>::write(out_data + thread_idx, val, arg_out_data + thread_idx, arg, row_end - row_start); } } std::tuple<torch::Tensor, torch::optional<torch::Tensor>> segment_csr_cuda(torch::Tensor src, torch::Tensor indptr, torch::optional<torch::Tensor> optional_out, std::string reduce) { CHECK_CUDA(src); CHECK_CUDA(indptr); if (optional_out.has_value()) CHECK_CUDA(optional_out.value()); cudaSetDevice(src.get_device()); CHECK_INPUT(src.dim() >= indptr.dim()); auto sizes = indptr.sizes().vec(); for (auto i = 0; i < indptr.dim() - 1; i++) sizes[i] = src.size(i); indptr = indptr.expand(sizes); auto dim = indptr.dim() - 1; src = src.contiguous(); torch::Tensor out; if (optional_out.has_value()) { out = optional_out.value().contiguous(); for (int i = 0; i < out.dim(); i++) if (i != dim) CHECK_INPUT(src.size(i) == out.size(i)); CHECK_INPUT(src.numel() == 0 || out.size(dim) == indptr.size(dim) - 1); } else { sizes = src.sizes().vec(); sizes[dim] = std::max<int64_t>(indptr.size(dim) - 1, 0); out = torch::empty(sizes, src.options()); } torch::optional<torch::Tensor> arg_out = torch::nullopt; int64_t *arg_out_data = nullptr; if (reduce2REDUCE.at(reduce) == MIN || reduce2REDUCE.at(reduce) == MAX) { arg_out = torch::full(out.sizes(), src.size(dim), indptr.options()); arg_out_data = arg_out.value().data_ptr<int64_t>(); } if (src.numel() == 0) { if (!optional_out.has_value()) out.fill_(0); return std::make_tuple(out, arg_out); } auto N = out.size(dim) * (indptr.numel() / indptr.size(-1)); auto K = out.numel() / N; auto E = src.size(dim); auto indptr_info = at::cuda::detail::getTensorInfo<int64_t, int>(indptr); auto stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_ALL_TYPES(src.scalar_type(), "_", [&] { auto src_data = src.data_ptr<scalar_t>(); auto out_data = out.data_ptr<scalar_t>(); AT_DISPATCH_REDUCTION_TYPES(reduce, [&] { if (K == 1) { segment_csr_kernel<scalar_t, REDUCE, 1> <<<BLOCKS(32, N), THREADS, 0, stream>>>( src_data, indptr_info, out_data, arg_out_data, N, E); } else { segment_csr_broadcast_kernel<scalar_t, REDUCE> <<<BLOCKS(1, N * K), THREADS, 0, stream>>>( src_data, indptr_info, out_data, arg_out_data, N, K, E); } }); }); return std::make_tuple(out, arg_out); } template <typename scalar_t, int TB> __global__ void gather_csr_kernel(const scalar_t *src_data, const at::cuda::detail::TensorInfo<int64_t, int> indptr_info, scalar_t *out_data, size_t N, size_t E) { int thread_idx = blockIdx.x * blockDim.x + threadIdx.x; int row_idx = thread_idx / TB; int lane_idx = thread_idx % TB; if (row_idx < N) { int offset = IndexPtrToOffset<int64_t>::get(row_idx, indptr_info); int row_start = ldg(indptr_info.data + offset); int row_end = ldg(indptr_info.data + offset + indptr_info.strides[indptr_info.dims - 1]); scalar_t val = ldg(src_data + row_idx); offset = (row_idx / (indptr_info.sizes[indptr_info.dims - 1] - 1)) * E; for (int out_idx 
= row_start + lane_idx; out_idx < row_end; out_idx += TB) { out_data[offset + out_idx] = val; // "Mostly" coalesced. } } } template <typename scalar_t> __global__ void gather_csr_broadcast_kernel( const scalar_t *src_data, const at::cuda::detail::TensorInfo<int64_t, int> indptr_info, scalar_t *out_data, size_t N, size_t K, size_t E) { int thread_idx = blockIdx.x * blockDim.x + threadIdx.x; int row_idx = thread_idx / K; int lane_idx = thread_idx % K; if (thread_idx < N * K) { int offset = IndexPtrToOffset<int64_t>::get(row_idx, indptr_info); int row_start = ldg(indptr_info.data + offset); int row_end = ldg(indptr_info.data + offset + indptr_info.strides[indptr_info.dims - 1]); scalar_t val = src_data[thread_idx]; // Coalesced. offset = (row_idx / (indptr_info.sizes[indptr_info.dims - 1] - 1)) * E * K; for (int out_idx = row_start; out_idx < row_end; out_idx++) { out_data[offset + K * out_idx + lane_idx] = val; // "Mostly" coalesced. } } } torch::Tensor gather_csr_cuda(torch::Tensor src, torch::Tensor indptr, torch::optional<torch::Tensor> optional_out) { CHECK_CUDA(src); CHECK_CUDA(indptr); if (optional_out.has_value()) CHECK_CUDA(optional_out.value()); cudaSetDevice(src.get_device()); CHECK_INPUT(src.dim() >= indptr.dim()); auto sizes = indptr.sizes().vec(); for (auto i = 0; i < indptr.dim() - 1; i++) sizes[i] = src.size(i); indptr = indptr.expand(sizes); auto dim = indptr.dim() - 1; CHECK_INPUT(src.size(dim) == 0 || src.size(dim) == indptr.size(dim) - 1); src = src.contiguous(); torch::Tensor out; if (optional_out.has_value()) { out = optional_out.value().contiguous(); for (auto i = 0; i < out.dim(); i++) if (i != dim) CHECK_INPUT(src.size(i) == out.size(i)); } else { auto sizes = src.sizes().vec(); if (src.numel() > 0) { sizes[dim] = indptr.flatten()[-1].cpu().data_ptr<int64_t>()[0]; } else { sizes[dim] = 0; } out = torch::empty(sizes, src.options()); } if (src.numel() == 0) { if (!optional_out.has_value()) out.fill_(0); return out; } auto N = src.size(dim) * (indptr.numel() / indptr.size(-1)); auto K = src.numel() / N; auto E = out.size(dim); auto indptr_info = at::cuda::detail::getTensorInfo<int64_t, int>(indptr); auto stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_ALL_TYPES(src.scalar_type(), "_", [&] { auto src_data = src.data_ptr<scalar_t>(); auto out_data = out.data_ptr<scalar_t>(); if (K == 1) gather_csr_kernel<scalar_t, 4><<<BLOCKS(1, 4 * N), THREADS, 0, stream>>>( src_data, indptr_info, out_data, N, E); else gather_csr_broadcast_kernel<scalar_t> <<<BLOCKS(1, N * K), THREADS, 0, stream>>>(src_data, indptr_info, out_data, N, K, E); }); return out; }
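// ---------------------------------------------------------------------------
// Editor's note -- illustrative addition, not part of the upstream file.
// gather_csr is the expansion counterpart of segment_csr: each per-row value
// of `src` is written to every element position of that row, as given by
// `indptr`. A hedged CPU sketch of that behaviour for K == 1 follows; the
// name `gather_csr_cpu_reference` is hypothetical and only illustrative.
// ---------------------------------------------------------------------------
#include <cstdint>
#include <vector>

static std::vector<float>
gather_csr_cpu_reference(const std::vector<float> &src,
                         const std::vector<int64_t> &indptr) {
  // Assumes indptr is non-empty; the output length equals indptr.back(),
  // i.e. the total number of gathered elements.
  std::vector<float> out(static_cast<size_t>(indptr.back()), 0.0f);
  for (size_t i = 0; i + 1 < indptr.size(); ++i)
    for (int64_t e = indptr[i]; e < indptr[i + 1]; ++e)
      out[e] = src[i];
  return out;
}
// Example: src = {3, 7, 12}, indptr = {0,2,2,5} yields out = {3, 3, 12, 12, 12};
// the value 7 of the empty middle row is simply never written anywhere.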
3b881b38d373a91b7e078460dcb2e3845fecf374.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @generated from magmablas/zlaswp_batched.cu, normal z -> c, Thu Oct 8 23:05:38 2020 @author Azzam Haidar @author Tingxing Dong */ #include "magma_internal.h" #include "batched_kernel_param.h" #define BLK_SIZE 256 #define CLASWP_COL_NTH 32 // SWP_WIDTH is number of threads in a block // 64 and 256 are better on Kepler; extern __shared__ magmaFloatComplex shared_data[]; /******************************************************************************/ static __device__ void claswp_rowparallel_devfunc( int n, int width, int height, magmaFloatComplex *dA, int lda, magmaFloatComplex *dout, int ldo, magma_int_t* pivinfo) { //int height = k2- k1; //int height = blockDim.x; unsigned int tid = threadIdx.x; dA += SWP_WIDTH * blockIdx.x * lda; dout += SWP_WIDTH * blockIdx.x * ldo; magmaFloatComplex *sdata = shared_data; if (blockIdx.x == gridDim.x -1) { width = n - blockIdx.x * SWP_WIDTH; } if (tid < height) { int mynewroworig = pivinfo[tid]-1; //-1 to get the index in C int itsreplacement = pivinfo[mynewroworig] -1; //-1 to get the index in C //printf("%d: mynewroworig = %d, itsreplacement = %d\n", tid, mynewroworig, itsreplacement); #pragma unroll for (int i=0; i < width; i++) { sdata[ tid + i * height ] = dA[ mynewroworig + i * lda ]; dA[ mynewroworig + i * lda ] = dA[ itsreplacement + i * lda ]; } } __syncthreads(); if (tid < height) { // copy back the upper swapped portion of A to dout #pragma unroll for (int i=0; i < width; i++) { dout[tid + i * ldo] = sdata[tid + i * height]; } } } /******************************************************************************/ // parallel swap the swaped dA(1:nb,i:n) is stored in dout __global__ void claswp_rowparallel_kernel( int n, int width, int height, magmaFloatComplex *dinput, int ldi, magmaFloatComplex *doutput, int ldo, magma_int_t* pivinfo) { claswp_rowparallel_devfunc(n, width, height, dinput, ldi, doutput, ldo, pivinfo); } /******************************************************************************/ __global__ void claswp_rowparallel_kernel_batched( int n, int width, int height, magmaFloatComplex **input_array, int input_i, int input_j, int ldi, magmaFloatComplex **output_array, int output_i, int output_j, int ldo, magma_int_t** pivinfo_array) { int batchid = blockIdx.z; claswp_rowparallel_devfunc( n, width, height, input_array[batchid] + input_j * ldi + input_i, ldi, output_array[batchid] + output_j * ldo + output_i, ldo, pivinfo_array[batchid]); } /******************************************************************************/ extern "C" void magma_claswp_rowparallel_batched( magma_int_t n, magmaFloatComplex** input_array, magma_int_t input_i, magma_int_t input_j, magma_int_t ldi, magmaFloatComplex** output_array, magma_int_t output_i, magma_int_t output_j, magma_int_t ldo, magma_int_t k1, magma_int_t k2, magma_int_t **pivinfo_array, magma_int_t batchCount, magma_queue_t queue) { #define input_array(i,j) input_array, i, j #define output_array(i,j) output_array, i, j if (n == 0 ) return; int height = k2-k1; if ( height > 1024) { fprintf( stderr, "%s: n=%lld > 1024, not supported\n", __func__, (long long) n ); } int blocks = magma_ceildiv( n, SWP_WIDTH ); magma_int_t max_batchCount = queue->get_maxBatch(); for(magma_int_t i = 0; i < batchCount; i+=max_batchCount) { magma_int_t ibatch = min(max_batchCount, batchCount-i); dim3 
grid(blocks, 1, ibatch); if ( n < SWP_WIDTH) { size_t shmem = sizeof(magmaFloatComplex) * height * n; hipLaunchKernelGGL(( claswp_rowparallel_kernel_batched) , dim3(grid), dim3(height), shmem, queue->cuda_stream() , n, n, height, input_array+i, input_i, input_j, ldi, output_array+i, output_i, output_j, ldo, pivinfo_array+i ); } else { size_t shmem = sizeof(magmaFloatComplex) * height * SWP_WIDTH; hipLaunchKernelGGL(( claswp_rowparallel_kernel_batched) , dim3(grid), dim3(height), shmem, queue->cuda_stream() , n, SWP_WIDTH, height, input_array+i, input_i, input_j, ldi, output_array+i, output_i, output_j, ldo, pivinfo_array+i ); } } #undef input_array #undef output_attay } /******************************************************************************/ extern "C" void magma_claswp_rowparallel_native( magma_int_t n, magmaFloatComplex* input, magma_int_t ldi, magmaFloatComplex* output, magma_int_t ldo, magma_int_t k1, magma_int_t k2, magma_int_t *pivinfo, magma_queue_t queue) { if (n == 0 ) return; int height = k2-k1; if ( height > MAX_NTHREADS) { fprintf( stderr, "%s: height=%lld > %lld, magma_claswp_rowparallel_q not supported\n", __func__, (long long) n, (long long) MAX_NTHREADS ); } int blocks = magma_ceildiv( n, SWP_WIDTH ); dim3 grid(blocks, 1, 1); if ( n < SWP_WIDTH) { size_t shmem = sizeof(magmaFloatComplex) * height * n; hipLaunchKernelGGL(( claswp_rowparallel_kernel) , dim3(grid), dim3(height), shmem, queue->cuda_stream() , n, n, height, input, ldi, output, ldo, pivinfo ); } else { size_t shmem = sizeof(magmaFloatComplex) * height * SWP_WIDTH; hipLaunchKernelGGL(( claswp_rowparallel_kernel) , dim3(grid), dim3(height), shmem, queue->cuda_stream() , n, SWP_WIDTH, height, input, ldi, output, ldo, pivinfo ); } } /******************************************************************************/ // serial swap that does swapping one row by one row __global__ void claswp_rowserial_kernel_batched( int n, magmaFloatComplex **dA_array, int lda, int k1, int k2, magma_int_t** ipiv_array ) { magmaFloatComplex* dA = dA_array[blockIdx.z]; magma_int_t *dipiv = ipiv_array[blockIdx.z]; unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x; k1--; k2--; if (tid < n) { magmaFloatComplex A1; for (int i1 = k1; i1 < k2; i1++) { int i2 = dipiv[i1] - 1; // Fortran index, switch i1 and i2 if ( i2 != i1) { A1 = dA[i1 + tid * lda]; dA[i1 + tid * lda] = dA[i2 + tid * lda]; dA[i2 + tid * lda] = A1; } } } } /******************************************************************************/ // serial swap that does swapping one row by one row __global__ void claswp_rowserial_kernel_native( int n, magmaFloatComplex_ptr dA, int lda, int k1, int k2, magma_int_t* dipiv ) { unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x; //k1--; //k2--; if (tid < n) { magmaFloatComplex A1; for (int i1 = k1; i1 < k2; i1++) { int i2 = dipiv[i1] - 1; // Fortran index, switch i1 and i2 if ( i2 != i1) { A1 = dA[i1 + tid * lda]; dA[i1 + tid * lda] = dA[i2 + tid * lda]; dA[i2 + tid * lda] = A1; } } } } /******************************************************************************/ // serial swap that does swapping one row by one row, similar to LAPACK // K1, K2 are in Fortran indexing extern "C" void magma_claswp_rowserial_batched(magma_int_t n, magmaFloatComplex** dA_array, magma_int_t lda, magma_int_t k1, magma_int_t k2, magma_int_t **ipiv_array, magma_int_t batchCount, magma_queue_t queue) { if (n == 0) return; int blocks = magma_ceildiv( n, BLK_SIZE ); magma_int_t max_batchCount = queue->get_maxBatch(); for(magma_int_t i = 0; i < 
batchCount; i+=max_batchCount) { magma_int_t ibatch = min(max_batchCount, batchCount-i); dim3 grid(blocks, 1, ibatch); hipLaunchKernelGGL(( claswp_rowserial_kernel_batched) , dim3(grid), dim3(max(BLK_SIZE, n)), 0, queue->cuda_stream() , n, dA_array+i, lda, k1, k2, ipiv_array+i); } } /******************************************************************************/ // serial swap that does swapping one row by one row, similar to LAPACK // K1, K2 are in Fortran indexing extern "C" void magma_claswp_rowserial_native(magma_int_t n, magmaFloatComplex_ptr dA, magma_int_t lda, magma_int_t k1, magma_int_t k2, magma_int_t* dipiv, magma_queue_t queue) { if (n == 0) return; int blocks = magma_ceildiv( n, BLK_SIZE ); dim3 grid(blocks, 1, 1); hipLaunchKernelGGL(( claswp_rowserial_kernel_native) , dim3(grid), dim3(max(BLK_SIZE, n)), 0, queue->cuda_stream() , n, dA, lda, k1, k2, dipiv); } /******************************************************************************/ // serial swap that does swapping one column by one column __device__ void claswp_columnserial_devfunc(int n, magmaFloatComplex_ptr dA, int lda, int k1, int k2, magma_int_t* dipiv ) { unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x; k1--; k2--; if ( k1 < 0 || k2 < 0 ) return; if ( tid < n) { magmaFloatComplex A1; if (k1 <= k2) { for (int i1 = k1; i1 <= k2; i1++) { int i2 = dipiv[i1] - 1; // Fortran index, switch i1 and i2 if ( i2 != i1) { A1 = dA[i1 * lda + tid]; dA[i1 * lda + tid] = dA[i2 * lda + tid]; dA[i2 * lda + tid] = A1; } } } else { for (int i1 = k1; i1 >= k2; i1--) { int i2 = dipiv[i1] - 1; // Fortran index, switch i1 and i2 if ( i2 != i1) { A1 = dA[i1 * lda + tid]; dA[i1 * lda + tid] = dA[i2 * lda + tid]; dA[i2 * lda + tid] = A1; } } } } } __global__ void claswp_columnserial_kernel_batched( int n, magmaFloatComplex **dA_array, int lda, int k1, int k2, magma_int_t** ipiv_array ) { magmaFloatComplex* dA = dA_array[blockIdx.z]; magma_int_t *dipiv = ipiv_array[blockIdx.z]; claswp_columnserial_devfunc(n, dA, lda, k1, k2, dipiv); } __global__ void claswp_columnserial_kernel( int n, magmaFloatComplex_ptr dA, int lda, int k1, int k2, magma_int_t* dipiv ) { claswp_columnserial_devfunc(n, dA, lda, k1, k2, dipiv); } /******************************************************************************/ // serial swap that does swapping one column by one column // K1, K2 are in Fortran indexing extern "C" void magma_claswp_columnserial( magma_int_t n, magmaFloatComplex_ptr dA, magma_int_t lda, magma_int_t k1, magma_int_t k2, magma_int_t *dipiv, magma_queue_t queue) { if (n == 0 ) return; int blocks = magma_ceildiv( n, CLASWP_COL_NTH ); dim3 grid(blocks, 1, 1); hipLaunchKernelGGL(( claswp_columnserial_kernel), dim3(grid), dim3(CLASWP_COL_NTH), 0, queue->cuda_stream() , n, dA, lda, k1, k2, dipiv); } extern "C" void magma_claswp_columnserial_batched(magma_int_t n, magmaFloatComplex** dA_array, magma_int_t lda, magma_int_t k1, magma_int_t k2, magma_int_t **ipiv_array, magma_int_t batchCount, magma_queue_t queue) { if (n == 0 ) return; int blocks = magma_ceildiv( n, CLASWP_COL_NTH ); magma_int_t max_batchCount = queue->get_maxBatch(); for(magma_int_t i = 0; i < batchCount; i+=max_batchCount) { magma_int_t ibatch = min(max_batchCount, batchCount-i); dim3 grid(blocks, 1, ibatch); hipLaunchKernelGGL(( claswp_columnserial_kernel_batched) , dim3(grid), dim3(min(CLASWP_COL_NTH,n)), 0, queue->cuda_stream() , n, dA_array+i, lda, k1, k2, ipiv_array+i); } }
3b881b38d373a91b7e078460dcb2e3845fecf374.cu
/* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @generated from magmablas/zlaswp_batched.cu, normal z -> c, Thu Oct 8 23:05:38 2020 @author Azzam Haidar @author Tingxing Dong */ #include "magma_internal.h" #include "batched_kernel_param.h" #define BLK_SIZE 256 #define CLASWP_COL_NTH 32 // SWP_WIDTH is number of threads in a block // 64 and 256 are better on Kepler; extern __shared__ magmaFloatComplex shared_data[]; /******************************************************************************/ static __device__ void claswp_rowparallel_devfunc( int n, int width, int height, magmaFloatComplex *dA, int lda, magmaFloatComplex *dout, int ldo, magma_int_t* pivinfo) { //int height = k2- k1; //int height = blockDim.x; unsigned int tid = threadIdx.x; dA += SWP_WIDTH * blockIdx.x * lda; dout += SWP_WIDTH * blockIdx.x * ldo; magmaFloatComplex *sdata = shared_data; if (blockIdx.x == gridDim.x -1) { width = n - blockIdx.x * SWP_WIDTH; } if (tid < height) { int mynewroworig = pivinfo[tid]-1; //-1 to get the index in C int itsreplacement = pivinfo[mynewroworig] -1; //-1 to get the index in C //printf("%d: mynewroworig = %d, itsreplacement = %d\n", tid, mynewroworig, itsreplacement); #pragma unroll for (int i=0; i < width; i++) { sdata[ tid + i * height ] = dA[ mynewroworig + i * lda ]; dA[ mynewroworig + i * lda ] = dA[ itsreplacement + i * lda ]; } } __syncthreads(); if (tid < height) { // copy back the upper swapped portion of A to dout #pragma unroll for (int i=0; i < width; i++) { dout[tid + i * ldo] = sdata[tid + i * height]; } } } /******************************************************************************/ // parallel swap the swaped dA(1:nb,i:n) is stored in dout __global__ void claswp_rowparallel_kernel( int n, int width, int height, magmaFloatComplex *dinput, int ldi, magmaFloatComplex *doutput, int ldo, magma_int_t* pivinfo) { claswp_rowparallel_devfunc(n, width, height, dinput, ldi, doutput, ldo, pivinfo); } /******************************************************************************/ __global__ void claswp_rowparallel_kernel_batched( int n, int width, int height, magmaFloatComplex **input_array, int input_i, int input_j, int ldi, magmaFloatComplex **output_array, int output_i, int output_j, int ldo, magma_int_t** pivinfo_array) { int batchid = blockIdx.z; claswp_rowparallel_devfunc( n, width, height, input_array[batchid] + input_j * ldi + input_i, ldi, output_array[batchid] + output_j * ldo + output_i, ldo, pivinfo_array[batchid]); } /******************************************************************************/ extern "C" void magma_claswp_rowparallel_batched( magma_int_t n, magmaFloatComplex** input_array, magma_int_t input_i, magma_int_t input_j, magma_int_t ldi, magmaFloatComplex** output_array, magma_int_t output_i, magma_int_t output_j, magma_int_t ldo, magma_int_t k1, magma_int_t k2, magma_int_t **pivinfo_array, magma_int_t batchCount, magma_queue_t queue) { #define input_array(i,j) input_array, i, j #define output_array(i,j) output_array, i, j if (n == 0 ) return; int height = k2-k1; if ( height > 1024) { fprintf( stderr, "%s: n=%lld > 1024, not supported\n", __func__, (long long) n ); } int blocks = magma_ceildiv( n, SWP_WIDTH ); magma_int_t max_batchCount = queue->get_maxBatch(); for(magma_int_t i = 0; i < batchCount; i+=max_batchCount) { magma_int_t ibatch = min(max_batchCount, batchCount-i); dim3 grid(blocks, 1, ibatch); if ( n < SWP_WIDTH) { size_t shmem = sizeof(magmaFloatComplex) * 
height * n; claswp_rowparallel_kernel_batched <<< grid, height, shmem, queue->cuda_stream() >>> ( n, n, height, input_array+i, input_i, input_j, ldi, output_array+i, output_i, output_j, ldo, pivinfo_array+i ); } else { size_t shmem = sizeof(magmaFloatComplex) * height * SWP_WIDTH; claswp_rowparallel_kernel_batched <<< grid, height, shmem, queue->cuda_stream() >>> ( n, SWP_WIDTH, height, input_array+i, input_i, input_j, ldi, output_array+i, output_i, output_j, ldo, pivinfo_array+i ); } } #undef input_array #undef output_attay } /******************************************************************************/ extern "C" void magma_claswp_rowparallel_native( magma_int_t n, magmaFloatComplex* input, magma_int_t ldi, magmaFloatComplex* output, magma_int_t ldo, magma_int_t k1, magma_int_t k2, magma_int_t *pivinfo, magma_queue_t queue) { if (n == 0 ) return; int height = k2-k1; if ( height > MAX_NTHREADS) { fprintf( stderr, "%s: height=%lld > %lld, magma_claswp_rowparallel_q not supported\n", __func__, (long long) n, (long long) MAX_NTHREADS ); } int blocks = magma_ceildiv( n, SWP_WIDTH ); dim3 grid(blocks, 1, 1); if ( n < SWP_WIDTH) { size_t shmem = sizeof(magmaFloatComplex) * height * n; claswp_rowparallel_kernel <<< grid, height, shmem, queue->cuda_stream() >>> ( n, n, height, input, ldi, output, ldo, pivinfo ); } else { size_t shmem = sizeof(magmaFloatComplex) * height * SWP_WIDTH; claswp_rowparallel_kernel <<< grid, height, shmem, queue->cuda_stream() >>> ( n, SWP_WIDTH, height, input, ldi, output, ldo, pivinfo ); } } /******************************************************************************/ // serial swap that does swapping one row by one row __global__ void claswp_rowserial_kernel_batched( int n, magmaFloatComplex **dA_array, int lda, int k1, int k2, magma_int_t** ipiv_array ) { magmaFloatComplex* dA = dA_array[blockIdx.z]; magma_int_t *dipiv = ipiv_array[blockIdx.z]; unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x; k1--; k2--; if (tid < n) { magmaFloatComplex A1; for (int i1 = k1; i1 < k2; i1++) { int i2 = dipiv[i1] - 1; // Fortran index, switch i1 and i2 if ( i2 != i1) { A1 = dA[i1 + tid * lda]; dA[i1 + tid * lda] = dA[i2 + tid * lda]; dA[i2 + tid * lda] = A1; } } } } /******************************************************************************/ // serial swap that does swapping one row by one row __global__ void claswp_rowserial_kernel_native( int n, magmaFloatComplex_ptr dA, int lda, int k1, int k2, magma_int_t* dipiv ) { unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x; //k1--; //k2--; if (tid < n) { magmaFloatComplex A1; for (int i1 = k1; i1 < k2; i1++) { int i2 = dipiv[i1] - 1; // Fortran index, switch i1 and i2 if ( i2 != i1) { A1 = dA[i1 + tid * lda]; dA[i1 + tid * lda] = dA[i2 + tid * lda]; dA[i2 + tid * lda] = A1; } } } } /******************************************************************************/ // serial swap that does swapping one row by one row, similar to LAPACK // K1, K2 are in Fortran indexing extern "C" void magma_claswp_rowserial_batched(magma_int_t n, magmaFloatComplex** dA_array, magma_int_t lda, magma_int_t k1, magma_int_t k2, magma_int_t **ipiv_array, magma_int_t batchCount, magma_queue_t queue) { if (n == 0) return; int blocks = magma_ceildiv( n, BLK_SIZE ); magma_int_t max_batchCount = queue->get_maxBatch(); for(magma_int_t i = 0; i < batchCount; i+=max_batchCount) { magma_int_t ibatch = min(max_batchCount, batchCount-i); dim3 grid(blocks, 1, ibatch); claswp_rowserial_kernel_batched <<< grid, max(BLK_SIZE, n), 0, queue->cuda_stream() >>> 
(n, dA_array+i, lda, k1, k2, ipiv_array+i); } } /******************************************************************************/ // serial swap that does swapping one row by one row, similar to LAPACK // K1, K2 are in Fortran indexing extern "C" void magma_claswp_rowserial_native(magma_int_t n, magmaFloatComplex_ptr dA, magma_int_t lda, magma_int_t k1, magma_int_t k2, magma_int_t* dipiv, magma_queue_t queue) { if (n == 0) return; int blocks = magma_ceildiv( n, BLK_SIZE ); dim3 grid(blocks, 1, 1); claswp_rowserial_kernel_native <<< grid, max(BLK_SIZE, n), 0, queue->cuda_stream() >>> (n, dA, lda, k1, k2, dipiv); } /******************************************************************************/ // serial swap that does swapping one column by one column __device__ void claswp_columnserial_devfunc(int n, magmaFloatComplex_ptr dA, int lda, int k1, int k2, magma_int_t* dipiv ) { unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x; k1--; k2--; if ( k1 < 0 || k2 < 0 ) return; if ( tid < n) { magmaFloatComplex A1; if (k1 <= k2) { for (int i1 = k1; i1 <= k2; i1++) { int i2 = dipiv[i1] - 1; // Fortran index, switch i1 and i2 if ( i2 != i1) { A1 = dA[i1 * lda + tid]; dA[i1 * lda + tid] = dA[i2 * lda + tid]; dA[i2 * lda + tid] = A1; } } } else { for (int i1 = k1; i1 >= k2; i1--) { int i2 = dipiv[i1] - 1; // Fortran index, switch i1 and i2 if ( i2 != i1) { A1 = dA[i1 * lda + tid]; dA[i1 * lda + tid] = dA[i2 * lda + tid]; dA[i2 * lda + tid] = A1; } } } } } __global__ void claswp_columnserial_kernel_batched( int n, magmaFloatComplex **dA_array, int lda, int k1, int k2, magma_int_t** ipiv_array ) { magmaFloatComplex* dA = dA_array[blockIdx.z]; magma_int_t *dipiv = ipiv_array[blockIdx.z]; claswp_columnserial_devfunc(n, dA, lda, k1, k2, dipiv); } __global__ void claswp_columnserial_kernel( int n, magmaFloatComplex_ptr dA, int lda, int k1, int k2, magma_int_t* dipiv ) { claswp_columnserial_devfunc(n, dA, lda, k1, k2, dipiv); } /******************************************************************************/ // serial swap that does swapping one column by one column // K1, K2 are in Fortran indexing extern "C" void magma_claswp_columnserial( magma_int_t n, magmaFloatComplex_ptr dA, magma_int_t lda, magma_int_t k1, magma_int_t k2, magma_int_t *dipiv, magma_queue_t queue) { if (n == 0 ) return; int blocks = magma_ceildiv( n, CLASWP_COL_NTH ); dim3 grid(blocks, 1, 1); claswp_columnserial_kernel<<< grid, CLASWP_COL_NTH, 0, queue->cuda_stream() >>> (n, dA, lda, k1, k2, dipiv); } extern "C" void magma_claswp_columnserial_batched(magma_int_t n, magmaFloatComplex** dA_array, magma_int_t lda, magma_int_t k1, magma_int_t k2, magma_int_t **ipiv_array, magma_int_t batchCount, magma_queue_t queue) { if (n == 0 ) return; int blocks = magma_ceildiv( n, CLASWP_COL_NTH ); magma_int_t max_batchCount = queue->get_maxBatch(); for(magma_int_t i = 0; i < batchCount; i+=max_batchCount) { magma_int_t ibatch = min(max_batchCount, batchCount-i); dim3 grid(blocks, 1, ibatch); claswp_columnserial_kernel_batched <<< grid, min(CLASWP_COL_NTH,n), 0, queue->cuda_stream() >>> (n, dA_array+i, lda, k1, k2, ipiv_array+i); } }
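// ---------------------------------------------------------------------------
// Editor's note -- illustrative addition, not part of the upstream MAGMA file.
// The *_rowserial kernels above apply LAPACK-style row interchanges to a
// column-major matrix with leading dimension lda, with k1/k2 and the ipiv
// entries given in Fortran 1-based indexing. The host sketch below mirrors the
// loop of claswp_rowserial_kernel_batched for a real-valued matrix, purely to
// document that indexing convention; `laswp_rowserial_cpu_sketch` is a
// hypothetical name.
// ---------------------------------------------------------------------------
#include <utility>
#include <vector>

static void laswp_rowserial_cpu_sketch(int n, std::vector<float> &A, int lda,
                                       int k1, int k2,
                                       const std::vector<int> &ipiv) {
  // A is column-major: element (row r, column c) lives at A[r + c * lda];
  // n is the number of columns (one GPU thread per column in the kernel).
  for (int i1 = k1 - 1; i1 < k2 - 1; ++i1) { // shift to 0-based, as the kernel does
    int i2 = ipiv[i1] - 1;                   // Fortran index -> C index
    if (i2 != i1)
      for (int c = 0; c < n; ++c)
        std::swap(A[i1 + c * lda], A[i2 + c * lda]);
  }
}
// A 2x2 example with lda = 2, n = 2, k1 = 1, k2 = 2 and ipiv = {2} exchanges
// rows 0 and 1 in every column.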
d86c0f24f6f63e3838e63b1ec80556d2d6807190.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdlib> #include <vector> #include <iostream> #include <numeric> #include "CPUTimer.h" #include "GPUTimer.h" #include "CUDAError.h" void GenerateRandomArray(int* array, const int N) { srand(time(NULL)); for (int i = 0; i < N; ++i) { float tmp = rand() / (float)RAND_MAX * 100.0f; array[i] = tmp - 50; } } int AddSequentialInCPU(const int* array, const int N) { int sum = 0; for(int i = 0; i < N; ++i) sum += array[i]; return sum; } __device__ void FirstAddWithCopyOnShared(const int* in, int* shared, const int index, const unsigned int stepSize) { if(threadIdx.x < stepSize) shared[threadIdx.x] = in[index] + in[index + stepSize]; __syncthreads(); } __device__ inline int isFirstThreadOfThisBlock() { return threadIdx.x == 0; } __device__ void Reduce(int* in, int* out, const int index, unsigned int stepSize) { while(stepSize > 0) { if(threadIdx.x < stepSize) in[index] += in[index + stepSize]; else break; stepSize >>= 1; __syncthreads(); } if(isFirstThreadOfThisBlock()) out[blockIdx.x] = in[index]; } __global__ void AddParallelInGPU(int* in, int* out) { const int index = threadIdx.x + blockDim.x * blockIdx.x; extern __shared__ int s_data[]; FirstAddWithCopyOnShared(in, s_data, index, blockDim.x >> 1); Reduce(s_data, out, threadIdx.x, blockDim.x >> 2); } int main() { const int N = 1 << 20; int* h_array = (int*)malloc(N * sizeof(int)); int cpuSum = 0; GenerateRandomArray(h_array, N); int h_gpuSum[1]; int* d_array; int* d_sumBlocks; int *d_gpuSum; const int BLOCK_DIM = 1024; const int GRID_DIM = N / BLOCK_DIM; hipMalloc((void**)&d_array, N * sizeof(int)); hipMalloc((void**)&d_sumBlocks, GRID_DIM * sizeof(int)); hipMalloc((void**)&d_gpuSum, sizeof(int)); hipMemcpy(d_array, h_array, N * sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( AddParallelInGPU), dim3(GRID_DIM), dim3(BLOCK_DIM), (BLOCK_DIM >> 1) * sizeof(int), 0, d_array, d_sumBlocks); hipLaunchKernelGGL(( AddParallelInGPU), dim3(1), dim3(GRID_DIM), (GRID_DIM >> 1) * sizeof(int), 0, d_sumBlocks, d_gpuSum); cpuSum = AddSequentialInCPU(h_array, N); hipDeviceSynchronize(); hipMemcpy(h_gpuSum, d_gpuSum, sizeof(int), hipMemcpyDeviceToHost); if(cpuSum == h_gpuSum[0]) std::cout << "MATCH"; else std::cout << "WRONG"; std::cout << std::endl; hipFree(d_array); hipFree(d_sumBlocks); hipFree(d_gpuSum); return 0; }
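// ---------------------------------------------------------------------------
// Editor's note -- illustrative addition, not part of the original file. The
// launch configuration above relies on a few implicit constraints: N must be
// an exact multiple of BLOCK_DIM, both should be powers of two, each block
// only needs blockDim.x/2 ints of dynamic shared memory (the first addition is
// done while loading), and the second pass runs GRID_DIM threads in a single
// block, so GRID_DIM must not exceed the per-block thread limit (assumed to be
// 1024 here). The helper below only makes that arithmetic explicit; the
// struct and function names are hypothetical.
// ---------------------------------------------------------------------------
#include <cassert>
#include <cstddef>

struct ReductionLaunchConfig {
  int grid;            // number of blocks in the first pass
  int block;           // threads per block
  size_t shmem_bytes;  // dynamic shared memory per block
};

static ReductionLaunchConfig makeReductionLaunchConfig(int N, int blockDim) {
  assert(blockDim > 0 && (blockDim & (blockDim - 1)) == 0); // power of two
  assert(N % blockDim == 0);                                // exact tiling of the input
  ReductionLaunchConfig cfg;
  cfg.block = blockDim;
  cfg.grid = N / blockDim;
  // Half of each block's threads sum two inputs on load, so only blockDim/2
  // partial sums ever live in shared memory.
  cfg.shmem_bytes = static_cast<size_t>(blockDim >> 1) * sizeof(int);
  assert(cfg.grid <= 1024); // assumed limit so the second pass fits in one block
  return cfg;
}
// For N = 1 << 20 and blockDim = 1024 this gives grid = 1024 blocks and
// 2048 bytes of shared memory per block, matching the two launches above.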
d86c0f24f6f63e3838e63b1ec80556d2d6807190.cu
#include <cstdlib>
#include <vector>
#include <iostream>
#include <numeric>

#include "CPUTimer.h"
#include "GPUTimer.h"
#include "CUDAError.h"

void GenerateRandomArray(int* array, const int N)
{
    srand(time(NULL));
    for (int i = 0; i < N; ++i)
    {
        float tmp = rand() / (float)RAND_MAX * 100.0f;
        array[i] = tmp - 50;
    }
}

int AddSequentialInCPU(const int* array, const int N)
{
    int sum = 0;
    for(int i = 0; i < N; ++i)
        sum += array[i];
    return sum;
}

__device__ void FirstAddWithCopyOnShared(const int* in, int* shared, const int index, const unsigned int stepSize)
{
    if(threadIdx.x < stepSize)
        shared[threadIdx.x] = in[index] + in[index + stepSize];
    __syncthreads();
}

__device__ inline int isFirstThreadOfThisBlock()
{
    return threadIdx.x == 0;
}

__device__ void Reduce(int* in, int* out, const int index, unsigned int stepSize)
{
    while(stepSize > 0)
    {
        if(threadIdx.x < stepSize)
            in[index] += in[index + stepSize];
        else
            break;
        stepSize >>= 1;
        __syncthreads();
    }
    if(isFirstThreadOfThisBlock())
        out[blockIdx.x] = in[index];
}

__global__ void AddParallelInGPU(int* in, int* out)
{
    const int index = threadIdx.x + blockDim.x * blockIdx.x;
    extern __shared__ int s_data[];
    FirstAddWithCopyOnShared(in, s_data, index, blockDim.x >> 1);
    Reduce(s_data, out, threadIdx.x, blockDim.x >> 2);
}

int main()
{
    const int N = 1 << 20;
    int* h_array = (int*)malloc(N * sizeof(int));
    int cpuSum = 0;

    GenerateRandomArray(h_array, N);

    int h_gpuSum[1];
    int* d_array;
    int* d_sumBlocks;
    int *d_gpuSum;

    const int BLOCK_DIM = 1024;
    const int GRID_DIM = N / BLOCK_DIM;

    cudaMalloc((void**)&d_array, N * sizeof(int));
    cudaMalloc((void**)&d_sumBlocks, GRID_DIM * sizeof(int));
    cudaMalloc((void**)&d_gpuSum, sizeof(int));

    cudaMemcpy(d_array, h_array, N * sizeof(int), cudaMemcpyHostToDevice);

    AddParallelInGPU<<<GRID_DIM, BLOCK_DIM, (BLOCK_DIM >> 1) * sizeof(int)>>>(d_array, d_sumBlocks);
    AddParallelInGPU<<<1, GRID_DIM, (GRID_DIM >> 1) * sizeof(int)>>>(d_sumBlocks, d_gpuSum);

    cpuSum = AddSequentialInCPU(h_array, N);

    cudaDeviceSynchronize();

    cudaMemcpy(h_gpuSum, d_gpuSum, sizeof(int), cudaMemcpyDeviceToHost);

    if(cpuSum == h_gpuSum[0])
        std::cout << "MATCH";
    else
        std::cout << "WRONG";
    std::cout << std::endl;

    cudaFree(d_array);
    cudaFree(d_sumBlocks);
    cudaFree(d_gpuSum);

    return 0;
}
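// ---------------------------------------------------------------------------
// Editor's note -- illustrative addition, not part of the original file. The
// sketch below emulates on the host what one block of AddParallelInGPU does:
// the first addition happens while copying into (simulated) shared memory,
// which halves the shared-memory footprint, and the remaining partial sums are
// folded with a binary tree. It exists only to make the data flow easy to
// follow; `emulateBlockReduce` is a hypothetical name.
// ---------------------------------------------------------------------------
#include <vector>

static int emulateBlockReduce(const std::vector<int> &in) {
  // `in` plays the role of one block's slice of global memory; its size must
  // be a power of two >= 2, matching the kernel's assumptions.
  const int blockDim = static_cast<int>(in.size());
  std::vector<int> shared(blockDim / 2);
  for (int tid = 0; tid < blockDim / 2; ++tid)        // FirstAddWithCopyOnShared
    shared[tid] = in[tid] + in[tid + blockDim / 2];
  for (int step = blockDim / 4; step > 0; step >>= 1) // Reduce
    for (int tid = 0; tid < step; ++tid)
      shared[tid] += shared[tid + step];
  return shared[0];                                   // what thread 0 writes out
}
// emulateBlockReduce({1, 2, 3, 4, 5, 6, 7, 8}) returns 36, the sum of the slice.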
f757c9a66dfad0fb837cfafcdb510effb3898f79.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. 
// You should wrap your allocation and copying statements like we've done in the // code we're supplying you. Here is an example of the unsafe way to allocate // memory on the GPU: // // hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "utils.h" /*---- Private global variables. Design decision of the teachers. ----*/ unsigned char *d_red, *d_green, *d_blue; float *d_filter; /*---- Forward declaration of private functions ----*/ __global__ static void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth); __global__ static void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel); __global__ static void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols); static void cleanup(); /** * @brief Entry point function, provided by the HW. * * This functions takes an RGBA image as a buffer, and blurs it, providing as output the blurred version * both as 3 separate channel and as a combiened buffer. * * @param h_inputImageRGBA: input, image buffer in host memory. Probably given to confuse the student. Ignored * @param d_inputImageRGBA: input, image buffer in device memory. Provided by the caller code * @param d_outputImageRGBA: output, pre-allocated image buffer in device memory for the blurred image * @param numRows: input, number of rows of the image * @param numCols: input, number of columns of the image * @param d_redBlurred: output, pre-allocated image buffer in device memory for the red channel of the blurred image * @param d_greenBlurred: output, pre-allocated image buffer in device memory for the green channel of the blurred image * @param d_blueBlurred: output, pre-allocated image buffer in device memory for the blue channel of the blurred image * @param filterWidth: input, width of the square blur filter */ void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { const int K = 32; //Set reasonable block size (i.e., number of threads per block) const dim3 blockSize(K, K, 1); //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. 
const dim3 gridSize(ceil(numCols / static_cast<float>(blockSize.x)), ceil(numRows / static_cast<float>(blockSize.y)), 1); //Launch a kernel for separating the RGBA image into different color channels hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); // Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); //Call your convolution kernel here 3 times, once for each color channel. hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth); hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth); hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth); // Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. // // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); cleanup(); } /** * @brief Entry point function for memory allocation, provided by the HW. * * This functions allocates in device memory images and filter buffers, used as global variables by all the functions. * Must be called by user code before calling your_gaussian_blur(). your_gaussian_blur() takes care of cleaning up the memory. * * @param numRowsImage: input, number of rows of the image * @param numColsImage: input, number of columns of the image * @param h_filter: input, filter buffer in host memory. * @param filterWidth: input, width of the square blur filter */ void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with hipMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth)); //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice); //Remember to use checkCudaErrors! 
checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, hipMemcpyHostToDevice)); } /*---- Implementation of private functions ----*/ __global__ static void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { /* ------------ INSTRUCTIONS ------------*/ // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. const int c = blockDim.x * blockIdx.x + threadIdx.x; const int r = blockDim.y * blockIdx.y + threadIdx.y; if ( c >= numCols || r >= numRows ) { return; } float result = .0f; for (int filter_r = -filterWidth / 2; filter_r <= filterWidth / 2; ++filter_r) { for (int filter_c = -filterWidth / 2; filter_c <= filterWidth / 2; ++filter_c) { //Find the global image position for this filter position //clamp to boundary of the image const int image_r = min(max(r + filter_r, 0), static_cast<int>(numRows - 1)); const int image_c = min(max(c + filter_c, 0), static_cast<int>(numCols - 1)); const float imageValue = static_cast<float>(inputChannel[image_r * numCols + image_c]); const float filterValue = filter[(filter_r + filterWidth / 2) * filterWidth + filter_c + filterWidth/2]; result += imageValue * filterValue; } } outputChannel[r * numCols + c] = static_cast<unsigned char>(result); } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ static void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { // // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } const int c = blockDim.x * blockIdx.x + threadIdx.x; const int r = blockDim.y * blockIdx.y + threadIdx.y; if ( c >= numCols || r >= numRows ) { return; } const int idx = c + r * numCols; const uchar4 rgba = inputImageRGBA[idx]; redChannel[idx] = rgba.x; greenChannel[idx] = rgba.y; blueChannel[idx] = rgba.z; } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. 
__global__ static void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } //Free all the memory that we allocated //make sure you free any arrays that you allocated static void cleanup() { checkCudaErrors(hipFree(d_red)); checkCudaErrors(hipFree(d_green)); checkCudaErrors(hipFree(d_blue)); checkCudaErrors(hipFree(d_filter)); }
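// ---------------------------------------------------------------------------
// Editor's note -- illustrative addition, not part of the homework file. The
// separateChannels kernel converts the interleaved RGBA layout (Array of
// Structures) into one contiguous array per colour channel (Structure of
// Arrays). A hedged host-side sketch of the same transformation is given
// below; `Pixel` and `separateChannelsCPU` are hypothetical names used only
// for illustration.
// ---------------------------------------------------------------------------
#include <cstddef>
#include <vector>

struct Pixel { unsigned char r, g, b, a; };  // stands in for uchar4

static void separateChannelsCPU(const std::vector<Pixel> &image,
                                std::vector<unsigned char> &red,
                                std::vector<unsigned char> &green,
                                std::vector<unsigned char> &blue) {
  red.resize(image.size());
  green.resize(image.size());
  blue.resize(image.size());
  for (size_t i = 0; i < image.size(); ++i) {  // the alpha channel is dropped, as on the GPU
    red[i] = image[i].r;
    green[i] = image[i].g;
    blue[i] = image[i].b;
  }
}
// RGBARGBA... becomes RR..., GG..., BB..., which lets the blur kernel read each
// channel with unit stride.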
f757c9a66dfad0fb837cfafcdb510effb3898f79.cu
// Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. // You should wrap your allocation and copying statements like we've done in the // code we're supplying you. 
Here is an example of the unsafe way to allocate // memory on the GPU: // // cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "utils.h" /*---- Private global variables. Design decision of the teachers. ----*/ unsigned char *d_red, *d_green, *d_blue; float *d_filter; /*---- Forward declaration of private functions ----*/ __global__ static void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth); __global__ static void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel); __global__ static void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols); static void cleanup(); /** * @brief Entry point function, provided by the HW. * * This functions takes an RGBA image as a buffer, and blurs it, providing as output the blurred version * both as 3 separate channel and as a combiened buffer. * * @param h_inputImageRGBA: input, image buffer in host memory. Probably given to confuse the student. Ignored * @param d_inputImageRGBA: input, image buffer in device memory. Provided by the caller code * @param d_outputImageRGBA: output, pre-allocated image buffer in device memory for the blurred image * @param numRows: input, number of rows of the image * @param numCols: input, number of columns of the image * @param d_redBlurred: output, pre-allocated image buffer in device memory for the red channel of the blurred image * @param d_greenBlurred: output, pre-allocated image buffer in device memory for the green channel of the blurred image * @param d_blueBlurred: output, pre-allocated image buffer in device memory for the blue channel of the blurred image * @param filterWidth: input, width of the square blur filter */ void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { const int K = 32; //Set reasonable block size (i.e., number of threads per block) const dim3 blockSize(K, K, 1); //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. 
const dim3 gridSize(ceil(numCols / static_cast<float>(blockSize.x)), ceil(numRows / static_cast<float>(blockSize.y)), 1); //Launch a kernel for separating the RGBA image into different color channels separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); // Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); //Call your convolution kernel here 3 times, once for each color channel. gaussian_blur<<<gridSize, blockSize>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth); gaussian_blur<<<gridSize, blockSize>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth); gaussian_blur<<<gridSize, blockSize>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth); // Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. // // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. recombineChannels<<<gridSize, blockSize>>>(d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); cleanup(); } /** * @brief Entry point function for memory allocation, provided by the HW. * * This functions allocates in device memory images and filter buffers, used as global variables by all the functions. * Must be called by user code before calling your_gaussian_blur(). your_gaussian_blur() takes care of cleaning up the memory. * * @param numRowsImage: input, number of rows of the image * @param numColsImage: input, number of columns of the image * @param h_filter: input, filter buffer in host memory. * @param filterWidth: input, width of the square blur filter */ void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with cudaMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc checkCudaErrors(cudaMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth)); //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice); //Remember to use checkCudaErrors! 
checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, cudaMemcpyHostToDevice)); } /*---- Implementation of private functions ----*/ __global__ static void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { /* ------------ INSTRUCTIONS ------------*/ // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. const int c = blockDim.x * blockIdx.x + threadIdx.x; const int r = blockDim.y * blockIdx.y + threadIdx.y; if ( c >= numCols || r >= numRows ) { return; } float result = .0f; for (int filter_r = -filterWidth / 2; filter_r <= filterWidth / 2; ++filter_r) { for (int filter_c = -filterWidth / 2; filter_c <= filterWidth / 2; ++filter_c) { //Find the global image position for this filter position //clamp to boundary of the image const int image_r = min(max(r + filter_r, 0), static_cast<int>(numRows - 1)); const int image_c = min(max(c + filter_c, 0), static_cast<int>(numCols - 1)); const float imageValue = static_cast<float>(inputChannel[image_r * numCols + image_c]); const float filterValue = filter[(filter_r + filterWidth / 2) * filterWidth + filter_c + filterWidth/2]; result += imageValue * filterValue; } } outputChannel[r * numCols + c] = static_cast<unsigned char>(result); } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ static void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { // // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } const int c = blockDim.x * blockIdx.x + threadIdx.x; const int r = blockDim.y * blockIdx.y + threadIdx.y; if ( c >= numCols || r >= numRows ) { return; } const int idx = c + r * numCols; const uchar4 rgba = inputImageRGBA[idx]; redChannel[idx] = rgba.x; greenChannel[idx] = rgba.y; blueChannel[idx] = rgba.z; } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. 
__global__ static void recombineChannels(const unsigned char* const redChannel,
                                         const unsigned char* const greenChannel,
                                         const unsigned char* const blueChannel,
                                         uchar4* const outputImageRGBA,
                                         int numRows, int numCols)
{
  const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
                                        blockIdx.y * blockDim.y + threadIdx.y);

  const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;

  //make sure we don't try and access memory outside the image
  //by having any threads mapped there return early
  if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
    return;

  unsigned char red   = redChannel[thread_1D_pos];
  unsigned char green = greenChannel[thread_1D_pos];
  unsigned char blue  = blueChannel[thread_1D_pos];

  //Alpha should be 255 for no transparency
  uchar4 outputPixel = make_uchar4(red, green, blue, 255);

  outputImageRGBA[thread_1D_pos] = outputPixel;
}

//Free all the memory that we allocated
//make sure you free any arrays that you allocated
static void cleanup()
{
  checkCudaErrors(cudaFree(d_red));
  checkCudaErrors(cudaFree(d_green));
  checkCudaErrors(cudaFree(d_blue));
  checkCudaErrors(cudaFree(d_filter));
}
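/* Illustrative sketch, not part of the file above: the comment block at the top
   of the blur homework file explains why every CUDA API call is wrapped in
   checkCudaErrors(). The real macro lives in the course's utils.h, which is not
   shown here, so CHECK_CUDA below is only an assumed, minimal equivalent of the
   same idea. */
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Report the failing call and stop immediately, instead of letting later
// kernels silently operate on bad pointers.
#define CHECK_CUDA(call)                                            \
  do {                                                              \
    cudaError_t err__ = (call);                                     \
    if (err__ != cudaSuccess) {                                     \
      fprintf(stderr, "CUDA error %s at %s:%d\n",                   \
              cudaGetErrorString(err__), __FILE__, __LINE__);       \
      exit(EXIT_FAILURE);                                           \
    }                                                               \
  } while (0)

// Usage, mirroring the allocation discussed in the comments above:
//   unsigned char *d_red = nullptr;
//   CHECK_CUDA(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));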
2029f1611fac0e3cd60deaa9292ef7f6cbf50fc3.hip
// !!! This is a file automatically generated by hipify!!! // #include "../common/book.h" #include "./utils.h" #include "./timing.h" #include "./itk_io.h" // #include "./cuda_kernels.cuh" #pragma clang diagnostic push #pragma ide diagnostic ignored "CannotResolve" #include <hip/hip_runtime.h> #include <hipfft.h> #include <iostream> #include <complex> #include <vector> #include <cstring> /* cufftShift library */ #include <cufftShiftInterface.h> #define NX 16 #define NY 16 #define NZ 16 #define POW 4 #define LX (2 * M_PI) #define LY (2 * M_PI) #define NUM_IMAGES 1 #define THETA 6.125*M_PI_4 #define PHI 2.4812 // #define ST sin(THETA) // #define CT cos(THETA) // #define SP sin(PHI) // #define CP cos(PHI) texture<float, 3, hipReadModeElementType> tex; using namespace std; typedef float SimPixelType; // __global__ void add_slices(PIXEL_TYPE* image_in, PIXEL_TYPE* image_out) { // int tid = threadIdx.x + blockIdx.x * blockDim.x; // int idx = tid % (128 * 128 * 2); // PIXEL_TYPE temp = image_in[tid]; // // printf("%d\n", idx); // // if (tid < TOTAL_PIXELS) { // // if (tid < 16384) { // atomicAdd( &image_out[idx], temp ); // // image_out[tid] = temp; // // } // // } // } __device__ float my_roundf (float a) { float fa = fabsf (a); float t = ((fa >= 0.5f) && (fa <= 8388608.0f)) ? 0.5f : 0.0f; return copysignf (truncf (fa + t), a); } /* * Texture lookup based 3D volume rotation. * * */ __global__ void d_render_tex(float *d_output /*, uint imageW, uint imageH, float w*/) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int z = tid / (NX * NY) + 1; int x = tid % NX + 1; int y = ( tid % (NX * NY) ) / NX + 1; float ST = sinf(THETA); float CT = cosf(THETA); float SP = sinf(PHI); float CP = cosf(PHI); int p1 = (NX + 1)/2 + 1; int p2 = (NY + 1)/2 + 1; int p3 = (NZ + 1)/2 + 1; // Apply the rotation, nearest neighbor int xx = my_roundf(x*CT + z*ST - CT*p1 + p1 - ST*p3); if (xx > NX || xx < 1) return; int yy = my_roundf(- x*SP*ST + y*CP + z*SP*CT + SP*ST*p1 - CP*p2 - SP*CT*p3 + p2); if (yy > NY || yy < 1) return; int zz = my_roundf(- x*CP*ST - y*SP + z*CP*CT + CP*ST*p1 + SP*p2 - CP*CT*p3 + p3); if (zz > NZ || zz < 1) return; uint idx = ((zz - 1) << (2 * POW)) + ((yy - 1) << POW) + xx - 1; float voxel = tex3D( tex, x - 1, y - 1, z - 1 ); d_output[idx] = voxel; } __global__ void d_render(float *d_output, float *d_input, float ST, float CT, float SP, float CP /*, uint imageW, uint imageH, float w*/) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int z = tid / (NX * NY) + 1; int x = tid % NX + 1; int y = ( tid % (NX * NY) ) / NX + 1; // float ST = sinf(THETA); // float CT = cosf(THETA); // float SP = sinf(PHI); // float CP = cosf(PHI); int p1 = (NX + 1)/2 + 1; int p2 = (NY + 1)/2 + 1; int p3 = (NZ + 1)/2 + 1; // Apply the rotation, nearest neighbor resampling // int xx = roundf(x*CT + z*ST - CT*p1 + p1 - ST*p3); // if (xx > NX || xx < 1) return; // int yy = roundf(- x*SP*ST + y*CP + z*SP*CT + SP*ST*p1 - CP*p2 - SP*CT*p3 + p2); // if (yy > NY || yy < 1) return; // int zz = roundf(- x*CP*ST - y*SP + z*CP*CT + CP*ST*p1 + SP*p2 - CP*CT*p3 + p3); // if (zz > NZ || zz < 1) return; /* * T1.inv * T2.inv * T3.inv * T4.inv */ // int xx = roundf( x*CT - y*ST*SP + z*ST*CP - p1*CT + p2*ST*SP - p3*ST*CP + p1); // if (xx > NX || xx < 1) return; // int yy = roundf( y*CP + z*SP - p2*CP - p3*SP + p2 ); // if (yy > NY || yy < 1) return; // int zz = roundf( - x*ST - y*CT*SP + z*CT*CP + p1*ST + p2*CT*SP - CT*CP*p3 + p3 ); // if (zz > NZ || zz < 1) return; /* * T4 * T2 * T3 * T1 */ int xx = roundf( x*CT - y*ST*SP - z*ST*CP - 
p1*CT + p2*ST*SP + p3*ST*CP + p1); if (xx > NX || xx < 1) return; int yy = roundf( y*CP - z*SP - p2*CP + p3*SP + p2 ); if (yy > NY || yy < 1) return; int zz = roundf( x*ST + y*CT*SP + z*CT*CP - p1*ST - p2*CT*SP - CT*CP*p3 + p3 ); if (zz > NZ || zz < 1) return; // int xx = my_roundf( x*CT - z*ST*CP - p1*CT + p3*ST + p1); // if (xx > NX || xx < 1) return; // int yy = my_roundf( - x*SP*ST + y*CP - z*SP*CT + p1*SP*ST - p2*CP + p3*SP*CT + p2 ); // if (yy > NY || yy < 1) return; // int zz = my_roundf( x*CP*ST + y*SP + z*CP*CT - p1*CP*ST - p2*SP - p3*CP*CT + p3 ); // if (zz > NZ || zz < 1) return; uint idx = ((zz - 1) << (2 * POW)) + ((yy - 1) << POW) + xx - 1; // float voxel = tex3D( tex, x - 1, y - 1, z - 1 ); float voxel = d_input[ idx ]; d_output[tid] = voxel; } __global__ void d_render_cumulate(float *d_output, float *d_input, float ST, float CT, float SP, float CP ) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int z = tid / (NX * NY) + 1; int x = tid % NX + 1; int y = ( tid % (NX * NY) ) / NX + 1; // float ST = sinf(theta); // float CT = cosf(theta); // float SP = sinf(phi); // float CP = cosf(phi); int p1 = (NX + 1)/2 + 1; int p2 = (NY + 1)/2 + 1; int p3 = (NZ + 1)/2 + 1; // Apply the rotation, nearest neighbor // int xx = my_roundf( x*CT + z*ST - CT*p1 + p1 - ST*p3 ); // if (xx > NX || xx < 1) return; // int yy = my_roundf(- x*SP*ST + y*CP + z*SP*CT + SP*ST*p1 - CP*p2 - SP*CT*p3 + p2); // if (yy > NY || yy < 1) return; // int zz = my_roundf(- x*CP*ST - y*SP + z*CP*CT + CP*ST*p1 + SP*p2 - CP*CT*p3 + p3); // if (zz > NZ || zz < 1) return; // Apply the rotation, nearest neighbor // int xx = roundf( x*CT - y*ST*SP - z*ST*CP - p1*CT + p2*ST*SP + p3*ST*CP + p1); // if (xx > NX || xx < 1) return; // int yy = roundf( y*CP - z*SP - p2*CP + p3*SP + p2 ); // if (yy > NY || yy < 1) return; // int zz = roundf( x*ST + y*CT*SP + z*CT*CP - p1*ST - p2*CT*SP - CT*CP*p3 + p3 ); // if (zz > NZ || zz < 1) return; /* * Apply inverse rotation and find the nearest neighbor in the * ORIGINAL image instead of in the output image. 
*/ int xx = roundf( x*CT - z*ST - p1*CT + p3*ST + p1); if (xx > NX || xx < 1) return; int yy = roundf( - x*SP*ST + y*CP - z*SP*CT + p1*SP*ST - p2*CP + p3*SP*CT + p2 ); if (yy > NY || yy < 1) return; int zz = roundf( x*CP*ST + y*SP + z*CP*CT - p1*CP*ST - p2*SP - p3*CP*CT + p3 ); if (zz > NZ || zz < 1) return; // if (xx > NX || xx < 1 || yy > NY || yy < 1 || zz > NZ || zz < 1) { // return; // } uint idx = ((zz - 1) << (2 * POW)) + ((yy - 1) << POW) + xx - 1; // float voxel = tex3D( tex, x - 1, y - 1, z - 1 ); float voxel = d_input[ idx ]; d_output[tid] = voxel; // Apply a texture lookup // d_output[tid] = voxel; // atomicAdd( &d_output[tid], voxel ); } __global__ void Multiply_complex(SimPixelType* image_in, SimPixelType* image_in2) { int tid = threadIdx.x + blockIdx.x * blockDim.x; // int idx = tid % (128 * 128 * 2); SimPixelType c1_real = image_in[tid*2]; SimPixelType c1_imag = image_in[tid*2+1]; SimPixelType c2_real = image_in2[tid*2]; SimPixelType c2_imag = image_in2[tid*2+1]; image_in[tid*2] = c1_real * c2_real - c1_imag * c2_imag; image_in[tid*2+1] = c1_real * c2_imag + c1_imag * c2_real; } int main() { hipDeviceReset(); /* Create couple of images for testing */ SimPixelType *x = new SimPixelType[NX * NY]; SimPixelType *y = new SimPixelType[NX * NY]; SimPixelType *in = new SimPixelType[NX * NY * NZ]; /* A vector holding multiple images data */ vector< SimPixelType* > image_vector; vector< SimPixelType* > dev_pointers_in; vector< SimPixelType* > dev_pointers_out; vector< SimPixelType* > imageOut_vector; vector< SimPixelType* > mult_image_vector; /* Create Fourier Kernel plan */ hipfftHandle planr2c[NUM_IMAGES]; hipfftHandle planc2r[NUM_IMAGES]; /* Create an array of CUDA streams */ hipStream_t streams_fft[NUM_IMAGES]; /* Output image */ complex<SimPixelType> *out = new complex<SimPixelType>[NX * NY * NZ]; gpuErrchk( hipHostRegister( out, sizeof(SimPixelType)*NX*NY*NZ*2, hipHostRegisterPortable ) ); // complex<SimPixelType>* out; // gpuErrchk( hipHostMalloc( &out, NX * NY * NZ * sizeof(SimPixelType) * 2 ) ); /* Initialize it */ memset( out, 0, sizeof(SimPixelType)*NX*NY*NZ*2 ); /* Create the second argument image in the multiply kernel */ SimPixelType* OTF = new SimPixelType[NX * NY * NZ * 2]; // Since the image is complex SimPixelType* dev_OTF; for (int p = 0; p < NZ; p++) { for(int j = 0; j < NY; j++) { for(int kk = 0; kk < NX; kk++) { OTF[(j * NX + kk) * 2] = kk + j; OTF[(j * NX + kk) * 2 + 1] = kk + j; } } } /* Reserve memory locations for the OTF image */ gpuErrchk( hipMalloc( &dev_OTF, sizeof(SimPixelType)*NX*NY*NZ*2 ) ); gpuErrchk( hipHostRegister( OTF, sizeof(SimPixelType)*NX*NY*NZ*2, hipHostRegisterPortable ) ); for (unsigned i = 0; i < NUM_IMAGES; i++) { SimPixelType *vx = new SimPixelType[NX * NY * NZ]; SimPixelType *mult_image = new SimPixelType[NX * NY * NZ]; // SimPixelType* vx; // hipHostMalloc( &vx, NX * NY * NZ * sizeof(SimPixelType) ); for (int p = 0; p < NZ; p++) { for(int j = 0; j < NY; j++){ for(int kk = 0; kk < NX; kk++){ x[j * NX + kk] = kk * LX/NX; y[j * NX + kk] = kk * LY/NY; /* Put values in the new images */ // if (p == 64) vx[j * NX + kk + p * NX * NY] = cos(x[j * NX + kk] + y[j * NX + kk]); if ( i == 0 ) { in[j * NX + kk + p * NX * NY] = cos(x[j * NX + kk] + y[j * NX + kk]); } } } } t1 = absoluteTime(); gpuErrchk( hipHostRegister( vx, sizeof(SimPixelType)*NX*NY*NZ, hipHostRegisterPortable ) ); gpuErrchk( hipHostRegister( mult_image, sizeof(SimPixelType)*NX*NY*NZ, hipHostRegisterPortable ) ); t2 = absoluteTime(); std::cout << "\n\n Register time: " << 
(float)(t2-t1)/1000000 << "ms" << std::endl; // for (int j = 0; j < NY; j++){ // for (int i = 0; i < NX; i++){ // // printf("%.3f ", vx[j*NX + i]/(NX*NY)); // cout << vx[j * NX + i] << " "; // } // // printf("\n"); // cout << endl; // } // cout << endl; /* Allocate some spaces on the device */ SimPixelType *d_vx; SimPixelType *d_out; /* Some space on the device */ gpuErrchk(hipMalloc(&d_vx, NX * NY * NZ * sizeof(SimPixelType))); gpuErrchk(hipMalloc(&d_out, NX * NY * NZ * sizeof(hipfftReal))); /* Create cufft FFT plans */ int n[2] = {NX, NY}; int inembed[] = {NX, NY}; int onembed[] = {NX, NY}; /* Forward Fourier Transform plan */ hipfftPlanMany(&planr2c[i], 2, // rank n, // dimension inembed, 1, // istride NX * NY, // idist onembed, 1, //ostride NX * NY, // odist HIPFFT_R2C, NZ); /* Inverse Fourier Transform plan */ hipfftPlanMany(&planc2r[i], 2, // rank n, // dimension onembed, 1, // istride NX * NY, // idist inembed, 1, //ostride NX * NY, // odist HIPFFT_C2R, NZ); cufftSetCompatibilityMode(planr2c[i], CUFFT_COMPATIBILITY_NATIVE); cufftSetCompatibilityMode(planc2r[i], CUFFT_COMPATIBILITY_NATIVE); /* Create streams associated with this 2 plans */ gpuErrchk( hipStreamCreate( &streams_fft[i] )); hipfftSetStream( planr2c[i], streams_fft[i] ); // gpuErrchk( hipStreamCreate(&streams_ifft[i]) ); // hipfftSetStream(&planc2r[i]); image_vector.push_back( vx ); mult_image_vector.push_back( mult_image ); dev_pointers_in.push_back( d_vx ); dev_pointers_out.push_back( d_out ); } /* Copying data to the device for processing */ // hipMemcpy(d_vx, vx, NX * NY * sizeof(hipfftDoubleReal), hipMemcpyHostToDevice); // hipMemcpy(d_out, out, NX * NY * sizeof(hipfftDoubleComplex), hipMemcpyHostToDevice); gpuErrchk( hipMemcpyAsync( dev_OTF, OTF, 2*NX*NY*NZ*sizeof(SimPixelType), hipMemcpyHostToDevice, streams_fft[0] ) ); for (unsigned int j = 0; j < NUM_IMAGES; j++ ) { gpuErrchk( hipMemcpyAsync( dev_pointers_in[j], image_vector[j], NX*NY*NZ*sizeof(SimPixelType), hipMemcpyHostToDevice, streams_fft[j]) ); gpuErrchk( hipMemcpyAsync( dev_pointers_out[j], out, NX*NY*NZ*sizeof(hipfftReal), hipMemcpyHostToDevice, streams_fft[j] ) ); } /* * Apply the rotation */ /* Create texture array */ // for (unsigned int j = 0; j < NUM_IMAGES; j++) { // gpuErrchk( hipStreamSynchronize( streams_fft[j] ) ); // } // hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat); // hipArray *d_volumeArray = 0; // const hipExtent volumeSize = make_hipExtent(128, 128, 128); // size_t size = volumeSize.width*volumeSize.height*volumeSize.depth; // gpuErrchk( hipMalloc3DArray(&d_volumeArray, &channelDesc, volumeSize) ); // hipMemcpy3DParms copyParams = {0}; // copyParams.srcPtr = make_hipPitchedPtr( dev_pointers_in[0], // volumeSize.width*sizeof(float), // volumeSize.width, // volumeSize.height // ); // copyParams.dstArray = d_volumeArray; // copyParams.extent = volumeSize; // copyParams.kind = hipMemcpyDeviceToDevice; // gpuErrchk( hipMemcpy3D( &copyParams) ); // tex.normalized = false; // tex.filterMode = hipFilterModePoint; // Filtering mode // tex.addressMode[0] = hipAddressModeBorder; // tex.addressMode[1] = hipAddressModeBorder; // tex.addressMode[2] = hipAddressModeBorder; // gpuErrchk(hipBindTextureToArray(tex, d_volumeArray, channelDesc)); t1 = absoluteTime(); for (unsigned int j = 0; j < NUM_IMAGES; j++) { // hipfftExecD2Z( planr2c[j], // (SimPixelType*)dev_pointers_in[j], // (hipfftDoubleComplex*)dev_pointers_out[j]); // Multiply_complex<<< NX*NY*NZ/512, 512, 0, streams_fft[j] >>>( 
dev_pointers_out[j], // dev_OTF // ); // Obtain data from texture memory hipLaunchKernelGGL(( d_render_cumulate), dim3(NX*NY*NZ/256), dim3(256), 0, streams_fft[j] , dev_pointers_out[j], dev_pointers_in[j], sin(-THETA), cos(-THETA), sin(-PHI), cos(-PHI) ); /* CUDA rotation */ } // for (unsigned int j = 0; j < NUM_IMAGES; j++) { // hipfftSetStream(planc2r[j], streams_fft[j]); // } // // for (unsigned int j = 0; j < NUM_IMAGES; j++) { // hipfftExecZ2D( planc2r[j], (hipfftDoubleComplex*)dev_pointers_out[j], (SimPixelType*)dev_pointers_in[j]); // } for (unsigned int j = 0; j < NUM_IMAGES; j++) { gpuErrchk( hipMemcpyAsync( mult_image_vector[j], dev_pointers_out[j], NX*NY*NZ*sizeof(SimPixelType), hipMemcpyDeviceToHost, streams_fft[j] ) ); } for (unsigned int j = 0; j < NUM_IMAGES; j++) { gpuErrchk( hipStreamSynchronize( streams_fft[j] ) ); } t2 = absoluteTime(); std::cout << "\n\n Streaming time: " << (float)(t2-t1)/1000000 << "ms" << std::endl; t1 = absoluteTime(); for (unsigned int j = 0; j < NUM_IMAGES; j++) { gpuErrchk( hipHostUnregister(image_vector[j]) ); // gpuErrchk( hipHostFree(image_vector[j]) ); } gpuErrchk( hipHostUnregister(OTF) ); gpuErrchk( hipHostUnregister(out) ); // gpuErrchk( hipHostFree( out ) ); t2 = absoluteTime(); std::cout << "\n\n Host Unregister time: " << (float)(t2-t1)/1000000 << "ms" << std::endl; // for (int j = 0; j < NY; j++){ // for (int i = 0; i < NX; i++){ // // printf("%.3f ", vx[j*NX + i]/(NX*NY)); // // SimPixelType* vx = image_vector[1]; // // cout << image_vector[0][j * NX + i]/( NX * NY ) << " "; // cout << complex_array[j * NX + i] << " "; // } // // printf("\n"); // cout << endl; // } // cout << endl; for (int j = 0; j < NY; j++){ for (int i = 0; i < NX; i++){ // printf("%.3f ", vx[j*NX + i]/(NX*NY)); printf("% .2e ", mult_image_vector[0][ 0 * NX * NY + j * NX + i]); } // printf("\n"); cout << endl; } float* buffer = (float*) malloc( sizeof(float) * NX * NY ); float* dev_buf; memcpy( buffer, mult_image_vector[0], sizeof(float) * NX * NY ); gpuErrchk( hipMalloc( &dev_buf, sizeof(float) * NX * NY ) ); gpuErrchk( hipMemcpy( dev_buf, buffer, sizeof(float) * NX * NY, hipMemcpyHostToDevice ) ); cufftShift_2D_impl( dev_buf, NX, NY ); gpuErrchk( hipMemcpy( buffer, dev_buf, sizeof(float) * NX * NY, hipMemcpyDeviceToHost ) ); printf("Shifted output:\n"); for (int j = 0; j < NY; j++){ for (int i = 0; i < NX; i++){ // printf("%.3f ", vx[j*NX + i]/(NX*NY)); printf("% .2e ", buffer[ j * NX + i]); } // printf("\n"); cout << endl; } /* * Output an image for testing */ store_mha/*<double>*/( mult_image_vector[0],// input image 3, // dim NX,// h NY,// w NZ,// d "./tex_image.mha" // output dest ); for (unsigned int j = 0; j < NUM_IMAGES; j++) { gpuErrchk( hipFree( dev_pointers_in[j] ) ); gpuErrchk( hipFree( dev_pointers_out[j] ) ); hipStreamDestroy( streams_fft[j] ); // gpuErrchk( hipFreeArray( d_volumeArray ) ); delete[] image_vector[j]; delete[] mult_image_vector[j]; } gpuErrchk( hipFree( dev_OTF ) ); delete[] OTF; delete[] out; delete[] x; delete[] y; free( buffer ); hipDeviceReset(); // hipfftPlan2d(&planr2c, NY, NX, HIPFFT_D2Z); // hipfftPlan2d(&planc2r, NY, NX, HIPFFT_Z2D); // hipfftExecD2Z(planr2c, (hipfftDoubleReal *)d_vx, (hipfftDoubleComplex *)d_out); // hipfftExecZ2D(planc2r, (hipfftDoubleComplex *)d_out, (hipfftDoubleReal *)d_vx); /* Copy results back from the device */ // hipMemcpy(vx, d_vx, NX * NY * sizeof(hipfftDoubleReal), hipMemcpyDeviceToHost); // int count = 0; // hipDeviceProp_t prop; // int dev_id; // // determining how many devices are available to 
use on the computer // HANDLE_ERROR( hipGetDeviceCount( &count ) ); // printf("There are %d device(s) on this computer.\n", count); // // Iterates through each of the device on this computer // printDevInfo(count, prop); return 0; } #pragma clang diagnostic pop
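/* Illustrative sketch, not part of the file above: the d_render* kernels map a
   linear thread id to 1-based (x, y, z) voxel coordinates and rebuild a flat
   index with shifts, relying on NX = NY = NZ = 2^POW = 16. The host-side check
   below (hypothetical helper name and constants copied from the file) shows
   that the decomposition and the shift-based index agree with plain row-major
   indexing. */
#include <cassert>

constexpr int kN   = 16;  // NX = NY = NZ in the file above
constexpr int kPow = 4;   // POW in the file above

void index_round_trip_check() {
  for (int tid = 0; tid < kN * kN * kN; ++tid) {
    // Same decomposition as the kernels (1-based coordinates).
    int z = tid / (kN * kN) + 1;
    int x = tid % kN + 1;
    int y = (tid % (kN * kN)) / kN + 1;
    // The shift-based flat index used by the kernels...
    int idx = ((z - 1) << (2 * kPow)) + ((y - 1) << kPow) + (x - 1);
    // ...is exactly the row-major index the decomposition started from.
    assert(idx == tid);
  }
}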
2029f1611fac0e3cd60deaa9292ef7f6cbf50fc3.cu
// #include "../common/book.h" #include "./utils.h" #include "./timing.h" #include "./itk_io.h" // #include "./cuda_kernels.cuh" #pragma clang diagnostic push #pragma ide diagnostic ignored "CannotResolve" #include <cuda.h> #include <cufft.h> #include <iostream> #include <complex> #include <vector> #include <cstring> /* cufftShift library */ #include <cufftShiftInterface.h> #define NX 16 #define NY 16 #define NZ 16 #define POW 4 #define LX (2 * M_PI) #define LY (2 * M_PI) #define NUM_IMAGES 1 #define THETA 6.125*M_PI_4 #define PHI 2.4812 // #define ST sin(THETA) // #define CT cos(THETA) // #define SP sin(PHI) // #define CP cos(PHI) texture<float, 3, cudaReadModeElementType> tex; using namespace std; typedef float SimPixelType; // __global__ void add_slices(PIXEL_TYPE* image_in, PIXEL_TYPE* image_out) { // int tid = threadIdx.x + blockIdx.x * blockDim.x; // int idx = tid % (128 * 128 * 2); // PIXEL_TYPE temp = image_in[tid]; // // printf("%d\n", idx); // // if (tid < TOTAL_PIXELS) { // // if (tid < 16384) { // atomicAdd( &image_out[idx], temp ); // // image_out[tid] = temp; // // } // // } // } __device__ float my_roundf (float a) { float fa = fabsf (a); float t = ((fa >= 0.5f) && (fa <= 8388608.0f)) ? 0.5f : 0.0f; return copysignf (truncf (fa + t), a); } /* * Texture lookup based 3D volume rotation. * * */ __global__ void d_render_tex(float *d_output /*, uint imageW, uint imageH, float w*/) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int z = tid / (NX * NY) + 1; int x = tid % NX + 1; int y = ( tid % (NX * NY) ) / NX + 1; float ST = sinf(THETA); float CT = cosf(THETA); float SP = sinf(PHI); float CP = cosf(PHI); int p1 = (NX + 1)/2 + 1; int p2 = (NY + 1)/2 + 1; int p3 = (NZ + 1)/2 + 1; // Apply the rotation, nearest neighbor int xx = my_roundf(x*CT + z*ST - CT*p1 + p1 - ST*p3); if (xx > NX || xx < 1) return; int yy = my_roundf(- x*SP*ST + y*CP + z*SP*CT + SP*ST*p1 - CP*p2 - SP*CT*p3 + p2); if (yy > NY || yy < 1) return; int zz = my_roundf(- x*CP*ST - y*SP + z*CP*CT + CP*ST*p1 + SP*p2 - CP*CT*p3 + p3); if (zz > NZ || zz < 1) return; uint idx = ((zz - 1) << (2 * POW)) + ((yy - 1) << POW) + xx - 1; float voxel = tex3D( tex, x - 1, y - 1, z - 1 ); d_output[idx] = voxel; } __global__ void d_render(float *d_output, float *d_input, float ST, float CT, float SP, float CP /*, uint imageW, uint imageH, float w*/) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int z = tid / (NX * NY) + 1; int x = tid % NX + 1; int y = ( tid % (NX * NY) ) / NX + 1; // float ST = sinf(THETA); // float CT = cosf(THETA); // float SP = sinf(PHI); // float CP = cosf(PHI); int p1 = (NX + 1)/2 + 1; int p2 = (NY + 1)/2 + 1; int p3 = (NZ + 1)/2 + 1; // Apply the rotation, nearest neighbor resampling // int xx = roundf(x*CT + z*ST - CT*p1 + p1 - ST*p3); // if (xx > NX || xx < 1) return; // int yy = roundf(- x*SP*ST + y*CP + z*SP*CT + SP*ST*p1 - CP*p2 - SP*CT*p3 + p2); // if (yy > NY || yy < 1) return; // int zz = roundf(- x*CP*ST - y*SP + z*CP*CT + CP*ST*p1 + SP*p2 - CP*CT*p3 + p3); // if (zz > NZ || zz < 1) return; /* * T1.inv * T2.inv * T3.inv * T4.inv */ // int xx = roundf( x*CT - y*ST*SP + z*ST*CP - p1*CT + p2*ST*SP - p3*ST*CP + p1); // if (xx > NX || xx < 1) return; // int yy = roundf( y*CP + z*SP - p2*CP - p3*SP + p2 ); // if (yy > NY || yy < 1) return; // int zz = roundf( - x*ST - y*CT*SP + z*CT*CP + p1*ST + p2*CT*SP - CT*CP*p3 + p3 ); // if (zz > NZ || zz < 1) return; /* * T4 * T2 * T3 * T1 */ int xx = roundf( x*CT - y*ST*SP - z*ST*CP - p1*CT + p2*ST*SP + p3*ST*CP + p1); if (xx > NX || xx < 1) return; int yy = 
roundf( y*CP - z*SP - p2*CP + p3*SP + p2 ); if (yy > NY || yy < 1) return; int zz = roundf( x*ST + y*CT*SP + z*CT*CP - p1*ST - p2*CT*SP - CT*CP*p3 + p3 ); if (zz > NZ || zz < 1) return; // int xx = my_roundf( x*CT - z*ST*CP - p1*CT + p3*ST + p1); // if (xx > NX || xx < 1) return; // int yy = my_roundf( - x*SP*ST + y*CP - z*SP*CT + p1*SP*ST - p2*CP + p3*SP*CT + p2 ); // if (yy > NY || yy < 1) return; // int zz = my_roundf( x*CP*ST + y*SP + z*CP*CT - p1*CP*ST - p2*SP - p3*CP*CT + p3 ); // if (zz > NZ || zz < 1) return; uint idx = ((zz - 1) << (2 * POW)) + ((yy - 1) << POW) + xx - 1; // float voxel = tex3D( tex, x - 1, y - 1, z - 1 ); float voxel = d_input[ idx ]; d_output[tid] = voxel; } __global__ void d_render_cumulate(float *d_output, float *d_input, float ST, float CT, float SP, float CP ) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int z = tid / (NX * NY) + 1; int x = tid % NX + 1; int y = ( tid % (NX * NY) ) / NX + 1; // float ST = sinf(theta); // float CT = cosf(theta); // float SP = sinf(phi); // float CP = cosf(phi); int p1 = (NX + 1)/2 + 1; int p2 = (NY + 1)/2 + 1; int p3 = (NZ + 1)/2 + 1; // Apply the rotation, nearest neighbor // int xx = my_roundf( x*CT + z*ST - CT*p1 + p1 - ST*p3 ); // if (xx > NX || xx < 1) return; // int yy = my_roundf(- x*SP*ST + y*CP + z*SP*CT + SP*ST*p1 - CP*p2 - SP*CT*p3 + p2); // if (yy > NY || yy < 1) return; // int zz = my_roundf(- x*CP*ST - y*SP + z*CP*CT + CP*ST*p1 + SP*p2 - CP*CT*p3 + p3); // if (zz > NZ || zz < 1) return; // Apply the rotation, nearest neighbor // int xx = roundf( x*CT - y*ST*SP - z*ST*CP - p1*CT + p2*ST*SP + p3*ST*CP + p1); // if (xx > NX || xx < 1) return; // int yy = roundf( y*CP - z*SP - p2*CP + p3*SP + p2 ); // if (yy > NY || yy < 1) return; // int zz = roundf( x*ST + y*CT*SP + z*CT*CP - p1*ST - p2*CT*SP - CT*CP*p3 + p3 ); // if (zz > NZ || zz < 1) return; /* * Apply inverse rotation and find the nearest neighbor in the * ORIGINAL image instead of in the output image. 
*/ int xx = roundf( x*CT - z*ST - p1*CT + p3*ST + p1); if (xx > NX || xx < 1) return; int yy = roundf( - x*SP*ST + y*CP - z*SP*CT + p1*SP*ST - p2*CP + p3*SP*CT + p2 ); if (yy > NY || yy < 1) return; int zz = roundf( x*CP*ST + y*SP + z*CP*CT - p1*CP*ST - p2*SP - p3*CP*CT + p3 ); if (zz > NZ || zz < 1) return; // if (xx > NX || xx < 1 || yy > NY || yy < 1 || zz > NZ || zz < 1) { // return; // } uint idx = ((zz - 1) << (2 * POW)) + ((yy - 1) << POW) + xx - 1; // float voxel = tex3D( tex, x - 1, y - 1, z - 1 ); float voxel = d_input[ idx ]; d_output[tid] = voxel; // Apply a texture lookup // d_output[tid] = voxel; // atomicAdd( &d_output[tid], voxel ); } __global__ void Multiply_complex(SimPixelType* image_in, SimPixelType* image_in2) { int tid = threadIdx.x + blockIdx.x * blockDim.x; // int idx = tid % (128 * 128 * 2); SimPixelType c1_real = image_in[tid*2]; SimPixelType c1_imag = image_in[tid*2+1]; SimPixelType c2_real = image_in2[tid*2]; SimPixelType c2_imag = image_in2[tid*2+1]; image_in[tid*2] = c1_real * c2_real - c1_imag * c2_imag; image_in[tid*2+1] = c1_real * c2_imag + c1_imag * c2_real; } int main() { cudaDeviceReset(); /* Create couple of images for testing */ SimPixelType *x = new SimPixelType[NX * NY]; SimPixelType *y = new SimPixelType[NX * NY]; SimPixelType *in = new SimPixelType[NX * NY * NZ]; /* A vector holding multiple images data */ vector< SimPixelType* > image_vector; vector< SimPixelType* > dev_pointers_in; vector< SimPixelType* > dev_pointers_out; vector< SimPixelType* > imageOut_vector; vector< SimPixelType* > mult_image_vector; /* Create Fourier Kernel plan */ cufftHandle planr2c[NUM_IMAGES]; cufftHandle planc2r[NUM_IMAGES]; /* Create an array of CUDA streams */ cudaStream_t streams_fft[NUM_IMAGES]; /* Output image */ complex<SimPixelType> *out = new complex<SimPixelType>[NX * NY * NZ]; gpuErrchk( cudaHostRegister( out, sizeof(SimPixelType)*NX*NY*NZ*2, cudaHostRegisterPortable ) ); // complex<SimPixelType>* out; // gpuErrchk( cudaMallocHost( &out, NX * NY * NZ * sizeof(SimPixelType) * 2 ) ); /* Initialize it */ memset( out, 0, sizeof(SimPixelType)*NX*NY*NZ*2 ); /* Create the second argument image in the multiply kernel */ SimPixelType* OTF = new SimPixelType[NX * NY * NZ * 2]; // Since the image is complex SimPixelType* dev_OTF; for (int p = 0; p < NZ; p++) { for(int j = 0; j < NY; j++) { for(int kk = 0; kk < NX; kk++) { OTF[(j * NX + kk) * 2] = kk + j; OTF[(j * NX + kk) * 2 + 1] = kk + j; } } } /* Reserve memory locations for the OTF image */ gpuErrchk( cudaMalloc( &dev_OTF, sizeof(SimPixelType)*NX*NY*NZ*2 ) ); gpuErrchk( cudaHostRegister( OTF, sizeof(SimPixelType)*NX*NY*NZ*2, cudaHostRegisterPortable ) ); for (unsigned i = 0; i < NUM_IMAGES; i++) { SimPixelType *vx = new SimPixelType[NX * NY * NZ]; SimPixelType *mult_image = new SimPixelType[NX * NY * NZ]; // SimPixelType* vx; // cudaMallocHost( &vx, NX * NY * NZ * sizeof(SimPixelType) ); for (int p = 0; p < NZ; p++) { for(int j = 0; j < NY; j++){ for(int kk = 0; kk < NX; kk++){ x[j * NX + kk] = kk * LX/NX; y[j * NX + kk] = kk * LY/NY; /* Put values in the new images */ // if (p == 64) vx[j * NX + kk + p * NX * NY] = cos(x[j * NX + kk] + y[j * NX + kk]); if ( i == 0 ) { in[j * NX + kk + p * NX * NY] = cos(x[j * NX + kk] + y[j * NX + kk]); } } } } t1 = absoluteTime(); gpuErrchk( cudaHostRegister( vx, sizeof(SimPixelType)*NX*NY*NZ, cudaHostRegisterPortable ) ); gpuErrchk( cudaHostRegister( mult_image, sizeof(SimPixelType)*NX*NY*NZ, cudaHostRegisterPortable ) ); t2 = absoluteTime(); std::cout << "\n\n Register time: " 
<< (float)(t2-t1)/1000000 << "ms" << std::endl; // for (int j = 0; j < NY; j++){ // for (int i = 0; i < NX; i++){ // // printf("%.3f ", vx[j*NX + i]/(NX*NY)); // cout << vx[j * NX + i] << " "; // } // // printf("\n"); // cout << endl; // } // cout << endl; /* Allocate some spaces on the device */ SimPixelType *d_vx; SimPixelType *d_out; /* Some space on the device */ gpuErrchk(cudaMalloc(&d_vx, NX * NY * NZ * sizeof(SimPixelType))); gpuErrchk(cudaMalloc(&d_out, NX * NY * NZ * sizeof(cufftReal))); /* Create cufft FFT plans */ int n[2] = {NX, NY}; int inembed[] = {NX, NY}; int onembed[] = {NX, NY}; /* Forward Fourier Transform plan */ cufftPlanMany(&planr2c[i], 2, // rank n, // dimension inembed, 1, // istride NX * NY, // idist onembed, 1, //ostride NX * NY, // odist CUFFT_R2C, NZ); /* Inverse Fourier Transform plan */ cufftPlanMany(&planc2r[i], 2, // rank n, // dimension onembed, 1, // istride NX * NY, // idist inembed, 1, //ostride NX * NY, // odist CUFFT_C2R, NZ); cufftSetCompatibilityMode(planr2c[i], CUFFT_COMPATIBILITY_NATIVE); cufftSetCompatibilityMode(planc2r[i], CUFFT_COMPATIBILITY_NATIVE); /* Create streams associated with this 2 plans */ gpuErrchk( cudaStreamCreate( &streams_fft[i] )); cufftSetStream( planr2c[i], streams_fft[i] ); // gpuErrchk( cudaStreamCreate(&streams_ifft[i]) ); // cufftSetStream(&planc2r[i]); image_vector.push_back( vx ); mult_image_vector.push_back( mult_image ); dev_pointers_in.push_back( d_vx ); dev_pointers_out.push_back( d_out ); } /* Copying data to the device for processing */ // cudaMemcpy(d_vx, vx, NX * NY * sizeof(cufftDoubleReal), cudaMemcpyHostToDevice); // cudaMemcpy(d_out, out, NX * NY * sizeof(cufftDoubleComplex), cudaMemcpyHostToDevice); gpuErrchk( cudaMemcpyAsync( dev_OTF, OTF, 2*NX*NY*NZ*sizeof(SimPixelType), cudaMemcpyHostToDevice, streams_fft[0] ) ); for (unsigned int j = 0; j < NUM_IMAGES; j++ ) { gpuErrchk( cudaMemcpyAsync( dev_pointers_in[j], image_vector[j], NX*NY*NZ*sizeof(SimPixelType), cudaMemcpyHostToDevice, streams_fft[j]) ); gpuErrchk( cudaMemcpyAsync( dev_pointers_out[j], out, NX*NY*NZ*sizeof(cufftReal), cudaMemcpyHostToDevice, streams_fft[j] ) ); } /* * Apply the rotation */ /* Create texture array */ // for (unsigned int j = 0; j < NUM_IMAGES; j++) { // gpuErrchk( cudaStreamSynchronize( streams_fft[j] ) ); // } // cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat); // cudaArray *d_volumeArray = 0; // const cudaExtent volumeSize = make_cudaExtent(128, 128, 128); // size_t size = volumeSize.width*volumeSize.height*volumeSize.depth; // gpuErrchk( cudaMalloc3DArray(&d_volumeArray, &channelDesc, volumeSize) ); // cudaMemcpy3DParms copyParams = {0}; // copyParams.srcPtr = make_cudaPitchedPtr( dev_pointers_in[0], // volumeSize.width*sizeof(float), // volumeSize.width, // volumeSize.height // ); // copyParams.dstArray = d_volumeArray; // copyParams.extent = volumeSize; // copyParams.kind = cudaMemcpyDeviceToDevice; // gpuErrchk( cudaMemcpy3D( &copyParams) ); // tex.normalized = false; // tex.filterMode = cudaFilterModePoint; // Filtering mode // tex.addressMode[0] = cudaAddressModeBorder; // tex.addressMode[1] = cudaAddressModeBorder; // tex.addressMode[2] = cudaAddressModeBorder; // gpuErrchk(cudaBindTextureToArray(tex, d_volumeArray, channelDesc)); t1 = absoluteTime(); for (unsigned int j = 0; j < NUM_IMAGES; j++) { // cufftExecD2Z( planr2c[j], // (SimPixelType*)dev_pointers_in[j], // (cufftDoubleComplex*)dev_pointers_out[j]); // Multiply_complex<<< NX*NY*NZ/512, 512, 0, streams_fft[j] 
>>>( dev_pointers_out[j], // dev_OTF // ); // Obtain data from texture memory d_render_cumulate<<< NX*NY*NZ/256, 256, 0, streams_fft[j] >>>( dev_pointers_out[j], dev_pointers_in[j], sin(-THETA), cos(-THETA), sin(-PHI), cos(-PHI) ); /* CUDA rotation */ } // for (unsigned int j = 0; j < NUM_IMAGES; j++) { // cufftSetStream(planc2r[j], streams_fft[j]); // } // // for (unsigned int j = 0; j < NUM_IMAGES; j++) { // cufftExecZ2D( planc2r[j], (cufftDoubleComplex*)dev_pointers_out[j], (SimPixelType*)dev_pointers_in[j]); // } for (unsigned int j = 0; j < NUM_IMAGES; j++) { gpuErrchk( cudaMemcpyAsync( mult_image_vector[j], dev_pointers_out[j], NX*NY*NZ*sizeof(SimPixelType), cudaMemcpyDeviceToHost, streams_fft[j] ) ); } for (unsigned int j = 0; j < NUM_IMAGES; j++) { gpuErrchk( cudaStreamSynchronize( streams_fft[j] ) ); } t2 = absoluteTime(); std::cout << "\n\n Streaming time: " << (float)(t2-t1)/1000000 << "ms" << std::endl; t1 = absoluteTime(); for (unsigned int j = 0; j < NUM_IMAGES; j++) { gpuErrchk( cudaHostUnregister(image_vector[j]) ); // gpuErrchk( cudaFreeHost(image_vector[j]) ); } gpuErrchk( cudaHostUnregister(OTF) ); gpuErrchk( cudaHostUnregister(out) ); // gpuErrchk( cudaFreeHost( out ) ); t2 = absoluteTime(); std::cout << "\n\n Host Unregister time: " << (float)(t2-t1)/1000000 << "ms" << std::endl; // for (int j = 0; j < NY; j++){ // for (int i = 0; i < NX; i++){ // // printf("%.3f ", vx[j*NX + i]/(NX*NY)); // // SimPixelType* vx = image_vector[1]; // // cout << image_vector[0][j * NX + i]/( NX * NY ) << " "; // cout << complex_array[j * NX + i] << " "; // } // // printf("\n"); // cout << endl; // } // cout << endl; for (int j = 0; j < NY; j++){ for (int i = 0; i < NX; i++){ // printf("%.3f ", vx[j*NX + i]/(NX*NY)); printf("% .2e ", mult_image_vector[0][ 0 * NX * NY + j * NX + i]); } // printf("\n"); cout << endl; } float* buffer = (float*) malloc( sizeof(float) * NX * NY ); float* dev_buf; memcpy( buffer, mult_image_vector[0], sizeof(float) * NX * NY ); gpuErrchk( cudaMalloc( &dev_buf, sizeof(float) * NX * NY ) ); gpuErrchk( cudaMemcpy( dev_buf, buffer, sizeof(float) * NX * NY, cudaMemcpyHostToDevice ) ); cufftShift_2D_impl( dev_buf, NX, NY ); gpuErrchk( cudaMemcpy( buffer, dev_buf, sizeof(float) * NX * NY, cudaMemcpyDeviceToHost ) ); printf("Shifted output:\n"); for (int j = 0; j < NY; j++){ for (int i = 0; i < NX; i++){ // printf("%.3f ", vx[j*NX + i]/(NX*NY)); printf("% .2e ", buffer[ j * NX + i]); } // printf("\n"); cout << endl; } /* * Output an image for testing */ store_mha/*<double>*/( mult_image_vector[0],// input image 3, // dim NX,// h NY,// w NZ,// d "./tex_image.mha" // output dest ); for (unsigned int j = 0; j < NUM_IMAGES; j++) { gpuErrchk( cudaFree( dev_pointers_in[j] ) ); gpuErrchk( cudaFree( dev_pointers_out[j] ) ); cudaStreamDestroy( streams_fft[j] ); // gpuErrchk( cudaFreeArray( d_volumeArray ) ); delete[] image_vector[j]; delete[] mult_image_vector[j]; } gpuErrchk( cudaFree( dev_OTF ) ); delete[] OTF; delete[] out; delete[] x; delete[] y; free( buffer ); cudaDeviceReset(); // cufftPlan2d(&planr2c, NY, NX, CUFFT_D2Z); // cufftPlan2d(&planc2r, NY, NX, CUFFT_Z2D); // cufftExecD2Z(planr2c, (cufftDoubleReal *)d_vx, (cufftDoubleComplex *)d_out); // cufftExecZ2D(planc2r, (cufftDoubleComplex *)d_out, (cufftDoubleReal *)d_vx); /* Copy results back from the device */ // cudaMemcpy(vx, d_vx, NX * NY * sizeof(cufftDoubleReal), cudaMemcpyDeviceToHost); // int count = 0; // cudaDeviceProp prop; // int dev_id; // // determining how many devices are available to use on the computer 
// HANDLE_ERROR( cudaGetDeviceCount( &count ) ); // printf("There are %d device(s) on this computer.\n", count); // // Iterates through each of the device on this computer // printDevInfo(count, prop); return 0; } #pragma clang diagnostic pop
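/* Illustrative sketch, not part of the file above: main() pins already
   allocated host buffers with cudaHostRegister so that cudaMemcpyAsync can run
   asynchronously on a stream. The fragment below isolates just that pattern;
   the buffer size and names are made up for the example. */
#include <cstdlib>
#include <cuda_runtime.h>

void async_copy_pattern() {
  const size_t n = 1 << 20;
  float *h_buf = (float*)malloc(n * sizeof(float));
  float *d_buf = nullptr;

  cudaStream_t stream;
  cudaStreamCreate(&stream);

  // Pin the existing host allocation so the copy below can be truly asynchronous.
  cudaHostRegister(h_buf, n * sizeof(float), cudaHostRegisterPortable);
  cudaMalloc(&d_buf, n * sizeof(float));

  // Queue the copy on the stream; the call returns immediately.
  cudaMemcpyAsync(d_buf, h_buf, n * sizeof(float), cudaMemcpyHostToDevice, stream);

  // ... kernels could be launched on the same stream here ...

  // Wait for everything queued on the stream before touching the results.
  cudaStreamSynchronize(stream);

  cudaHostUnregister(h_buf);
  cudaFree(d_buf);
  cudaStreamDestroy(stream);
  free(h_buf);
}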
b20157f3cf2b50f3cf208a9637b15fc585b78b04.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <iostream> #include <string> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <opencv2\core.hpp> #include <opencv2\highgui.hpp> #include <opencv2\imgcodecs.hpp> #include <opencv2\imgproc.hpp> #define MAX_THREADS 32 using namespace std; using namespace cv; __global__ void enlarge(float* src, size_t inputPitch, int rows, int cols, float* dst, size_t outputPitch, float rowRatio, float colRatio) { int row = blockDim.y*blockIdx.y + threadIdx.y; int col = blockDim.x*blockIdx.x + threadIdx.x; if (row < rows&&col < cols) { // get 4 adjacent pixel points back float* q11 = (float*)((char*)src + ((int)(row*colRatio))*inputPitch) + (int)(col*rowRatio); float* q12 = (float*)((char*)src + (((int)(row*colRatio) + 1))*inputPitch) + (int)(col*rowRatio); float* q21 = (float*)((char*)src + ((int)(row*colRatio))*inputPitch) + (int)(col*rowRatio) + 1; float* q22 = (float*)((char*)src + (((int)(row*colRatio) + 1))*inputPitch) + (int)(col*rowRatio) + 1; // Bilinear Interpolation float* outputPixel = (float*)((char*)dst + row*outputPitch) + col; *outputPixel = (1 - rowRatio)*(1 - colRatio)*(*q11) + (1 - rowRatio)*colRatio*(*q12) + rowRatio*(1 - colRatio)*(*q21) + rowRatio*colRatio*(*q22); } } void resizeImage(const Mat & input, Mat & output) { float rowRatio = (float)input.rows / (float)output.rows; float colRatio = (float)input.cols / (float)output.cols; // define block size and thread size dim3 blockSize(output.cols / MAX_THREADS + 1, output.rows / MAX_THREADS + 1); dim3 threadSize(MAX_THREADS, MAX_THREADS); hipStream_t inputStream, outputStream; hipStreamCreate(&inputStream); hipStreamCreate(&outputStream); size_t inputPitch, outputPitch; float* src; float* dst; hipMallocPitch(&src, &inputPitch, sizeof(float)*input.cols, input.rows); hipMemcpy2DAsync(src, inputPitch, input.data, sizeof(float)*input.cols, sizeof(float)*input.cols, input.rows, hipMemcpyHostToDevice, inputStream); hipMallocPitch(&dst, &outputPitch, sizeof(float)*output.cols, output.rows); hipMemcpy2DAsync(dst, outputPitch, output.data, sizeof(float)*output.cols, sizeof(float)*output.cols, output.rows, hipMemcpyHostToDevice, outputStream); hipStreamSynchronize(inputStream); hipStreamSynchronize(outputStream); hipLaunchKernelGGL(( enlarge), dim3(blockSize), dim3(threadSize) , 0, 0, src, inputPitch, output.rows, output.cols, dst, outputPitch, rowRatio, colRatio); hipError_t error = hipDeviceSynchronize(); if (error != hipSuccess) { cout << hipGetErrorString(error) << endl; } hipMemcpy2D(output.data, sizeof(float)*output.cols, dst, outputPitch, sizeof(float)*output.cols, output.rows, hipMemcpyDeviceToHost); // resource releasing hipStreamDestroy(inputStream); hipStreamDestroy(outputStream); hipFree(src); hipFree(dst); } int main() { string path = "type-c.jpg"; Mat img = imread(path, IMREAD_GRAYSCALE); img.convertTo(img, CV_32F); float alpha = 2; Mat result(Size(img.cols*alpha, img.rows*alpha), CV_32F, Scalar(0)); Mat cpuResult; hipEvent_t start, end; hipEventCreate(&start); hipEventCreate(&end); hipEventRecord(start); resizeImage(img, result); hipEventRecord(end); hipEventSynchronize(end); float time; hipEventElapsedTime(&time, start, end); cout << "time cost co GPU: " << time << " ms." 
<< endl;

    hipEventDestroy(start);
    hipEventDestroy(end);

    // test opencv api
    double cpuStart = (double)getTickCount();
    resize(img, cpuResult, Size(img.cols*alpha, img.rows*alpha));
    double cpuEnd = (double)getTickCount();
    // elapsed ticks divided by the tick frequency gives seconds
    double cpuTime = (cpuEnd - cpuStart) / getTickFrequency();
    cout << "time cost on CPU: " << cpuTime * 1000 << " ms." << endl;

    cpuResult.convertTo(cpuResult, CV_8U);

    string title = "CUDA";
    namedWindow(title);

    /*
    need to convert to CV_8U type, because a CV_32F image, whose pixel value ranges from 0.0 to 1.0
    http://stackoverflow.com/questions/14539498/change-type-of-mat-object-from-cv-32f-to-cv-8u
    */
    result.convertTo(result, CV_8U);
    imshow(title, result);
    waitKey(0);

    imwrite("bigger.jpg", result);

    return 0;
}
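/* Illustrative sketch, not part of the file above: for comparison with the
   enlarge kernel, this is the textbook bilinear interpolation of one output
   pixel on the host, where the blending weights are the fractional parts of
   the source coordinate rather than the global scale ratios. Array and
   variable names are made up for the example. */
#include <algorithm>
#include <cmath>

float bilinear_sample(const float* src, int srcRows, int srcCols,
                      float srcY, float srcX) {
  // Integer corner and fractional weights of the source position.
  int x0 = (int)std::floor(srcX);
  int y0 = (int)std::floor(srcY);
  int x1 = std::min(x0 + 1, srcCols - 1);
  int y1 = std::min(y0 + 1, srcRows - 1);
  float fx = srcX - x0;
  float fy = srcY - y0;

  float q11 = src[y0 * srcCols + x0];
  float q21 = src[y0 * srcCols + x1];
  float q12 = src[y1 * srcCols + x0];
  float q22 = src[y1 * srcCols + x1];

  // Blend horizontally, then vertically.
  float top    = q11 * (1.0f - fx) + q21 * fx;
  float bottom = q12 * (1.0f - fx) + q22 * fx;
  return top * (1.0f - fy) + bottom * fy;
}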
b20157f3cf2b50f3cf208a9637b15fc585b78b04.cu
#include <stdio.h> #include <iostream> #include <string> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <opencv2\core.hpp> #include <opencv2\highgui.hpp> #include <opencv2\imgcodecs.hpp> #include <opencv2\imgproc.hpp> #define MAX_THREADS 32 using namespace std; using namespace cv; __global__ void enlarge(float* src, size_t inputPitch, int rows, int cols, float* dst, size_t outputPitch, float rowRatio, float colRatio) { int row = blockDim.y*blockIdx.y + threadIdx.y; int col = blockDim.x*blockIdx.x + threadIdx.x; if (row < rows&&col < cols) { // get 4 adjacent pixel points back float* q11 = (float*)((char*)src + ((int)(row*colRatio))*inputPitch) + (int)(col*rowRatio); float* q12 = (float*)((char*)src + (((int)(row*colRatio) + 1))*inputPitch) + (int)(col*rowRatio); float* q21 = (float*)((char*)src + ((int)(row*colRatio))*inputPitch) + (int)(col*rowRatio) + 1; float* q22 = (float*)((char*)src + (((int)(row*colRatio) + 1))*inputPitch) + (int)(col*rowRatio) + 1; // Bilinear Interpolation float* outputPixel = (float*)((char*)dst + row*outputPitch) + col; *outputPixel = (1 - rowRatio)*(1 - colRatio)*(*q11) + (1 - rowRatio)*colRatio*(*q12) + rowRatio*(1 - colRatio)*(*q21) + rowRatio*colRatio*(*q22); } } void resizeImage(const Mat & input, Mat & output) { float rowRatio = (float)input.rows / (float)output.rows; float colRatio = (float)input.cols / (float)output.cols; // define block size and thread size dim3 blockSize(output.cols / MAX_THREADS + 1, output.rows / MAX_THREADS + 1); dim3 threadSize(MAX_THREADS, MAX_THREADS); cudaStream_t inputStream, outputStream; cudaStreamCreate(&inputStream); cudaStreamCreate(&outputStream); size_t inputPitch, outputPitch; float* src; float* dst; cudaMallocPitch(&src, &inputPitch, sizeof(float)*input.cols, input.rows); cudaMemcpy2DAsync(src, inputPitch, input.data, sizeof(float)*input.cols, sizeof(float)*input.cols, input.rows, cudaMemcpyHostToDevice, inputStream); cudaMallocPitch(&dst, &outputPitch, sizeof(float)*output.cols, output.rows); cudaMemcpy2DAsync(dst, outputPitch, output.data, sizeof(float)*output.cols, sizeof(float)*output.cols, output.rows, cudaMemcpyHostToDevice, outputStream); cudaStreamSynchronize(inputStream); cudaStreamSynchronize(outputStream); enlarge<<<blockSize, threadSize >>>(src, inputPitch, output.rows, output.cols, dst, outputPitch, rowRatio, colRatio); cudaError_t error = cudaDeviceSynchronize(); if (error != cudaSuccess) { cout << cudaGetErrorString(error) << endl; } cudaMemcpy2D(output.data, sizeof(float)*output.cols, dst, outputPitch, sizeof(float)*output.cols, output.rows, cudaMemcpyDeviceToHost); // resource releasing cudaStreamDestroy(inputStream); cudaStreamDestroy(outputStream); cudaFree(src); cudaFree(dst); } int main() { string path = "type-c.jpg"; Mat img = imread(path, IMREAD_GRAYSCALE); img.convertTo(img, CV_32F); float alpha = 2; Mat result(Size(img.cols*alpha, img.rows*alpha), CV_32F, Scalar(0)); Mat cpuResult; cudaEvent_t start, end; cudaEventCreate(&start); cudaEventCreate(&end); cudaEventRecord(start); resizeImage(img, result); cudaEventRecord(end); cudaEventSynchronize(end); float time; cudaEventElapsedTime(&time, start, end); cout << "time cost co GPU: " << time << " ms." 
<< endl;

    cudaEventDestroy(start);
    cudaEventDestroy(end);

    // test opencv api
    double cpuStart = (double)getTickCount();
    resize(img, cpuResult, Size(img.cols*alpha, img.rows*alpha));
    double cpuEnd = (double)getTickCount();
    // elapsed ticks divided by the tick frequency gives seconds
    double cpuTime = (cpuEnd - cpuStart) / getTickFrequency();
    cout << "time cost on CPU: " << cpuTime * 1000 << " ms." << endl;

    cpuResult.convertTo(cpuResult, CV_8U);

    string title = "CUDA";
    namedWindow(title);

    /*
    need to convert to CV_8U type, because a CV_32F image, whose pixel value ranges from 0.0 to 1.0
    http://stackoverflow.com/questions/14539498/change-type-of-mat-object-from-cv-32f-to-cv-8u
    */
    result.convertTo(result, CV_8U);
    imshow(title, result);
    waitKey(0);

    imwrite("bigger.jpg", result);

    return 0;
}
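/* Illustrative sketch, not part of the file above: resizeImage allocates the
   device images with cudaMallocPitch, so each row may be padded. The helper
   below (a hypothetical name) isolates the byte-offset arithmetic the enlarge
   kernel uses to reach element (row, col) of a pitched allocation. */
#include <cuda_runtime.h>

__device__ __forceinline__
float* pitched_ptr(float* base, size_t pitchBytes, int row, int col) {
  // The pitch is in bytes, so step through rows via a char* before indexing columns.
  return (float*)((char*)base + row * pitchBytes) + col;
}

__global__ void scale_pitched(float* img, size_t pitchBytes, int rows, int cols, float s) {
  int col = blockDim.x * blockIdx.x + threadIdx.x;
  int row = blockDim.y * blockIdx.y + threadIdx.y;
  if (row < rows && col < cols) {
    *pitched_ptr(img, pitchBytes, row, col) *= s;
  }
}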
2a0d00c5fc14c8099440435dee052bde4b5ebe57.hip
// !!! This is a file automatically generated by hipify!!! /**************************************************************************\ | | Copyright (C) 2009 Marc Stevens | | This program is free software: you can redistribute it and/or modify | it under the terms of the GNU General Public License as published by | the Free Software Foundation, either version 3 of the License, or | (at your option) any later version. | | This program is distributed in the hope that it will be useful, | but WITHOUT ANY WARRANTY; without even the implied warranty of | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | GNU General Public License for more details. | | You should have received a copy of the GNU General Public License | along with this program. If not, see <http://www.gnu.org/licenses/>. | \**************************************************************************/ #include <iostream> #include <vector> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <time.h> #include <hip/hip_runtime.h> #include <boost/cstdint.hpp> using namespace std; typedef boost::uint32_t uint32; typedef boost::uint64_t uint64; #define MAX_CUDA_THREADS (1<<20) #define REGISTERS_PER_CUDA_THREAD 32 #define TRAIL_NOCONSTRUCTOR #include "birthday_types.hpp" #ifndef CUDA_SAFE_CALL #define CUDA_SAFE_CALL(s) (s) #endif #ifndef cutilSafeCall #define cutilSafeCall(s) (s) #endif class cuda_device_detail { public: uint32 device; uint32 blocks; uint32 threadsperblock; trail_type* buffer_host; }; /* We assume that these are _thread specific_ (instead of global) storage managed by the cuda realtime libraries */ __device__ trail_type working_states2[MAX_CUDA_THREADS]; __device__ trail_type buffer2[MAX_CUDA_THREADS]; __constant__ uint32 msg1[16], msg2[16], ihv1[4], ihv2[4], ihv2mod[4]; __constant__ uint32 precomp1[4], precomp2[4]; __constant__ uint32 hybridmask, distinguishedpointmask, maximumpathlength; /* F, G and H are basic MD5 functions: selection, majority, parity */ #define MD5_F(x, y, z) (((x) & (y)) | ((~x) & (z))) #define MD5_G(x, y, z) (((x) & (z)) | ((y) & (~z))) #define MD5_H(x, y, z) ((x) ^ (y) ^ (z)) #define MD5_I(x, y, z) ((y) ^ ((x) | (~z))) /* ROTATE_LEFT rotates x left n bits */ #define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32-(n)))) /* FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4 */ /* Rotation is separate from addition to prevent recomputation */ #define MD5_FF(a, b, c, d, x, s, ac) \ {(a) += MD5_F ((b), (c), (d)) + (x) + (uint32)(ac); \ (a) = ROTATE_LEFT ((a), (s)); \ (a) += (b); \ } #define MD5_GG(a, b, c, d, x, s, ac) \ {(a) += MD5_G ((b), (c), (d)) + (x) + (uint32)(ac); \ (a) = ROTATE_LEFT ((a), (s)); \ (a) += (b); \ } #define MD5_HH(a, b, c, d, x, s, ac) \ {(a) += MD5_H ((b), (c), (d)) + (x) + (uint32)(ac); \ (a) = ROTATE_LEFT ((a), (s)); \ (a) += (b); \ } #define MD5_II(a, b, c, d, x, s, ac) \ {(a) += MD5_I ((b), (c), (d)) + (x) + (uint32)(ac); \ (a) = ROTATE_LEFT ((a), (s)); \ (a) += (b); \ } __global__ void cuda_md5_init() { int idx = blockIdx.x * blockDim.x + threadIdx.x; working_states2[idx].len = 0; buffer2[idx].len = 0; } bool cuda_device::init(uint32 device, const uint32 ihv1b[4], const uint32 ihv2b[4], const uint32 ihv2modb[4], const uint32 msg1b[16], const uint32 msg2b[16], uint32 hmask, uint32 dpmask, uint32 maxlen) { detail = new cuda_device_detail; detail->device = device; int deviceCount; CUDA_SAFE_CALL( hipGetDeviceCount(&deviceCount) ); if (deviceCount == 0) { cout << "There is no device supporting CUDA!" 
<< endl; return false; } hipDeviceProp_t deviceProp; CUDA_SAFE_CALL( hipGetDeviceProperties(&deviceProp, device) ); if (deviceProp.major == 9999) { cout << "Emulation device found." << endl; return false; } cout << "CUDA device " << device << ": " << deviceProp.name << " (" << deviceProp.multiProcessorCount << " MPs)" << endl; unsigned maxthreadspermp = deviceProp.maxThreadsPerMultiProcessor; if (maxthreadspermp > MAX_CUDA_THREADS) maxthreadspermp = (MAX_CUDA_THREADS/32)*32; while (maxthreadspermp > deviceProp.regsPerMultiprocessor * REGISTERS_PER_CUDA_THREAD) maxthreadspermp -= 32; unsigned minblockspermp = 1; while (maxthreadspermp > minblockspermp * deviceProp.maxThreadsPerBlock) minblockspermp += 1; while (maxthreadspermp * REGISTERS_PER_CUDA_THREAD > minblockspermp * deviceProp.regsPerBlock) minblockspermp += 1; detail->threadsperblock = ((maxthreadspermp / minblockspermp) / 32) * 32; detail->blocks = minblockspermp * deviceProp.multiProcessorCount; cout << "Using " << detail->blocks << " blocks with " << detail->threadsperblock << " threads each: total " << detail->blocks * detail->threadsperblock << " threads." << endl; CUDA_SAFE_CALL( hipSetDevice(device) ); CUDA_SAFE_CALL( hipSetDeviceFlags( hipDeviceScheduleBlockingSync ) ); CUDA_SAFE_CALL( hipHostMalloc( (void**)(&(detail->buffer_host)), 122880 * sizeof(trail_type) ) ); uint32 pc1[4], pc2[4]; uint32 a = ihv1b[0], b = ihv1b[1], c = ihv1b[2], d = ihv1b[3]; MD5_FF ( a, b, c, d, msg1b[ 0], 7, 3614090360); /* 1 */ MD5_FF ( d, a, b, c, msg1b[ 1], 12, 3905402710); /* 2 */ MD5_FF ( c, d, a, b, msg1b[ 2], 17, 606105819); /* 3 */ MD5_FF ( b, c, d, a, msg1b[ 3], 22, 3250441966); /* 4 */ MD5_FF ( a, b, c, d, msg1b[ 4], 7, 4118548399); /* 5 */ MD5_FF ( d, a, b, c, msg1b[ 5], 12, 1200080426); /* 6 */ MD5_FF ( c, d, a, b, msg1b[ 6], 17, 2821735955); /* 7 */ MD5_FF ( b, c, d, a, msg1b[ 7], 22, 4249261313); /* 8 */ MD5_FF ( a, b, c, d, msg1b[ 8], 7, 1770035416); /* 9 */ MD5_FF ( d, a, b, c, msg1b[ 9], 12, 2336552879); /* 10 */ MD5_FF ( c, d, a, b, msg1b[10], 17, 4294925233); /* 11 */ MD5_FF ( b, c, d, a, msg1b[11], 22, 2304563134); /* 12 */ MD5_FF ( a, b, c, d, msg1b[12], 7, 1804603682); /* 13 */ pc1[0] = a; pc1[1] = b; pc1[2] = c; pc1[3] = d; a = ihv2b[0]; b = ihv2b[1]; c = ihv2b[2]; d = ihv2b[3]; MD5_FF ( a, b, c, d, msg2b[ 0], 7, 3614090360); /* 1 */ MD5_FF ( d, a, b, c, msg2b[ 1], 12, 3905402710); /* 2 */ MD5_FF ( c, d, a, b, msg2b[ 2], 17, 606105819); /* 3 */ MD5_FF ( b, c, d, a, msg2b[ 3], 22, 3250441966); /* 4 */ MD5_FF ( a, b, c, d, msg2b[ 4], 7, 4118548399); /* 5 */ MD5_FF ( d, a, b, c, msg2b[ 5], 12, 1200080426); /* 6 */ MD5_FF ( c, d, a, b, msg2b[ 6], 17, 2821735955); /* 7 */ MD5_FF ( b, c, d, a, msg2b[ 7], 22, 4249261313); /* 8 */ MD5_FF ( a, b, c, d, msg2b[ 8], 7, 1770035416); /* 9 */ MD5_FF ( d, a, b, c, msg2b[ 9], 12, 2336552879); /* 10 */ MD5_FF ( c, d, a, b, msg2b[10], 17, 4294925233); /* 11 */ MD5_FF ( b, c, d, a, msg2b[11], 22, 2304563134); /* 12 */ MD5_FF ( a, b, c, d, msg2b[12], 7, 1804603682); /* 13 */ pc2[0] = a; pc2[1] = b; pc2[2] = c; pc2[3] = d; CUDA_SAFE_CALL( hipMemcpyToSymbol(msg1, msg1b, sizeof(msg1)) ); CUDA_SAFE_CALL( hipMemcpyToSymbol(msg2, msg2b, sizeof(msg2)) ); CUDA_SAFE_CALL( hipMemcpyToSymbol(ihv1, ihv1b, sizeof(ihv1)) ); CUDA_SAFE_CALL( hipMemcpyToSymbol(ihv2, ihv2b, sizeof(ihv2)) ); CUDA_SAFE_CALL( hipMemcpyToSymbol(ihv2mod, ihv2modb, sizeof(ihv2mod)) ); CUDA_SAFE_CALL( hipMemcpyToSymbol(precomp1, pc1, sizeof(pc1)) ); CUDA_SAFE_CALL( hipMemcpyToSymbol(precomp2, pc2, sizeof(pc2)) ); CUDA_SAFE_CALL( 
hipMemcpyToSymbol(hybridmask, &hmask, sizeof(hmask)) ); CUDA_SAFE_CALL( hipMemcpyToSymbol(distinguishedpointmask, &dpmask, sizeof(dpmask)) ); CUDA_SAFE_CALL( hipMemcpyToSymbol(maximumpathlength, &maxlen, sizeof(maxlen)) ); hipLaunchKernelGGL(( cuda_md5_init), dim3(detail->blocks), dim3(detail->threadsperblock), 0, 0, ); return true; } __global__ void cuda_md5_work(uint64 seed) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; buffer2[idx].len = 0; uint32 len = working_states2[idx].len; uint32 x = working_states2[idx].end[0]; uint32 y = working_states2[idx].end[1]; uint32 z = working_states2[idx].end[2]; if (len >= maximumpathlength || len == 0) { x = uint32(seed>>32) ^ threadIdx.x; y = uint32(seed) ^ blockIdx.x; z = 0; working_states2[idx].start[0] = x; working_states2[idx].start[1] = y; working_states2[idx].start[2] = z; len = 0; } // __syncthreads(); for (unsigned j = 0; j < 0x100; ++j) { { uint32* in = msg1; uint32 a = precomp1[0], b = precomp1[1], c = precomp1[2], d = precomp1[3]; if (x > y) { in = msg2; a = precomp2[0]; b = precomp2[1]; c = precomp2[2]; d = precomp2[3]; } MD5_FF ( d, a, b, c, z, 12, 4254626195); /* 14 */ MD5_FF ( c, d, a, b, x, 17, 2792965006); /* 15 */ MD5_FF ( b, c, d, a, y, 22, 1236535329); /* 16 */ MD5_GG ( a, b, c, d, in[ 1], 5, 4129170786); /* 17 */ MD5_GG ( d, a, b, c, in[ 6], 9, 3225465664); /* 18 */ MD5_GG ( c, d, a, b, in[11], 14, 643717713); /* 19 */ MD5_GG ( b, c, d, a, in[ 0], 20, 3921069994); /* 20 */ MD5_GG ( a, b, c, d, in[ 5], 5, 3593408605); /* 21 */ MD5_GG ( d, a, b, c, in[10], 9, 38016083); /* 22 */ MD5_GG ( c, d, a, b, y, 14, 3634488961); /* 23 */ MD5_GG ( b, c, d, a, in[ 4], 20, 3889429448); /* 24 */ MD5_GG ( a, b, c, d, in[ 9], 5, 568446438); /* 25 */ MD5_GG ( d, a, b, c, x, 9, 3275163606); /* 26 */ MD5_GG ( c, d, a, b, in[ 3], 14, 4107603335); /* 27 */ MD5_GG ( b, c, d, a, in[ 8], 20, 1163531501); /* 28 */ MD5_GG ( a, b, c, d, z, 5, 2850285829); /* 29 */ MD5_GG ( d, a, b, c, in[ 2], 9, 4243563512); /* 30 */ MD5_GG ( c, d, a, b, in[ 7], 14, 1735328473); /* 31 */ MD5_GG ( b, c, d, a, in[12], 20, 2368359562); /* 32 */ MD5_HH ( a, b, c, d, in[ 5], 4, 4294588738); /* 33 */ MD5_HH ( d, a, b, c, in[ 8], 11, 2272392833); /* 34 */ MD5_HH ( c, d, a, b, in[11], 16, 1839030562); /* 35 */ MD5_HH ( b, c, d, a, x, 23, 4259657740); /* 36 */ MD5_HH ( a, b, c, d, in[ 1], 4, 2763975236); /* 37 */ MD5_HH ( d, a, b, c, in[ 4], 11, 1272893353); /* 38 */ MD5_HH ( c, d, a, b, in[ 7], 16, 4139469664); /* 39 */ MD5_HH ( b, c, d, a, in[10], 23, 3200236656); /* 40 */ MD5_HH ( a, b, c, d, z, 4, 681279174); /* 41 */ MD5_HH ( d, a, b, c, in[ 0], 11, 3936430074); /* 42 */ MD5_HH ( c, d, a, b, in[ 3], 16, 3572445317); /* 43 */ MD5_HH ( b, c, d, a, in[ 6], 23, 76029189); /* 44 */ MD5_HH ( a, b, c, d, in[ 9], 4, 3654602809); /* 45 */ MD5_HH ( d, a, b, c, in[12], 11, 3873151461); /* 46 */ MD5_HH ( c, d, a, b, y, 16, 530742520); /* 47 */ MD5_HH ( b, c, d, a, in[ 2], 23, 3299628645); /* 48 */ MD5_II ( a, b, c, d, in[ 0], 6, 4096336452); /* 49 */ MD5_II ( d, a, b, c, in[ 7], 10, 1126891415); /* 50 */ MD5_II ( c, d, a, b, x, 15, 2878612391); /* 51 */ MD5_II ( b, c, d, a, in[ 5], 21, 4237533241); /* 52 */ MD5_II ( a, b, c, d, in[12], 6, 1700485571); /* 53 */ MD5_II ( d, a, b, c, in[ 3], 10, 2399980690); /* 54 */ MD5_II ( c, d, a, b, in[10], 15, 4293915773); /* 55 */ MD5_II ( b, c, d, a, in[ 1], 21, 2240044497); /* 56 */ MD5_II ( a, b, c, d, in[ 8], 6, 1873313359); /* 57 */ MD5_II ( d, a, b, c, y, 10, 4264355552); /* 58 */ MD5_II ( c, d, a, b, in[ 6], 15, 2734768916); /* 59 */ 
MD5_II ( b, c, d, a, z, 21, 1309151649); /* 60 */ MD5_II ( a, b, c, d, in[ 4], 6, 4149444226); /* 61 */ MD5_II ( d, a, b, c, in[11], 10, 3174756917); /* 62 */ MD5_II ( c, d, a, b, in[ 2], 15, 718787259); /* 63 */ MD5_II ( b, c, d, a, in[ 9], 21, 3951481745); /* 64 */ if (x <= y) { a += ihv1[0]; b += ihv1[1]; c += ihv1[2]; d += ihv1[3]; } else { a += ihv2mod[0]; b += ihv2mod[1]; c += ihv2mod[2]; d += ihv2mod[3]; } x = a; y = d - c; z = (d - b) & hybridmask; ++len; } { if (0 == (x & distinguishedpointmask)) { buffer2[idx].end[0] = x; buffer2[idx].end[1] = y; buffer2[idx].end[2] = z; buffer2[idx].len = len; buffer2[idx].start[0] = working_states2[idx].start[0]; buffer2[idx].start[1] = working_states2[idx].start[1]; buffer2[idx].start[2] = working_states2[idx].start[2]; x = uint32(seed>>32) ^ (threadIdx.x<<16) + len; y = uint32(seed) ^ blockIdx.x; z = 0; len = 0; working_states2[idx].start[0] = x; working_states2[idx].start[1] = y; working_states2[idx].start[2] = z; } } // __syncthreads(); } working_states2[idx].end[0] = x; working_states2[idx].end[1] = y; working_states2[idx].end[2] = z; working_states2[idx].len = len; } __global__ void cuda_md5_workmod(uint64 seed) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; buffer2[idx].len = 0; uint32 len = working_states2[idx].len; uint32 x = working_states2[idx].end[0]; uint32 y = working_states2[idx].end[1]; uint32 z = working_states2[idx].end[2]; if (len >= maximumpathlength || len == 0) { x = uint32(seed>>32) ^ threadIdx.x; y = uint32(seed) ^ blockIdx.x; z = 0; working_states2[idx].start[0] = x; working_states2[idx].start[1] = y; working_states2[idx].start[2] = z; len = 0; } // __syncthreads(); for (unsigned j = 0; j < 0x100; ++j) { { uint32* in = msg1; uint32 a = precomp1[0], b = precomp1[1], c = precomp1[2], d = precomp1[3]; if (x > y) { in = msg2; a = precomp2[0]; b = precomp2[1]; c = precomp2[2]; d = precomp2[3]; } MD5_FF ( d, a, b, c, z, 12, 4254626195); /* 14 */ MD5_FF ( c, d, a, b, x, 17, 2792965006); /* 15 */ MD5_FF ( b, c, d, a, y, 22, 1236535329); /* 16 */ MD5_GG ( a, b, c, d, in[ 1], 5, 4129170786); /* 17 */ MD5_GG ( d, a, b, c, in[ 6], 9, 3225465664); /* 18 */ MD5_GG ( c, d, a, b, in[11], 14, 643717713); /* 19 */ MD5_GG ( b, c, d, a, in[ 0], 20, 3921069994); /* 20 */ MD5_GG ( a, b, c, d, in[ 5], 5, 3593408605); /* 21 */ MD5_GG ( d, a, b, c, in[10], 9, 38016083); /* 22 */ MD5_GG ( c, d, a, b, y, 14, 3634488961); /* 23 */ MD5_GG ( b, c, d, a, in[ 4], 20, 3889429448); /* 24 */ MD5_GG ( a, b, c, d, in[ 9], 5, 568446438); /* 25 */ MD5_GG ( d, a, b, c, x, 9, 3275163606); /* 26 */ MD5_GG ( c, d, a, b, in[ 3], 14, 4107603335); /* 27 */ MD5_GG ( b, c, d, a, in[ 8], 20, 1163531501); /* 28 */ MD5_GG ( a, b, c, d, z, 5, 2850285829); /* 29 */ MD5_GG ( d, a, b, c, in[ 2], 9, 4243563512); /* 30 */ MD5_GG ( c, d, a, b, in[ 7], 14, 1735328473); /* 31 */ MD5_GG ( b, c, d, a, in[12], 20, 2368359562); /* 32 */ MD5_HH ( a, b, c, d, in[ 5], 4, 4294588738); /* 33 */ MD5_HH ( d, a, b, c, in[ 8], 11, 2272392833); /* 34 */ MD5_HH ( c, d, a, b, in[11], 16, 1839030562); /* 35 */ MD5_HH ( b, c, d, a, x, 23, 4259657740); /* 36 */ MD5_HH ( a, b, c, d, in[ 1], 4, 2763975236); /* 37 */ MD5_HH ( d, a, b, c, in[ 4], 11, 1272893353); /* 38 */ MD5_HH ( c, d, a, b, in[ 7], 16, 4139469664); /* 39 */ MD5_HH ( b, c, d, a, in[10], 23, 3200236656); /* 40 */ MD5_HH ( a, b, c, d, z, 4, 681279174); /* 41 */ MD5_HH ( d, a, b, c, in[ 0], 11, 3936430074); /* 42 */ MD5_HH ( c, d, a, b, in[ 3], 16, 3572445317); /* 43 */ MD5_HH ( b, c, d, a, in[ 6], 23, 76029189); /* 44 */ 
MD5_HH ( a, b, c, d, in[ 9], 4, 3654602809); /* 45 */ MD5_HH ( d, a, b, c, in[12], 11, 3873151461); /* 46 */ MD5_HH ( c, d, a, b, y, 16, 530742520); /* 47 */ MD5_HH ( b, c, d, a, in[ 2], 23, 3299628645); /* 48 */ MD5_II ( a, b, c, d, in[ 0], 6, 4096336452); /* 49 */ MD5_II ( d, a, b, c, in[ 7], 10, 1126891415); /* 50 */ MD5_II ( c, d, a, b, x, 15, 2878612391); /* 51 */ MD5_II ( b, c, d, a, in[ 5], 21, 4237533241); /* 52 */ MD5_II ( a, b, c, d, in[12], 6, 1700485571); /* 53 */ MD5_II ( d, a, b, c, in[ 3], 10, 2399980690); /* 54 */ MD5_II ( c, d, a, b, in[10], 15, 4293915773); /* 55 */ MD5_II ( b, c, d, a, in[ 1], 21, 2240044497); /* 56 */ MD5_II ( a, b, c, d, in[ 8], 6, 1873313359); /* 57 */ MD5_II ( d, a, b, c, y, 10, 4264355552); /* 58 */ MD5_II ( c, d, a, b, in[ 6], 15, 2734768916); /* 59 */ MD5_II ( b, c, d, a, z, 21, 1309151649); /* 60 */ MD5_II ( a, b, c, d, in[ 4], 6, 4149444226); /* 61 */ MD5_II ( d, a, b, c, in[11], 10, 3174756917); /* 62 */ MD5_II ( c, d, a, b, in[ 2], 15, 718787259); /* 63 */ if (x <= y) { x = a + ihv1[0]; y = d + ihv1[3]; z = (c + ihv1[2]) & hybridmask; } else { x = a + ihv2mod[0]; y = d + ihv2mod[3]; z = (c + ihv2mod[2]) & hybridmask; } ++len; } { if (0 == (x & distinguishedpointmask)) { buffer2[idx].end[0] = x; buffer2[idx].end[1] = y; buffer2[idx].end[2] = z; buffer2[idx].len = len; buffer2[idx].start[0] = working_states2[idx].start[0]; buffer2[idx].start[1] = working_states2[idx].start[1]; buffer2[idx].start[2] = working_states2[idx].start[2]; x = uint32(seed>>32) ^ (threadIdx.x<<16) + len; y = uint32(seed) ^ blockIdx.x; z = 0; len = 0; working_states2[idx].start[0] = x; working_states2[idx].start[1] = y; working_states2[idx].start[2] = z; } } // __syncthreads(); } working_states2[idx].end[0] = x; working_states2[idx].end[1] = y; working_states2[idx].end[2] = z; working_states2[idx].len = len; } void cuda_device::cuda_fill_trail_buffer(uint32 id, uint64 seed, vector<trail_type>& buf, vector< pair<trail_type,trail_type> >& collisions, bool mod) { // transfer results hipMemcpyFromSymbol(detail->buffer_host, buffer2, sizeof(trail_type)*detail->blocks*detail->threadsperblock); // start new cuda computation if (!mod) hipLaunchKernelGGL(( cuda_md5_work), dim3(detail->blocks), dim3(detail->threadsperblock), 0, 0, seed); else hipLaunchKernelGGL(( cuda_md5_workmod), dim3(detail->blocks), dim3(detail->threadsperblock), 0, 0, seed); // process and return results buf.clear(); for (unsigned i = 0; i < detail->blocks*detail->threadsperblock; ++i) if (detail->buffer_host[i].len) buf.push_back(detail->buffer_host[i]); } #ifdef _WIN32 #include <windows.h> #else #include <sys/time.h> #endif class timer_detail; class timer { public: timer(bool direct_start = false); ~timer(); void start(); void stop(); double time() const;// get time between start and stop (or now if still running) in seconds bool isrunning() const { return running; } // check if timer is running private: timer_detail* detail; bool running; }; class timer_detail { public: #ifdef _WIN32 LARGE_INTEGER tstart, tend; double freq; #else struct timeval tstart, tend; struct timezone tz; #endif }; timer::~timer() { delete detail; } timer::timer(bool direct_start): running(false) { detail = new timer_detail; #ifdef _WIN32 LARGE_INTEGER tmp_freq; QueryPerformanceFrequency(&tmp_freq); detail->freq = double(tmp_freq.QuadPart); #endif if (direct_start) start(); } #ifdef _WIN32 void timer::start() { running = true; QueryPerformanceCounter(&detail->tstart); } void timer::stop() { QueryPerformanceCounter(&detail->tend); 
running = false; } double timer::time() const { if (running) { LARGE_INTEGER tmp_end; QueryPerformanceCounter(&tmp_end); return (double(tmp_end.QuadPart) - double(detail->tstart.QuadPart))/detail->freq; } else return (double(detail->tend.QuadPart) - double(detail->tstart.QuadPart))/detail->freq; } #else void timer::start() { running = true; gettimeofday(&detail->tstart, &detail->tz); } void timer::stop() { gettimeofday(&detail->tend, &detail->tz); running = false; } double timer::time() const { double t1 = double(detail->tstart.tv_sec) + (double(detail->tstart.tv_usec)/1e6); if (running) { struct timeval tmp_end; gettimeofday(&tmp_end, &detail->tz); return double(tmp_end.tv_sec) + (double(tmp_end.tv_usec)/1e6) - t1; } else return double(detail->tend.tv_sec) + (double(detail->tend.tv_usec)/1e6) - t1; } #endif void cuda_device::benchmark() { timer sw; for (int blocksize = 4; blocksize <= 256; ++blocksize) for (int threadsize = 250; threadsize <= 257; ++threadsize) { sw.start(); uint64 work = 0; while (sw.time() < 10) { hipLaunchKernelGGL(( cuda_md5_work), dim3(blocksize), dim3(threadsize), 0, 0, 0); hipMemcpyFromSymbol(detail->buffer_host, buffer2, sizeof(trail_type)*blocksize*threadsize); ++work; } uint64 ow = work; work *= 0x400 * blocksize * threadsize; cout << blocksize << "x" << threadsize << ":\t" << work << " (" << ow << ")" << endl; } } int get_num_cuda_devices() { int deviceCount = 0; hipGetDeviceCount(&deviceCount); return deviceCount; } void cuda_device_query() { int deviceCount; cutilSafeCall(hipGetDeviceCount(&deviceCount)); if (deviceCount == 0) printf("There is no device supporting CUDA\n"); int dev; for (dev = 0; dev < deviceCount; ++dev) { hipDeviceProp_t deviceProp; cutilSafeCall(hipGetDeviceProperties(&deviceProp, dev)); if (dev == 0) { if (deviceProp.major == 9999 && deviceProp.minor == 9999) printf("There is no device supporting CUDA.\n"); else if (deviceCount == 1) printf("There is 1 device supporting CUDA\n"); else printf("There are %d devices supporting CUDA\n", deviceCount); } printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name); printf(" Major revision number: %d\n", deviceProp.major); printf(" Minor revision number: %d\n", deviceProp.minor); printf(" Total amount of global memory: %u bytes\n", deviceProp.totalGlobalMem); #if CUDART_VERSION >= 2000 printf(" Number of multiprocessors: %d\n", deviceProp.multiProcessorCount); printf(" Number of cores: %d\n", 8 * deviceProp.multiProcessorCount); #endif printf(" Total amount of constant memory: %u bytes\n", deviceProp.totalConstMem); printf(" Total amount of shared memory per block: %u bytes\n", deviceProp.sharedMemPerBlock); printf(" Total number of registers available per block: %d\n", deviceProp.regsPerBlock); printf(" Warp size: %d\n", deviceProp.warpSize); printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock); printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]); printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]); printf(" Maximum memory pitch: %u bytes\n", deviceProp.memPitch); printf(" Texture alignment: %u bytes\n", deviceProp.textureAlignment); printf(" Clock rate: %.2f GHz\n", deviceProp.clockRate * 1e-6f); #if CUDART_VERSION >= 2000 printf(" Concurrent copy and execution: %s\n", deviceProp.deviceOverlap ? "Yes" : "No"); #endif } }
2a0d00c5fc14c8099440435dee052bde4b5ebe57.cu
/**************************************************************************\ | | Copyright (C) 2009 Marc Stevens | | This program is free software: you can redistribute it and/or modify | it under the terms of the GNU General Public License as published by | the Free Software Foundation, either version 3 of the License, or | (at your option) any later version. | | This program is distributed in the hope that it will be useful, | but WITHOUT ANY WARRANTY; without even the implied warranty of | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | GNU General Public License for more details. | | You should have received a copy of the GNU General Public License | along with this program. If not, see <http://www.gnu.org/licenses/>. | \**************************************************************************/ #include <iostream> #include <vector> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <time.h> #include <cuda.h> #include <boost/cstdint.hpp> using namespace std; typedef boost::uint32_t uint32; typedef boost::uint64_t uint64; #define MAX_CUDA_THREADS (1<<20) #define REGISTERS_PER_CUDA_THREAD 32 #define TRAIL_NOCONSTRUCTOR #include "birthday_types.hpp" #ifndef CUDA_SAFE_CALL #define CUDA_SAFE_CALL(s) (s) #endif #ifndef cutilSafeCall #define cutilSafeCall(s) (s) #endif class cuda_device_detail { public: uint32 device; uint32 blocks; uint32 threadsperblock; trail_type* buffer_host; }; /* We assume that these are _thread specific_ (instead of global) storage managed by the cuda realtime libraries */ __device__ trail_type working_states2[MAX_CUDA_THREADS]; __device__ trail_type buffer2[MAX_CUDA_THREADS]; __constant__ uint32 msg1[16], msg2[16], ihv1[4], ihv2[4], ihv2mod[4]; __constant__ uint32 precomp1[4], precomp2[4]; __constant__ uint32 hybridmask, distinguishedpointmask, maximumpathlength; /* F, G and H are basic MD5 functions: selection, majority, parity */ #define MD5_F(x, y, z) (((x) & (y)) | ((~x) & (z))) #define MD5_G(x, y, z) (((x) & (z)) | ((y) & (~z))) #define MD5_H(x, y, z) ((x) ^ (y) ^ (z)) #define MD5_I(x, y, z) ((y) ^ ((x) | (~z))) /* ROTATE_LEFT rotates x left n bits */ #define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32-(n)))) /* FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4 */ /* Rotation is separate from addition to prevent recomputation */ #define MD5_FF(a, b, c, d, x, s, ac) \ {(a) += MD5_F ((b), (c), (d)) + (x) + (uint32)(ac); \ (a) = ROTATE_LEFT ((a), (s)); \ (a) += (b); \ } #define MD5_GG(a, b, c, d, x, s, ac) \ {(a) += MD5_G ((b), (c), (d)) + (x) + (uint32)(ac); \ (a) = ROTATE_LEFT ((a), (s)); \ (a) += (b); \ } #define MD5_HH(a, b, c, d, x, s, ac) \ {(a) += MD5_H ((b), (c), (d)) + (x) + (uint32)(ac); \ (a) = ROTATE_LEFT ((a), (s)); \ (a) += (b); \ } #define MD5_II(a, b, c, d, x, s, ac) \ {(a) += MD5_I ((b), (c), (d)) + (x) + (uint32)(ac); \ (a) = ROTATE_LEFT ((a), (s)); \ (a) += (b); \ } __global__ void cuda_md5_init() { int idx = blockIdx.x * blockDim.x + threadIdx.x; working_states2[idx].len = 0; buffer2[idx].len = 0; } bool cuda_device::init(uint32 device, const uint32 ihv1b[4], const uint32 ihv2b[4], const uint32 ihv2modb[4], const uint32 msg1b[16], const uint32 msg2b[16], uint32 hmask, uint32 dpmask, uint32 maxlen) { detail = new cuda_device_detail; detail->device = device; int deviceCount; CUDA_SAFE_CALL( cudaGetDeviceCount(&deviceCount) ); if (deviceCount == 0) { cout << "There is no device supporting CUDA!" 
<< endl; return false; } cudaDeviceProp deviceProp; CUDA_SAFE_CALL( cudaGetDeviceProperties(&deviceProp, device) ); if (deviceProp.major == 9999) { cout << "Emulation device found." << endl; return false; } cout << "CUDA device " << device << ": " << deviceProp.name << " (" << deviceProp.multiProcessorCount << " MPs)" << endl; unsigned maxthreadspermp = deviceProp.maxThreadsPerMultiProcessor; if (maxthreadspermp > MAX_CUDA_THREADS) maxthreadspermp = (MAX_CUDA_THREADS/32)*32; while (maxthreadspermp > deviceProp.regsPerMultiprocessor * REGISTERS_PER_CUDA_THREAD) maxthreadspermp -= 32; unsigned minblockspermp = 1; while (maxthreadspermp > minblockspermp * deviceProp.maxThreadsPerBlock) minblockspermp += 1; while (maxthreadspermp * REGISTERS_PER_CUDA_THREAD > minblockspermp * deviceProp.regsPerBlock) minblockspermp += 1; detail->threadsperblock = ((maxthreadspermp / minblockspermp) / 32) * 32; detail->blocks = minblockspermp * deviceProp.multiProcessorCount; cout << "Using " << detail->blocks << " blocks with " << detail->threadsperblock << " threads each: total " << detail->blocks * detail->threadsperblock << " threads." << endl; CUDA_SAFE_CALL( cudaSetDevice(device) ); CUDA_SAFE_CALL( cudaSetDeviceFlags( cudaDeviceBlockingSync ) ); CUDA_SAFE_CALL( cudaMallocHost( (void**)(&(detail->buffer_host)), 122880 * sizeof(trail_type) ) ); uint32 pc1[4], pc2[4]; uint32 a = ihv1b[0], b = ihv1b[1], c = ihv1b[2], d = ihv1b[3]; MD5_FF ( a, b, c, d, msg1b[ 0], 7, 3614090360); /* 1 */ MD5_FF ( d, a, b, c, msg1b[ 1], 12, 3905402710); /* 2 */ MD5_FF ( c, d, a, b, msg1b[ 2], 17, 606105819); /* 3 */ MD5_FF ( b, c, d, a, msg1b[ 3], 22, 3250441966); /* 4 */ MD5_FF ( a, b, c, d, msg1b[ 4], 7, 4118548399); /* 5 */ MD5_FF ( d, a, b, c, msg1b[ 5], 12, 1200080426); /* 6 */ MD5_FF ( c, d, a, b, msg1b[ 6], 17, 2821735955); /* 7 */ MD5_FF ( b, c, d, a, msg1b[ 7], 22, 4249261313); /* 8 */ MD5_FF ( a, b, c, d, msg1b[ 8], 7, 1770035416); /* 9 */ MD5_FF ( d, a, b, c, msg1b[ 9], 12, 2336552879); /* 10 */ MD5_FF ( c, d, a, b, msg1b[10], 17, 4294925233); /* 11 */ MD5_FF ( b, c, d, a, msg1b[11], 22, 2304563134); /* 12 */ MD5_FF ( a, b, c, d, msg1b[12], 7, 1804603682); /* 13 */ pc1[0] = a; pc1[1] = b; pc1[2] = c; pc1[3] = d; a = ihv2b[0]; b = ihv2b[1]; c = ihv2b[2]; d = ihv2b[3]; MD5_FF ( a, b, c, d, msg2b[ 0], 7, 3614090360); /* 1 */ MD5_FF ( d, a, b, c, msg2b[ 1], 12, 3905402710); /* 2 */ MD5_FF ( c, d, a, b, msg2b[ 2], 17, 606105819); /* 3 */ MD5_FF ( b, c, d, a, msg2b[ 3], 22, 3250441966); /* 4 */ MD5_FF ( a, b, c, d, msg2b[ 4], 7, 4118548399); /* 5 */ MD5_FF ( d, a, b, c, msg2b[ 5], 12, 1200080426); /* 6 */ MD5_FF ( c, d, a, b, msg2b[ 6], 17, 2821735955); /* 7 */ MD5_FF ( b, c, d, a, msg2b[ 7], 22, 4249261313); /* 8 */ MD5_FF ( a, b, c, d, msg2b[ 8], 7, 1770035416); /* 9 */ MD5_FF ( d, a, b, c, msg2b[ 9], 12, 2336552879); /* 10 */ MD5_FF ( c, d, a, b, msg2b[10], 17, 4294925233); /* 11 */ MD5_FF ( b, c, d, a, msg2b[11], 22, 2304563134); /* 12 */ MD5_FF ( a, b, c, d, msg2b[12], 7, 1804603682); /* 13 */ pc2[0] = a; pc2[1] = b; pc2[2] = c; pc2[3] = d; CUDA_SAFE_CALL( cudaMemcpyToSymbol(msg1, msg1b, sizeof(msg1)) ); CUDA_SAFE_CALL( cudaMemcpyToSymbol(msg2, msg2b, sizeof(msg2)) ); CUDA_SAFE_CALL( cudaMemcpyToSymbol(ihv1, ihv1b, sizeof(ihv1)) ); CUDA_SAFE_CALL( cudaMemcpyToSymbol(ihv2, ihv2b, sizeof(ihv2)) ); CUDA_SAFE_CALL( cudaMemcpyToSymbol(ihv2mod, ihv2modb, sizeof(ihv2mod)) ); CUDA_SAFE_CALL( cudaMemcpyToSymbol(precomp1, pc1, sizeof(pc1)) ); CUDA_SAFE_CALL( cudaMemcpyToSymbol(precomp2, pc2, sizeof(pc2)) ); CUDA_SAFE_CALL( 
cudaMemcpyToSymbol(hybridmask, &hmask, sizeof(hmask)) ); CUDA_SAFE_CALL( cudaMemcpyToSymbol(distinguishedpointmask, &dpmask, sizeof(dpmask)) ); CUDA_SAFE_CALL( cudaMemcpyToSymbol(maximumpathlength, &maxlen, sizeof(maxlen)) ); cuda_md5_init<<<detail->blocks, detail->threadsperblock>>>(); return true; } __global__ void cuda_md5_work(uint64 seed) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; buffer2[idx].len = 0; uint32 len = working_states2[idx].len; uint32 x = working_states2[idx].end[0]; uint32 y = working_states2[idx].end[1]; uint32 z = working_states2[idx].end[2]; if (len >= maximumpathlength || len == 0) { x = uint32(seed>>32) ^ threadIdx.x; y = uint32(seed) ^ blockIdx.x; z = 0; working_states2[idx].start[0] = x; working_states2[idx].start[1] = y; working_states2[idx].start[2] = z; len = 0; } // __syncthreads(); for (unsigned j = 0; j < 0x100; ++j) { { uint32* in = msg1; uint32 a = precomp1[0], b = precomp1[1], c = precomp1[2], d = precomp1[3]; if (x > y) { in = msg2; a = precomp2[0]; b = precomp2[1]; c = precomp2[2]; d = precomp2[3]; } MD5_FF ( d, a, b, c, z, 12, 4254626195); /* 14 */ MD5_FF ( c, d, a, b, x, 17, 2792965006); /* 15 */ MD5_FF ( b, c, d, a, y, 22, 1236535329); /* 16 */ MD5_GG ( a, b, c, d, in[ 1], 5, 4129170786); /* 17 */ MD5_GG ( d, a, b, c, in[ 6], 9, 3225465664); /* 18 */ MD5_GG ( c, d, a, b, in[11], 14, 643717713); /* 19 */ MD5_GG ( b, c, d, a, in[ 0], 20, 3921069994); /* 20 */ MD5_GG ( a, b, c, d, in[ 5], 5, 3593408605); /* 21 */ MD5_GG ( d, a, b, c, in[10], 9, 38016083); /* 22 */ MD5_GG ( c, d, a, b, y, 14, 3634488961); /* 23 */ MD5_GG ( b, c, d, a, in[ 4], 20, 3889429448); /* 24 */ MD5_GG ( a, b, c, d, in[ 9], 5, 568446438); /* 25 */ MD5_GG ( d, a, b, c, x, 9, 3275163606); /* 26 */ MD5_GG ( c, d, a, b, in[ 3], 14, 4107603335); /* 27 */ MD5_GG ( b, c, d, a, in[ 8], 20, 1163531501); /* 28 */ MD5_GG ( a, b, c, d, z, 5, 2850285829); /* 29 */ MD5_GG ( d, a, b, c, in[ 2], 9, 4243563512); /* 30 */ MD5_GG ( c, d, a, b, in[ 7], 14, 1735328473); /* 31 */ MD5_GG ( b, c, d, a, in[12], 20, 2368359562); /* 32 */ MD5_HH ( a, b, c, d, in[ 5], 4, 4294588738); /* 33 */ MD5_HH ( d, a, b, c, in[ 8], 11, 2272392833); /* 34 */ MD5_HH ( c, d, a, b, in[11], 16, 1839030562); /* 35 */ MD5_HH ( b, c, d, a, x, 23, 4259657740); /* 36 */ MD5_HH ( a, b, c, d, in[ 1], 4, 2763975236); /* 37 */ MD5_HH ( d, a, b, c, in[ 4], 11, 1272893353); /* 38 */ MD5_HH ( c, d, a, b, in[ 7], 16, 4139469664); /* 39 */ MD5_HH ( b, c, d, a, in[10], 23, 3200236656); /* 40 */ MD5_HH ( a, b, c, d, z, 4, 681279174); /* 41 */ MD5_HH ( d, a, b, c, in[ 0], 11, 3936430074); /* 42 */ MD5_HH ( c, d, a, b, in[ 3], 16, 3572445317); /* 43 */ MD5_HH ( b, c, d, a, in[ 6], 23, 76029189); /* 44 */ MD5_HH ( a, b, c, d, in[ 9], 4, 3654602809); /* 45 */ MD5_HH ( d, a, b, c, in[12], 11, 3873151461); /* 46 */ MD5_HH ( c, d, a, b, y, 16, 530742520); /* 47 */ MD5_HH ( b, c, d, a, in[ 2], 23, 3299628645); /* 48 */ MD5_II ( a, b, c, d, in[ 0], 6, 4096336452); /* 49 */ MD5_II ( d, a, b, c, in[ 7], 10, 1126891415); /* 50 */ MD5_II ( c, d, a, b, x, 15, 2878612391); /* 51 */ MD5_II ( b, c, d, a, in[ 5], 21, 4237533241); /* 52 */ MD5_II ( a, b, c, d, in[12], 6, 1700485571); /* 53 */ MD5_II ( d, a, b, c, in[ 3], 10, 2399980690); /* 54 */ MD5_II ( c, d, a, b, in[10], 15, 4293915773); /* 55 */ MD5_II ( b, c, d, a, in[ 1], 21, 2240044497); /* 56 */ MD5_II ( a, b, c, d, in[ 8], 6, 1873313359); /* 57 */ MD5_II ( d, a, b, c, y, 10, 4264355552); /* 58 */ MD5_II ( c, d, a, b, in[ 6], 15, 2734768916); /* 59 */ MD5_II ( b, c, d, a, z, 21, 
1309151649); /* 60 */ MD5_II ( a, b, c, d, in[ 4], 6, 4149444226); /* 61 */ MD5_II ( d, a, b, c, in[11], 10, 3174756917); /* 62 */ MD5_II ( c, d, a, b, in[ 2], 15, 718787259); /* 63 */ MD5_II ( b, c, d, a, in[ 9], 21, 3951481745); /* 64 */ if (x <= y) { a += ihv1[0]; b += ihv1[1]; c += ihv1[2]; d += ihv1[3]; } else { a += ihv2mod[0]; b += ihv2mod[1]; c += ihv2mod[2]; d += ihv2mod[3]; } x = a; y = d - c; z = (d - b) & hybridmask; ++len; } { if (0 == (x & distinguishedpointmask)) { buffer2[idx].end[0] = x; buffer2[idx].end[1] = y; buffer2[idx].end[2] = z; buffer2[idx].len = len; buffer2[idx].start[0] = working_states2[idx].start[0]; buffer2[idx].start[1] = working_states2[idx].start[1]; buffer2[idx].start[2] = working_states2[idx].start[2]; x = uint32(seed>>32) ^ (threadIdx.x<<16) + len; y = uint32(seed) ^ blockIdx.x; z = 0; len = 0; working_states2[idx].start[0] = x; working_states2[idx].start[1] = y; working_states2[idx].start[2] = z; } } // __syncthreads(); } working_states2[idx].end[0] = x; working_states2[idx].end[1] = y; working_states2[idx].end[2] = z; working_states2[idx].len = len; } __global__ void cuda_md5_workmod(uint64 seed) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; buffer2[idx].len = 0; uint32 len = working_states2[idx].len; uint32 x = working_states2[idx].end[0]; uint32 y = working_states2[idx].end[1]; uint32 z = working_states2[idx].end[2]; if (len >= maximumpathlength || len == 0) { x = uint32(seed>>32) ^ threadIdx.x; y = uint32(seed) ^ blockIdx.x; z = 0; working_states2[idx].start[0] = x; working_states2[idx].start[1] = y; working_states2[idx].start[2] = z; len = 0; } // __syncthreads(); for (unsigned j = 0; j < 0x100; ++j) { { uint32* in = msg1; uint32 a = precomp1[0], b = precomp1[1], c = precomp1[2], d = precomp1[3]; if (x > y) { in = msg2; a = precomp2[0]; b = precomp2[1]; c = precomp2[2]; d = precomp2[3]; } MD5_FF ( d, a, b, c, z, 12, 4254626195); /* 14 */ MD5_FF ( c, d, a, b, x, 17, 2792965006); /* 15 */ MD5_FF ( b, c, d, a, y, 22, 1236535329); /* 16 */ MD5_GG ( a, b, c, d, in[ 1], 5, 4129170786); /* 17 */ MD5_GG ( d, a, b, c, in[ 6], 9, 3225465664); /* 18 */ MD5_GG ( c, d, a, b, in[11], 14, 643717713); /* 19 */ MD5_GG ( b, c, d, a, in[ 0], 20, 3921069994); /* 20 */ MD5_GG ( a, b, c, d, in[ 5], 5, 3593408605); /* 21 */ MD5_GG ( d, a, b, c, in[10], 9, 38016083); /* 22 */ MD5_GG ( c, d, a, b, y, 14, 3634488961); /* 23 */ MD5_GG ( b, c, d, a, in[ 4], 20, 3889429448); /* 24 */ MD5_GG ( a, b, c, d, in[ 9], 5, 568446438); /* 25 */ MD5_GG ( d, a, b, c, x, 9, 3275163606); /* 26 */ MD5_GG ( c, d, a, b, in[ 3], 14, 4107603335); /* 27 */ MD5_GG ( b, c, d, a, in[ 8], 20, 1163531501); /* 28 */ MD5_GG ( a, b, c, d, z, 5, 2850285829); /* 29 */ MD5_GG ( d, a, b, c, in[ 2], 9, 4243563512); /* 30 */ MD5_GG ( c, d, a, b, in[ 7], 14, 1735328473); /* 31 */ MD5_GG ( b, c, d, a, in[12], 20, 2368359562); /* 32 */ MD5_HH ( a, b, c, d, in[ 5], 4, 4294588738); /* 33 */ MD5_HH ( d, a, b, c, in[ 8], 11, 2272392833); /* 34 */ MD5_HH ( c, d, a, b, in[11], 16, 1839030562); /* 35 */ MD5_HH ( b, c, d, a, x, 23, 4259657740); /* 36 */ MD5_HH ( a, b, c, d, in[ 1], 4, 2763975236); /* 37 */ MD5_HH ( d, a, b, c, in[ 4], 11, 1272893353); /* 38 */ MD5_HH ( c, d, a, b, in[ 7], 16, 4139469664); /* 39 */ MD5_HH ( b, c, d, a, in[10], 23, 3200236656); /* 40 */ MD5_HH ( a, b, c, d, z, 4, 681279174); /* 41 */ MD5_HH ( d, a, b, c, in[ 0], 11, 3936430074); /* 42 */ MD5_HH ( c, d, a, b, in[ 3], 16, 3572445317); /* 43 */ MD5_HH ( b, c, d, a, in[ 6], 23, 76029189); /* 44 */ MD5_HH ( a, b, c, d, in[ 9], 4, 
3654602809); /* 45 */ MD5_HH ( d, a, b, c, in[12], 11, 3873151461); /* 46 */ MD5_HH ( c, d, a, b, y, 16, 530742520); /* 47 */ MD5_HH ( b, c, d, a, in[ 2], 23, 3299628645); /* 48 */ MD5_II ( a, b, c, d, in[ 0], 6, 4096336452); /* 49 */ MD5_II ( d, a, b, c, in[ 7], 10, 1126891415); /* 50 */ MD5_II ( c, d, a, b, x, 15, 2878612391); /* 51 */ MD5_II ( b, c, d, a, in[ 5], 21, 4237533241); /* 52 */ MD5_II ( a, b, c, d, in[12], 6, 1700485571); /* 53 */ MD5_II ( d, a, b, c, in[ 3], 10, 2399980690); /* 54 */ MD5_II ( c, d, a, b, in[10], 15, 4293915773); /* 55 */ MD5_II ( b, c, d, a, in[ 1], 21, 2240044497); /* 56 */ MD5_II ( a, b, c, d, in[ 8], 6, 1873313359); /* 57 */ MD5_II ( d, a, b, c, y, 10, 4264355552); /* 58 */ MD5_II ( c, d, a, b, in[ 6], 15, 2734768916); /* 59 */ MD5_II ( b, c, d, a, z, 21, 1309151649); /* 60 */ MD5_II ( a, b, c, d, in[ 4], 6, 4149444226); /* 61 */ MD5_II ( d, a, b, c, in[11], 10, 3174756917); /* 62 */ MD5_II ( c, d, a, b, in[ 2], 15, 718787259); /* 63 */ if (x <= y) { x = a + ihv1[0]; y = d + ihv1[3]; z = (c + ihv1[2]) & hybridmask; } else { x = a + ihv2mod[0]; y = d + ihv2mod[3]; z = (c + ihv2mod[2]) & hybridmask; } ++len; } { if (0 == (x & distinguishedpointmask)) { buffer2[idx].end[0] = x; buffer2[idx].end[1] = y; buffer2[idx].end[2] = z; buffer2[idx].len = len; buffer2[idx].start[0] = working_states2[idx].start[0]; buffer2[idx].start[1] = working_states2[idx].start[1]; buffer2[idx].start[2] = working_states2[idx].start[2]; x = uint32(seed>>32) ^ (threadIdx.x<<16) + len; y = uint32(seed) ^ blockIdx.x; z = 0; len = 0; working_states2[idx].start[0] = x; working_states2[idx].start[1] = y; working_states2[idx].start[2] = z; } } // __syncthreads(); } working_states2[idx].end[0] = x; working_states2[idx].end[1] = y; working_states2[idx].end[2] = z; working_states2[idx].len = len; } void cuda_device::cuda_fill_trail_buffer(uint32 id, uint64 seed, vector<trail_type>& buf, vector< pair<trail_type,trail_type> >& collisions, bool mod) { // transfer results cudaMemcpyFromSymbol(detail->buffer_host, buffer2, sizeof(trail_type)*detail->blocks*detail->threadsperblock); // start new cuda computation if (!mod) cuda_md5_work<<<detail->blocks, detail->threadsperblock>>>(seed); else cuda_md5_workmod<<<detail->blocks, detail->threadsperblock>>>(seed); // process and return results buf.clear(); for (unsigned i = 0; i < detail->blocks*detail->threadsperblock; ++i) if (detail->buffer_host[i].len) buf.push_back(detail->buffer_host[i]); } #ifdef _WIN32 #include <windows.h> #else #include <sys/time.h> #endif class timer_detail; class timer { public: timer(bool direct_start = false); ~timer(); void start(); void stop(); double time() const;// get time between start and stop (or now if still running) in seconds bool isrunning() const { return running; } // check if timer is running private: timer_detail* detail; bool running; }; class timer_detail { public: #ifdef _WIN32 LARGE_INTEGER tstart, tend; double freq; #else struct timeval tstart, tend; struct timezone tz; #endif }; timer::~timer() { delete detail; } timer::timer(bool direct_start): running(false) { detail = new timer_detail; #ifdef _WIN32 LARGE_INTEGER tmp_freq; QueryPerformanceFrequency(&tmp_freq); detail->freq = double(tmp_freq.QuadPart); #endif if (direct_start) start(); } #ifdef _WIN32 void timer::start() { running = true; QueryPerformanceCounter(&detail->tstart); } void timer::stop() { QueryPerformanceCounter(&detail->tend); running = false; } double timer::time() const { if (running) { LARGE_INTEGER tmp_end; 
QueryPerformanceCounter(&tmp_end); return (double(tmp_end.QuadPart) - double(detail->tstart.QuadPart))/detail->freq; } else return (double(detail->tend.QuadPart) - double(detail->tstart.QuadPart))/detail->freq; } #else void timer::start() { running = true; gettimeofday(&detail->tstart, &detail->tz); } void timer::stop() { gettimeofday(&detail->tend, &detail->tz); running = false; } double timer::time() const { double t1 = double(detail->tstart.tv_sec) + (double(detail->tstart.tv_usec)/1e6); if (running) { struct timeval tmp_end; gettimeofday(&tmp_end, &detail->tz); return double(tmp_end.tv_sec) + (double(tmp_end.tv_usec)/1e6) - t1; } else return double(detail->tend.tv_sec) + (double(detail->tend.tv_usec)/1e6) - t1; } #endif void cuda_device::benchmark() { timer sw; for (int blocksize = 4; blocksize <= 256; ++blocksize) for (int threadsize = 250; threadsize <= 257; ++threadsize) { sw.start(); uint64 work = 0; while (sw.time() < 10) { cuda_md5_work<<<blocksize, threadsize>>>(0); cudaMemcpyFromSymbol(detail->buffer_host, buffer2, sizeof(trail_type)*blocksize*threadsize); ++work; } uint64 ow = work; work *= 0x400 * blocksize * threadsize; cout << blocksize << "x" << threadsize << ":\t" << work << " (" << ow << ")" << endl; } } int get_num_cuda_devices() { int deviceCount = 0; cudaGetDeviceCount(&deviceCount); return deviceCount; } void cuda_device_query() { int deviceCount; cutilSafeCall(cudaGetDeviceCount(&deviceCount)); if (deviceCount == 0) printf("There is no device supporting CUDA\n"); int dev; for (dev = 0; dev < deviceCount; ++dev) { cudaDeviceProp deviceProp; cutilSafeCall(cudaGetDeviceProperties(&deviceProp, dev)); if (dev == 0) { if (deviceProp.major == 9999 && deviceProp.minor == 9999) printf("There is no device supporting CUDA.\n"); else if (deviceCount == 1) printf("There is 1 device supporting CUDA\n"); else printf("There are %d devices supporting CUDA\n", deviceCount); } printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name); printf(" Major revision number: %d\n", deviceProp.major); printf(" Minor revision number: %d\n", deviceProp.minor); printf(" Total amount of global memory: %u bytes\n", deviceProp.totalGlobalMem); #if CUDART_VERSION >= 2000 printf(" Number of multiprocessors: %d\n", deviceProp.multiProcessorCount); printf(" Number of cores: %d\n", 8 * deviceProp.multiProcessorCount); #endif printf(" Total amount of constant memory: %u bytes\n", deviceProp.totalConstMem); printf(" Total amount of shared memory per block: %u bytes\n", deviceProp.sharedMemPerBlock); printf(" Total number of registers available per block: %d\n", deviceProp.regsPerBlock); printf(" Warp size: %d\n", deviceProp.warpSize); printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock); printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]); printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]); printf(" Maximum memory pitch: %u bytes\n", deviceProp.memPitch); printf(" Texture alignment: %u bytes\n", deviceProp.textureAlignment); printf(" Clock rate: %.2f GHz\n", deviceProp.clockRate * 1e-6f); #if CUDART_VERSION >= 2000 printf(" Concurrent copy and execution: %s\n", deviceProp.deviceOverlap ? "Yes" : "No"); #endif } }
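The .hip/.cu pair above shows how mechanical the hipify translation is: runtime calls are renamed one-for-one (cudaMemcpyToSymbol -> hipMemcpyToSymbol, cudaGetDeviceProperties -> hipGetDeviceProperties) and triple-chevron launches become hipLaunchKernelGGL with explicit dim3/shared-memory/stream arguments. The snippet below is a minimal, self-contained CUDA sketch of the same constant-upload, kernel-launch, and symbol-readback pattern used around cuda_md5_init and buffer2; the names demo_kernel, mask and results are illustrative only and do not come from the file.

// Minimal sketch (not part of the file above): a __constant__ symbol and a
// __device__ output array, uploaded, launched and read back the same way the
// collision-search code handles distinguishedpointmask and buffer2.
#include <cstdio>
#include <cuda_runtime.h>

__constant__ unsigned int mask;          // stands in for distinguishedpointmask
__device__  unsigned int results[256];   // stands in for buffer2

__global__ void demo_kernel()
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    results[idx] = idx & mask;           // trivial stand-in for one trail step
}

int main()
{
    unsigned int hmask = 0xFFu;
    cudaMemcpyToSymbol(mask, &hmask, sizeof(hmask));    // hipify: hipMemcpyToSymbol

    demo_kernel<<<8, 32>>>();                           // hipify: hipLaunchKernelGGL(( demo_kernel), dim3(8), dim3(32), 0, 0, )
    cudaDeviceSynchronize();

    unsigned int host[256];
    cudaMemcpyFromSymbol(host, results, sizeof(host));  // hipify: hipMemcpyFromSymbol
    printf("results[42] = %u\n", host[42]);
    return 0;
}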
8651103ff0dcd6cd21bb1fcbb44348635420e703.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // BilateralFilter.cu // #include "BilateralFilter.h" #include <iostream> #include <cmath> using namespace std; #include "ErrorCode.h" // DEF_BLOCK_X DEF_BLOCK_Y // #define DEF_BLOCK_X 32 #define DEF_BLOCK_Y 8 // // static texture<unsigned char, 2, hipReadModeElementType> _bilateralInimgTex; // Host initTexture // static __host__ int // NO_ERROR _initTexture( Image *insubimg // ); // Kernel _bilateralFilterKer ImageCuda // static __global__ void // kernel _bilateralFilterKer( ImageCuda outimg, // int radius, // TemplateCuda gauCud, // TemplateCuda euCud // ); // Host initTexture static __host__ int _initTexture(Image *inimg) { hipError_t cuerrcode; int errcode; // // Device errcode = ImageBasicOp::copyToCurrentDevice(inimg); if (errcode != NO_ERROR) return errcode; // ROI ImageCuda insubimgCud; errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud); if (errcode != NO_ERROR) return errcode; // // struct hipChannelFormatDesc chndesc; chndesc = hipCreateChannelDesc(sizeof (unsigned char) * 8, 0, 0, 0, hipChannelFormatKindUnsigned); // texture cuerrcode = hipBindTexture2D( NULL, &_bilateralInimgTex, insubimgCud.imgMeta.imgData, &chndesc, insubimgCud.imgMeta.width, insubimgCud.imgMeta.height, insubimgCud.pitchBytes); if (cuerrcode != hipSuccess) return CUDA_ERROR; return NO_ERROR; } // Kernel _bilateralFilterKer ImageCuda static __global__ void _bilateralFilterKer( ImageCuda outimg, int radius, TemplateCuda gauCud, TemplateCuda euCud) { // if (radius <= 0|| radius > DEF_FILTER_RANGE) return; // int gi = (2 * radius + 1) * (2 * radius + 1); // dstc dstr // x y c columnr row // 4 // 4 dstr 4 int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // // if (dstc >= outimg.imgMeta.width || dstr >= outimg.imgMeta.height) return; // int dstidx = dstr * outimg.pitchBytes + dstc; // float sum[4] = { 0.0f, 0.0f, 0.0f, 0.0f }; // float factor; // float t[4] = { 0.0f, 0.0f, 0.0f, 0.0f }; // unsigned char center[4]; // center[0] = tex2D(_bilateralInimgTex, dstc, dstr); // center[1] = tex2D(_bilateralInimgTex, dstc, dstr + 1); // center[2] = tex2D(_bilateralInimgTex, dstc, dstr + 2); // center[3] = tex2D(_bilateralInimgTex, dstc, dstr + 3); for (int col = 0; col <= gi; col++) { // int i = gauCud.tplMeta.tplData[2 * col], j = gauCud.tplMeta.tplData[2 * col + 1]; // unsigned char curPix = tex2D(_bilateralInimgTex, dstc + j, dstr + i); // unsigned char euindex = curPix > center[0] ? curPix - center[0] : center[0] - curPix; // factor = gauCud.attachedData[col] * euCud.attachedData[euindex]; t[0] += factor * curPix; sum[0] += factor; // x // y x // ++ // return if (dstr + 1 >= outimg.imgMeta.height) continue; // curPix = tex2D(_bilateralInimgTex, dstc + j, dstr + i); // euindex = curPix > center[1] ? curPix - center[1] : center[1] - curPix; // factor = gauCud.attachedData[col] * euCud.attachedData[euindex]; t[1] += factor * curPix; sum[1] += factor; // x // y x // ++ // return if (dstr + 2 >= outimg.imgMeta.height) continue; // curPix = tex2D(_bilateralInimgTex, dstc + j, dstr + i); // euindex = curPix > center[2] ? curPix - center[2] : center[2] - curPix; // factor = gauCud.attachedData[col] * euCud.attachedData[euindex]; t[2] += factor * curPix; sum[2] += factor; // x // y x // ++ // return if (dstr + 3 >= outimg.imgMeta.height) continue; // curPix = tex2D(_bilateralInimgTex, dstc + j, dstr + i); // euindex = curPix > center[3] ? 
curPix - center[3] : center[3] - curPix; // factor = gauCud.attachedData[col] * euCud.attachedData[euindex]; t[3] += factor * curPix; sum[3] += factor; } // outimg.imgMeta.imgData[dstidx] = (unsigned char)(t[0] / sum[0]); // return if (++dstr >= outimg.imgMeta.height) return; // dstidx += outimg.pitchBytes; // outimg.imgMeta.imgData[dstidx] = (unsigned char)(t[1] / sum[1]); // if (++dstr >= outimg.imgMeta.height) return; dstidx += outimg.pitchBytes; outimg.imgMeta.imgData[dstidx] = (unsigned char)(t[2] / sum[2]); // if (++dstr >= outimg.imgMeta.height) return; dstidx += outimg.pitchBytes; outimg.imgMeta.imgData[dstidx] = (unsigned char)(t[3] / sum[3]); } // Host doFilter __host__ int BilateralFilter::doFilter(Image *inoutimg) { // if (radius <= 0 && radius > DEF_FILTER_RANGE) return INVALID_DATA; // 0 if (repeat <= 0) return INVALID_DATA; // NULL NULL if (inoutimg == NULL) return NULL_POINTER; // if (gaussian == NULL || euclid == NULL) return INVALID_DATA; int errcode; // // _initTexture(inoutimg); // Device errcode = TemplateBasicOp::copyToCurrentDevice(gaussian); if (errcode != NO_ERROR) return errcode; // Device errcode = TemplateBasicOp::copyToCurrentDevice(euclid); if (errcode != NO_ERROR) return errcode; // ROI ImageCuda inoutsubimgCud; errcode = ImageBasicOp::roiSubImage(inoutimg, &inoutsubimgCud); if (errcode != NO_ERROR) return errcode; // Kernel dim3 blocksize, gridsize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (inoutsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (inoutsubimgCud.imgMeta.height + blocksize.y * 4 - 1) / (blocksize.y * 4); // for (int i = 0; i < repeat; i++) { // hipLaunchKernelGGL(( _bilateralFilterKer), dim3(gridsize), dim3(blocksize), 0, 0, inoutsubimgCud, radius, *TEMPLATE_CUDA(gaussian), *TEMPLATE_CUDA(euclid)); } return NO_ERROR; } // Host doFilter __host__ int BilateralFilter::doFilter(Image *inimg, Image *outimg) { // if (radius <= 0 && radius > DEF_FILTER_RANGE) return INVALID_DATA; // 0 if (repeat <= 0) return INVALID_DATA; // NULL NULL if (inimg == NULL || outimg == NULL) return NULL_POINTER; // if (gaussian == NULL || euclid == NULL) return INVALID_DATA; int errcode; // // outimg // outimg inplace inimg // _initTexture(inimg); // Device errcode = TemplateBasicOp::copyToCurrentDevice(gaussian); if (errcode != NO_ERROR) return errcode; // Device errcode = TemplateBasicOp::copyToCurrentDevice(euclid); if (errcode != NO_ERROR) return errcode; // ROI ImageCuda insubimgCud; errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud); if (errcode != NO_ERROR) return errcode; // Device errcode = ImageBasicOp::copyToCurrentDevice(outimg); if (errcode != NO_ERROR) { errcode = ImageBasicOp::makeAtCurrentDevice( outimg, inimg->roiX2 - inimg->roiX1, inimg->roiY2 - inimg->roiY1); if (errcode != NO_ERROR) return errcode; } // ROI ImageCuda outsubimgCud; errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); if (errcode != NO_ERROR) return errcode; // if (insubimgCud.imgMeta.width > outsubimgCud.imgMeta.width) insubimgCud.imgMeta.width = outsubimgCud.imgMeta.width; else outsubimgCud.imgMeta.width = insubimgCud.imgMeta.width; if (insubimgCud.imgMeta.height > outsubimgCud.imgMeta.height) insubimgCud.imgMeta.height = outsubimgCud.imgMeta.height; else outsubimgCud.imgMeta.height = insubimgCud.imgMeta.height; // Kernel dim3 blocksize, gridsize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (outsubimgCud.imgMeta.height + 
blocksize.y * 4 - 1) / (blocksize.y * 4); // hipLaunchKernelGGL(( _bilateralFilterKer), dim3(gridsize), dim3(blocksize), 0, 0, outsubimgCud, radius, *TEMPLATE_CUDA(gaussian), *TEMPLATE_CUDA(euclid)); // for (int i = 1; i < repeat; i++) { // hipLaunchKernelGGL(( _bilateralFilterKer), dim3(gridsize), dim3(blocksize), 0, 0, outsubimgCud, radius, *TEMPLATE_CUDA(gaussian), *TEMPLATE_CUDA(euclid)); } return NO_ERROR; }
8651103ff0dcd6cd21bb1fcbb44348635420e703.cu
// BilateralFilter.cu // 实现图像的双边滤波 #include "BilateralFilter.h" #include <iostream> #include <cmath> using namespace std; #include "ErrorCode.h" // 宏:DEF_BLOCK_X 和 DEF_BLOCK_Y // 定义了默认的线程块的尺寸。 #define DEF_BLOCK_X 32 #define DEF_BLOCK_Y 8 // 纹理内存只能用于全局变量,使用全局存储时需要加入边界判断,经测试效率不及 // 纹理内存 static texture<unsigned char, 2, cudaReadModeElementType> _bilateralInimgTex; // Host 函数:initTexture(初始化纹理内存) // 将输入图像数据绑定到纹理内存 static __host__ int // 返回值:若正确执行返回 NO_ERROR _initTexture( Image *insubimg // 输入图像 ); // Kernel 函数:_bilateralFilterKer(使用 ImageCuda 实现的双边滤波) // 空域参数只影响高斯表,在调用该方法前初始化高斯表即可 static __global__ void // kernel 函数无返回值 _bilateralFilterKer( ImageCuda outimg, // 输出图像 int radius, // 双边滤波半径 TemplateCuda gauCud, // 高斯表 TemplateCuda euCud // 欧氏距离表 ); // Host 函数:initTexture(初始化纹理内存) static __host__ int _initTexture(Image *inimg) { cudaError_t cuerrcode; int errcode; // 局部变量,错误码 // 将输入图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToCurrentDevice(inimg); if (errcode != NO_ERROR) return errcode; // 提取输入图像的 ROI 子图像。 ImageCuda insubimgCud; errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud); if (errcode != NO_ERROR) return errcode; // 设置数据通道描述符,因为只有一个颜色通道(灰度图),因此描述符中只有第一 // 个分量含有数据。概述据通道描述符用于纹理内存的绑定操作。 struct cudaChannelFormatDesc chndesc; chndesc = cudaCreateChannelDesc(sizeof (unsigned char) * 8, 0, 0, 0, cudaChannelFormatKindUnsigned); // 将输入图像数据绑定至纹理内存(texture) cuerrcode = cudaBindTexture2D( NULL, &_bilateralInimgTex, insubimgCud.imgMeta.imgData, &chndesc, insubimgCud.imgMeta.width, insubimgCud.imgMeta.height, insubimgCud.pitchBytes); if (cuerrcode != cudaSuccess) return CUDA_ERROR; return NO_ERROR; } // Kernel 函数:_bilateralFilterKer(使用 ImageCuda 实现的双边滤波) static __global__ void _bilateralFilterKer( ImageCuda outimg, int radius, TemplateCuda gauCud, TemplateCuda euCud) { // 给定半径不在范围内时直接跳出 if (radius <= 0|| radius > DEF_FILTER_RANGE) return; // 半径对应的高斯表数组的下标 int gi = (2 * radius + 1) * (2 * radius + 1); // 计算想成对应的输出点的位置,其中 dstc 和 dstr 分别表示线程处理的像素点的 // 坐标的 x 和 y 分量(其中,c 表示 column;r 表示 row)。由于我们采用了并 // 行度缩减的策略,令一个线程处理 4 个输出像素,这四个像素位于统一列的相邻 // 4 行上,因此,对于 dstr 需要进行乘 4 计算。 int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (dstc >= outimg.imgMeta.width || dstr >= outimg.imgMeta.height) return; // 计算第一个输出坐标点对应的图像数据数组下标。 int dstidx = dstr * outimg.pitchBytes + dstc; // 邻域像素与参数乘积的累加值 float sum[4] = { 0.0f, 0.0f, 0.0f, 0.0f }; // 存储参数的临时变量 float factor; // 邻域参数的累加值 float t[4] = { 0.0f, 0.0f, 0.0f, 0.0f }; // 获取当前处理点的像素值,即为中心点,取同一列的四个点 unsigned char center[4]; // 第一个中心点 center[0] = tex2D(_bilateralInimgTex, dstc, dstr); // 第二个中心点,位于第一个中心点下方 center[1] = tex2D(_bilateralInimgTex, dstc, dstr + 1); // 处于同列的第三个中心点 center[2] = tex2D(_bilateralInimgTex, dstc, dstr + 2); // 处于同列的第四个中心点 center[3] = tex2D(_bilateralInimgTex, dstc, dstr + 3); for (int col = 0; col <= gi; col++) { // 获取当前处理点的横纵坐标 int i = gauCud.tplMeta.tplData[2 * col], j = gauCud.tplMeta.tplData[2 * col + 1]; // 获取当前处理点的像素值 unsigned char curPix = tex2D(_bilateralInimgTex, dstc + j, dstr + i); // 计算当前点与中心点的像素差值 unsigned char euindex = curPix > center[0] ? 
curPix - center[0] : center[0] - curPix; // 欧氏距离与高斯距离的乘积 factor = gauCud.attachedData[col] * euCud.attachedData[euindex]; t[0] += factor * curPix; sum[0] += factor; // 这三个像素点,每个像素点都在前一个的下一行,而 x 分量保持不变。因 // 此,需要检查这个像素点是否越界。检查只针对 y 分量即可,x 分量在各 // 点不变,由于是位于循环体内部不可直接进行 ++ 运算,且当列超出时也 // 不能进行 return,否则邻域扫描将终止,且输出图像不能赋值 if (dstr + 1 >= outimg.imgMeta.height) continue; // 获取当前处理点的像素值 curPix = tex2D(_bilateralInimgTex, dstc + j, dstr + i); // 计算当前点与中心点的像素差值 euindex = curPix > center[1] ? curPix - center[1] : center[1] - curPix; // 欧氏距离与高斯距离的乘积 factor = gauCud.attachedData[col] * euCud.attachedData[euindex]; t[1] += factor * curPix; sum[1] += factor; // 这三个像素点,每个像素点都在前一个的下一行,而 x 分量保持不变。因 // 此,需要检查这个像素点是否越界。检查只针对 y 分量即可,x 分量在各 // 点不变,由于是位于循环体内部不可直接进行 ++ 运算,且当列超出时也 // 不能进行 return,否则邻域扫描将终止,且输出图像不能赋值 if (dstr + 2 >= outimg.imgMeta.height) continue; // 获取当前处理点的像素值 curPix = tex2D(_bilateralInimgTex, dstc + j, dstr + i); // 计算当前点与中心点的像素差值 euindex = curPix > center[2] ? curPix - center[2] : center[2] - curPix; // 欧氏距离与高斯距离的乘积 factor = gauCud.attachedData[col] * euCud.attachedData[euindex]; t[2] += factor * curPix; sum[2] += factor; // 这三个像素点,每个像素点都在前一个的下一行,而 x 分量保持不变。因 // 此,需要检查这个像素点是否越界。检查只针对 y 分量即可,x 分量在各 // 点不变,由于是位于循环体内部不可直接进行 ++ 运算,且当列超出时也 // 不能进行 return,否则邻域扫描将终止,且输出图像不能赋值 if (dstr + 3 >= outimg.imgMeta.height) continue; // 获取当前处理点的像素值 curPix = tex2D(_bilateralInimgTex, dstc + j, dstr + i); // 计算当前点与中心点的像素差值 euindex = curPix > center[3] ? curPix - center[3] : center[3] - curPix; // 欧氏距离与高斯距离的乘积 factor = gauCud.attachedData[col] * euCud.attachedData[euindex]; t[3] += factor * curPix; sum[3] += factor; } // 对第一列的点进行赋值 outimg.imgMeta.imgData[dstidx] = (unsigned char)(t[0] / sum[0]); // 若列超出范围,此处可直接使用 return 直接跳出 if (++dstr >= outimg.imgMeta.height) return; // 将对应数据的下标加一行 dstidx += outimg.pitchBytes; // 对第二列的点进行赋值 outimg.imgMeta.imgData[dstidx] = (unsigned char)(t[1] / sum[1]); // 准备处理第三列 if (++dstr >= outimg.imgMeta.height) return; dstidx += outimg.pitchBytes; outimg.imgMeta.imgData[dstidx] = (unsigned char)(t[2] / sum[2]); // 处理第四列 if (++dstr >= outimg.imgMeta.height) return; dstidx += outimg.pitchBytes; outimg.imgMeta.imgData[dstidx] = (unsigned char)(t[3] / sum[3]); } // Host 成员方法:doFilter(执行滤波) __host__ int BilateralFilter::doFilter(Image *inoutimg) { // 给定半径不在范围内时直接跳出 if (radius <= 0 && radius > DEF_FILTER_RANGE) return INVALID_DATA; // 若滤波的重复次数为 0 ,则不进行滤波返回正确执行 if (repeat <= 0) return INVALID_DATA; // 检查输入图像是否为 NULL,如果为 NULL 直接报错返回。 if (inoutimg == NULL) return NULL_POINTER; // 检查模板数据 if (gaussian == NULL || euclid == NULL) return INVALID_DATA; int errcode; // 局部变量,错误码 // 初始化纹理内存,将输入图像与之绑定 _initTexture(inoutimg); // 将高斯模板数据拷贝至 Device 端避免核函数中无法访问 errcode = TemplateBasicOp::copyToCurrentDevice(gaussian); if (errcode != NO_ERROR) return errcode; // 将欧式距离模板数据拷贝至 Device 端避免核函数中无法访问 errcode = TemplateBasicOp::copyToCurrentDevice(euclid); if (errcode != NO_ERROR) return errcode; // 提取输入图像的 ROI 子图像。 ImageCuda inoutsubimgCud; errcode = ImageBasicOp::roiSubImage(inoutimg, &inoutsubimgCud); if (errcode != NO_ERROR) return errcode; // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 dim3 blocksize, gridsize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (inoutsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (inoutsubimgCud.imgMeta.height + blocksize.y * 4 - 1) / (blocksize.y * 4); // 进行重复滤波以提高质量 for (int i = 0; i < repeat; i++) { // 调用核函数进行滤波 _bilateralFilterKer<<<gridsize, blocksize>>>( inoutsubimgCud, radius, *TEMPLATE_CUDA(gaussian), *TEMPLATE_CUDA(euclid)); } return NO_ERROR; } // 
Host 成员方法:doFilter(执行滤波) __host__ int BilateralFilter::doFilter(Image *inimg, Image *outimg) { // 给定半径不在范围内时直接跳出 if (radius <= 0 && radius > DEF_FILTER_RANGE) return INVALID_DATA; // 若滤波的重复次数为 0 ,则不进行滤波返回正确执行 if (repeat <= 0) return INVALID_DATA; // 检查输入图像是否为 NULL,如果为 NULL 直接报错返回。 if (inimg == NULL || outimg == NULL) return NULL_POINTER; // 检查模板数据 if (gaussian == NULL || euclid == NULL) return INVALID_DATA; int errcode; // 局部变量,错误码 // 初始化纹理内存,将输入图像与之绑定,需将第一次运行结果保存至 outimg , // 之后的重复则相当于在 outimg 上的 inplace 版本,这样保证了 inimg 中的数据 // 一致性 _initTexture(inimg); // 将高斯模板数据拷贝至 Device 端避免核函数中无法访问 errcode = TemplateBasicOp::copyToCurrentDevice(gaussian); if (errcode != NO_ERROR) return errcode; // 将欧式距离模板数据拷贝至 Device 端避免核函数中无法访问 errcode = TemplateBasicOp::copyToCurrentDevice(euclid); if (errcode != NO_ERROR) return errcode; // 提取输入图像的 ROI 子图像。 ImageCuda insubimgCud; errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud); if (errcode != NO_ERROR) return errcode; // 将输出图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToCurrentDevice(outimg); if (errcode != NO_ERROR) { errcode = ImageBasicOp::makeAtCurrentDevice( outimg, inimg->roiX2 - inimg->roiX1, inimg->roiY2 - inimg->roiY1); if (errcode != NO_ERROR) return errcode; } // 提取输出图像的 ROI 子图像。 ImageCuda outsubimgCud; errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); if (errcode != NO_ERROR) return errcode; // 根据子图像的大小对长,宽进行调整,选择长度小的长,宽进行子图像的统一 if (insubimgCud.imgMeta.width > outsubimgCud.imgMeta.width) insubimgCud.imgMeta.width = outsubimgCud.imgMeta.width; else outsubimgCud.imgMeta.width = insubimgCud.imgMeta.width; if (insubimgCud.imgMeta.height > outsubimgCud.imgMeta.height) insubimgCud.imgMeta.height = outsubimgCud.imgMeta.height; else outsubimgCud.imgMeta.height = insubimgCud.imgMeta.height; // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 dim3 blocksize, gridsize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y * 4 - 1) / (blocksize.y * 4); // 调用核函数进行滤波 _bilateralFilterKer<<<gridsize, blocksize>>>( outsubimgCud, radius, *TEMPLATE_CUDA(gaussian), *TEMPLATE_CUDA(euclid)); // 进行重复滤波以提高质量 for (int i = 1; i < repeat; i++) { // 调用核函数进行滤波 _bilateralFilterKer<<<gridsize, blocksize>>>( outsubimgCud, radius, *TEMPLATE_CUDA(gaussian), *TEMPLATE_CUDA(euclid)); } return NO_ERROR; }
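In both versions of the bilateral filter the kernel amortizes per-thread setup by writing four vertically adjacent output pixels per thread, which is why gridsize.y is divided by blocksize.y * 4. Below is a stripped-down sketch of that indexing and grid computation using a plain row-major buffer instead of the ImageCuda pitch layout; the buffer layout and the copy4rows name are assumptions made for brevity, not part of the file.

// Sketch only: one thread handles one column and four consecutive rows,
// mirroring dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4 above.
#include <cuda_runtime.h>

__global__ void copy4rows(const unsigned char *in, unsigned char *out,
                          int width, int height)
{
    int c = blockIdx.x * blockDim.x + threadIdx.x;        // column
    int r = (blockIdx.y * blockDim.y + threadIdx.y) * 4;  // first of four rows
    if (c >= width || r >= height) return;

    for (int k = 0; k < 4 && r + k < height; ++k)         // tail block may be short
        out[(r + k) * width + c] = in[(r + k) * width + c];
}

void launch_copy4rows(const unsigned char *d_in, unsigned char *d_out,
                      int width, int height)
{
    dim3 block(32, 8);                                     // DEF_BLOCK_X x DEF_BLOCK_Y
    dim3 grid((width  + block.x - 1) / block.x,
              (height + block.y * 4 - 1) / (block.y * 4)); // y: four rows per thread
    copy4rows<<<grid, block>>>(d_in, d_out, width, height);
}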
19ae2061cee7497ec65823584bb4a8d79da43dec.hip
// !!! This is a file automatically generated by hipify!!! /** * fdtd2d.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <[email protected]> * Louis-Noel Pouchet <[email protected]> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <unistd.h> #include <sys/time.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "polybenchUtilFuncts.h" //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 10.05 #define GPU_DEVICE 0 /* Problem size */ #define tmax 1 #define NX 4096 #define NY 4096 /* Thread block dimensions */ #define DIM_THREAD_BLOCK_X 1024 #define DIM_THREAD_BLOCK_Y 1 #define NUM_STREAMS 4 #define NUM_CHUNK 32 #define CHUNK_SIZE NY/NUM_CHUNK //4096/4= 1024 /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void init_arrays(DATA_TYPE* _fict_, DATA_TYPE* ex, DATA_TYPE* ey, DATA_TYPE* hz) { int i, j; for (i = 0; i < tmax; i++) { _fict_[i] = (DATA_TYPE) i; } for (i = 0; i < NX; i++) { for (j = 0; j < NY; j++) { ex[i*NY + j] = ((DATA_TYPE) i*(j+1) + 1) / NX; ey[i*NY + j] = ((DATA_TYPE) (i-1)*(j+2) + 2) / NX; hz[i*NY + j] = ((DATA_TYPE) (i-9)*(j+4) + 3) / NX; } } } void runFdtd(DATA_TYPE* _fict_, DATA_TYPE* ex, DATA_TYPE* ey, DATA_TYPE* hz) { int t, i, j; for (t=0; t < tmax; t++) { for (j=0; j < NY; j++) { ey[0*NY + j] = _fict_[0]; } for (i = 1; i < NX; i++) { for (j = 0; j < NY; j++) { ey[i*NY + j] = ey[i*NY + j] - 0.5*(hz[i*NY + j] - hz[(i-1)*NY + j]); } } for (i = 0; i < NX; i++) { for (j = 1; j < NY; j++) { ex[i*(NY+1) + j] = ex[i*(NY+1) + j] - 0.5*(hz[i*NY + j] - hz[i*NY + (j-1)]); } } for (i = 0; i < NX; i++) { for (j = 0; j < NY; j++) { hz[i*NY + j] = hz[i*NY + j] - 0.7*(ex[i*(NY+1) + (j+1)] - ex[i*(NY+1) + j] + ey[(i+1)*NY + j] - ey[i*NY + j]); } } } } void compareResults(DATA_TYPE* hz1, DATA_TYPE* hz2) { int i, j, fail; fail = 0; for (i=0; i < NX; i++) { for (j=0; j < NY; j++) { if (percentDiff(hz1[i*NY + j], hz2[i*NY + j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } // Print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void GPU_argv_init() { hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, GPU_DEVICE); printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name); hipSetDevice( GPU_DEVICE ); } __global__ void fdtd_step1_kernel(DATA_TYPE* _fict_, DATA_TYPE *ex, DATA_TYPE *ey, DATA_TYPE *hz, int i_base) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = i_base+blockIdx.y ; if ((i < NX) && (j < NY)) { if (i == 0) { ey[i * NY + j] = 0; } else { ey[i * NY + j] = ey[i * NY + j] - 0.5f*(hz[i * NY + j] - hz[(i-1) * NY + j]); } } } __global__ void fdtd_step2_kernel(DATA_TYPE *ex, DATA_TYPE *ey, DATA_TYPE *hz, int i_base) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = i_base + blockIdx.y; if ((i < NX) && (j < NY) && (j > 0)) { ex[i * (NY+1) + j] = ex[i * (NY+1) + j] - 0.5f*(hz[i * NY + j] - hz[i * NY + (j-1)]); } } __global__ void fdtd_step3_kernel(DATA_TYPE *ex, DATA_TYPE *ey, DATA_TYPE *hz, int i_base,DATA_TYPE *hz_out) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = i_base+blockIdx.y; if ((i < NX) && (j < NY)) { hz_out[i * NY + j] = hz[i * NY + j] - 0.7f*(ex[i * (NY+1) + (j+1)] - ex[i * (NY+1) + j] + ey[(i + 1) * NY + j] - ey[i * NY + j]); } } void fdtdCuda(DATA_TYPE* _fict_, DATA_TYPE* ex, DATA_TYPE* ey, 
DATA_TYPE* hz, DATA_TYPE* hz_outputFromGpu) { DATA_TYPE *_fict_gpu; DATA_TYPE *ex_gpu; DATA_TYPE *ey_gpu; DATA_TYPE *hz_gpu; DATA_TYPE *hz_gpu_out; hipMalloc((void **)&_fict_gpu, sizeof(DATA_TYPE) * tmax); hipMalloc((void **)&ex_gpu, sizeof(DATA_TYPE) * NX * (NY + 1)); hipMalloc((void **)&ey_gpu, sizeof(DATA_TYPE) * (NX + 1) * NY); hipMalloc((void **)&hz_gpu, sizeof(DATA_TYPE) * NX * NY); hipMalloc((void **)&hz_gpu_out, sizeof(DATA_TYPE) * NX * NY); hipEvent_t start,stop; float elapsedTimeInMs = 0.0f; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); hipStream_t streams[NUM_STREAMS]; for (int i=0; i< NUM_STREAMS; i++) hipStreamCreate(&(streams[i])); dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y); dim3 grid( (size_t)ceil(((float)NY) / ((float)block.x)), (size_t)(CHUNK_SIZE) ); //hipMemcpy(ex_gpu, ex, sizeof(DATA_TYPE) * NX * (NY + 1), hipMemcpyHostToDevice); //hipMemcpy(ey_gpu, ey, sizeof(DATA_TYPE) * (NX + 1) * NY, hipMemcpyHostToDevice); //hipMemcpy(hz_gpu, hz, sizeof(DATA_TYPE) * NX * NY, hipMemcpyHostToDevice); for(int t = 0; t < NUM_CHUNK; t++) { int i_base; i_base=CHUNK_SIZE*t; hipMemcpyAsync(ex_gpu+i_base*(NY+1), ex+i_base*(NY+1), sizeof(DATA_TYPE) * CHUNK_SIZE * (NY + 1), hipMemcpyHostToDevice,streams[t % NUM_STREAMS]); hipMemcpyAsync(ey_gpu+i_base*NY, ey+i_base*NY, sizeof(DATA_TYPE) * CHUNK_SIZE * NY, hipMemcpyHostToDevice,streams[t % NUM_STREAMS]); hipMemcpyAsync(hz_gpu+i_base*NY, hz+i_base*NY, sizeof(DATA_TYPE) * CHUNK_SIZE * NY, hipMemcpyHostToDevice,streams[t % NUM_STREAMS]); hipLaunchKernelGGL(( fdtd_step1_kernel), dim3(grid),dim3(block),0,streams[t % NUM_STREAMS], _fict_gpu, ex_gpu, ey_gpu, hz_gpu, i_base); hipLaunchKernelGGL(( fdtd_step2_kernel), dim3(grid),dim3(block),0,streams[t % NUM_STREAMS], ex_gpu, ey_gpu, hz_gpu, i_base); if (t>0){ hipLaunchKernelGGL(( fdtd_step3_kernel), dim3(grid),dim3(block),0,streams[t % NUM_STREAMS], ex_gpu, ey_gpu, hz_gpu, i_base-CHUNK_SIZE,hz_gpu_out); hipMemcpyAsync(hz_outputFromGpu+(i_base-CHUNK_SIZE)*NY, hz_gpu_out+(i_base-CHUNK_SIZE)*NY, sizeof(DATA_TYPE) * CHUNK_SIZE * NY, hipMemcpyDeviceToHost,streams[t % NUM_STREAMS]); }else{ hipMemcpyAsync(hz_outputFromGpu+(i_base-CHUNK_SIZE)*NY, hz_gpu_out+(i_base-CHUNK_SIZE)*NY, 0, hipMemcpyDeviceToHost,streams[t % NUM_STREAMS]); } } //fdtd_step3_kernel<<<grid,block,0,0>>>(ex_gpu, ey_gpu, hz_gpu, NX-CHUNK_SIZE,hz_gpu_out); hipLaunchKernelGGL(( fdtd_step3_kernel), dim3(grid),dim3(block),0,streams[(NUM_CHUNK-1) % NUM_STREAMS], ex_gpu, ey_gpu, hz_gpu, NX-CHUNK_SIZE,hz_gpu_out); hipMemcpyAsync(hz_outputFromGpu+(NX-CHUNK_SIZE)*NY, hz_gpu_out+(NX-CHUNK_SIZE)*NY, sizeof(DATA_TYPE) * CHUNK_SIZE * NY, hipMemcpyDeviceToHost,streams[(NUM_CHUNK-1) % NUM_STREAMS]); //hipMemcpyAsync(hz_outputFromGpu+(NX-CHUNK_SIZE)*NY, hz_gpu_out+(NX-CHUNK_SIZE)*NY, sizeof(DATA_TYPE) * CHUNK_SIZE * NY, hipMemcpyDeviceToHost,0); //hipMemcpy(hz_outputFromGpu, hz_gpu_out, sizeof(DATA_TYPE) * NX * NY, hipMemcpyDeviceToHost); hipDeviceSynchronize(); hipDeviceSynchronize(); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTimeInMs, start, stop); fprintf(stdout,"GPU RunTime= %.1f Ms \n", elapsedTimeInMs); hipFree(_fict_gpu); hipFree(ex_gpu); hipFree(ey_gpu); hipFree(hz_gpu); } int main() { double t_start, t_end; DATA_TYPE* _fict_; DATA_TYPE* ex; DATA_TYPE* ey; DATA_TYPE* hz; DATA_TYPE* hz_outputFromGpu; /* _fict_ = (DATA_TYPE*)malloc(tmax*sizeof(DATA_TYPE)); ex = (DATA_TYPE*)malloc(NX*(NY+1)*sizeof(DATA_TYPE)); ey = (DATA_TYPE*)malloc((NX+1)*NY*sizeof(DATA_TYPE)); hz = 
(DATA_TYPE*)malloc(NX*NY*sizeof(DATA_TYPE)); hz_outputFromGpu = (DATA_TYPE*)malloc(NX*NY*sizeof(DATA_TYPE)); */ hipHostMalloc((void **)&_fict_, sizeof(DATA_TYPE) * tmax, hipHostMallocPortable); hipHostMalloc((void **)&ex, sizeof(DATA_TYPE)*NX*(NY+1), hipHostMallocPortable); hipHostMalloc((void **)&ey, sizeof(DATA_TYPE)*NX*(NY+1), hipHostMallocPortable); hipHostMalloc((void **)&hz, sizeof(DATA_TYPE)*NX*NY, hipHostMallocPortable); hipHostMalloc((void **)&hz_outputFromGpu, sizeof(DATA_TYPE)*NX*NY, hipHostMallocPortable); init_arrays(_fict_, ex, ey, hz); GPU_argv_init(); fdtdCuda(_fict_, ex, ey, hz, hz_outputFromGpu); /* t_start = rtclock(); runFdtd(_fict_, ex, ey, hz); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); compareResults(hz, hz_outputFromGpu); */ hipFree(_fict_); hipFree(ex); hipFree(ey); hipFree(hz); hipFree(hz_outputFromGpu); return 0; }
19ae2061cee7497ec65823584bb4a8d79da43dec.cu
/**
 * fdtd2d.cu: This file is part of the PolyBench/GPU 1.0 test suite.
 *
 *
 * Contact: Scott Grauer-Gray <[email protected]>
 * Louis-Noel Pouchet <[email protected]>
 * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
 */

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <cuda.h>
#include <cuda_runtime.h>

#include "polybenchUtilFuncts.h"

// define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 10.05

#define GPU_DEVICE 0

/* Problem size */
#define tmax 1
#define NX 4096
#define NY 4096

/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 1024
#define DIM_THREAD_BLOCK_Y 1

#define NUM_STREAMS 4
#define NUM_CHUNK 32
#define CHUNK_SIZE (NY/NUM_CHUNK) // 4096/32 = 128 rows per chunk

/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;

void init_arrays(DATA_TYPE* _fict_, DATA_TYPE* ex, DATA_TYPE* ey, DATA_TYPE* hz)
{
	int i, j;

	for (i = 0; i < tmax; i++)
	{
		_fict_[i] = (DATA_TYPE) i;
	}

	for (i = 0; i < NX; i++)
	{
		for (j = 0; j < NY; j++)
		{
			ex[i*NY + j] = ((DATA_TYPE) i*(j+1) + 1) / NX;
			ey[i*NY + j] = ((DATA_TYPE) (i-1)*(j+2) + 2) / NX;
			hz[i*NY + j] = ((DATA_TYPE) (i-9)*(j+4) + 3) / NX;
		}
	}
}

void runFdtd(DATA_TYPE* _fict_, DATA_TYPE* ex, DATA_TYPE* ey, DATA_TYPE* hz)
{
	int t, i, j;

	for (t = 0; t < tmax; t++)
	{
		for (j = 0; j < NY; j++)
		{
			ey[0*NY + j] = _fict_[0];
		}

		for (i = 1; i < NX; i++)
		{
			for (j = 0; j < NY; j++)
			{
				ey[i*NY + j] = ey[i*NY + j] - 0.5*(hz[i*NY + j] - hz[(i-1)*NY + j]);
			}
		}

		for (i = 0; i < NX; i++)
		{
			for (j = 1; j < NY; j++)
			{
				ex[i*(NY+1) + j] = ex[i*(NY+1) + j] - 0.5*(hz[i*NY + j] - hz[i*NY + (j-1)]);
			}
		}

		for (i = 0; i < NX; i++)
		{
			for (j = 0; j < NY; j++)
			{
				hz[i*NY + j] = hz[i*NY + j] - 0.7*(ex[i*(NY+1) + (j+1)] - ex[i*(NY+1) + j] + ey[(i+1)*NY + j] - ey[i*NY + j]);
			}
		}
	}
}

void compareResults(DATA_TYPE* hz1, DATA_TYPE* hz2)
{
	int i, j, fail;
	fail = 0;

	for (i = 0; i < NX; i++)
	{
		for (j = 0; j < NY; j++)
		{
			if (percentDiff(hz1[i*NY + j], hz2[i*NY + j]) > PERCENT_DIFF_ERROR_THRESHOLD)
			{
				fail++;
			}
		}
	}

	// Print results
	printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}

void GPU_argv_init()
{
	cudaDeviceProp deviceProp;
	cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
	printf("setting device %d with name %s\n", GPU_DEVICE, deviceProp.name);
	cudaSetDevice(GPU_DEVICE);
}

__global__ void fdtd_step1_kernel(DATA_TYPE* _fict_, DATA_TYPE *ex, DATA_TYPE *ey, DATA_TYPE *hz, int i_base)
{
	int j = blockIdx.x * blockDim.x + threadIdx.x;
	int i = i_base + blockIdx.y;

	if ((i < NX) && (j < NY))
	{
		if (i == 0)
		{
			ey[i * NY + j] = 0;
		}
		else
		{
			ey[i * NY + j] = ey[i * NY + j] - 0.5f*(hz[i * NY + j] - hz[(i-1) * NY + j]);
		}
	}
}

__global__ void fdtd_step2_kernel(DATA_TYPE *ex, DATA_TYPE *ey, DATA_TYPE *hz, int i_base)
{
	int j = blockIdx.x * blockDim.x + threadIdx.x;
	int i = i_base + blockIdx.y;

	if ((i < NX) && (j < NY) && (j > 0))
	{
		ex[i * (NY+1) + j] = ex[i * (NY+1) + j] - 0.5f*(hz[i * NY + j] - hz[i * NY + (j-1)]);
	}
}

__global__ void fdtd_step3_kernel(DATA_TYPE *ex, DATA_TYPE *ey, DATA_TYPE *hz, int i_base, DATA_TYPE *hz_out)
{
	int j = blockIdx.x * blockDim.x + threadIdx.x;
	int i = i_base + blockIdx.y;

	if ((i < NX) && (j < NY))
	{
		hz_out[i * NY + j] = hz[i * NY + j] - 0.7f*(ex[i * (NY+1) + (j+1)] - ex[i * (NY+1) + j] + ey[(i + 1) * NY + j] - ey[i * NY + j]);
	}
}

void fdtdCuda(DATA_TYPE* _fict_, DATA_TYPE* ex, DATA_TYPE* ey, DATA_TYPE* hz, DATA_TYPE* hz_outputFromGpu)
{
	DATA_TYPE *_fict_gpu;
	DATA_TYPE *ex_gpu;
	DATA_TYPE *ey_gpu;
	DATA_TYPE *hz_gpu;
	DATA_TYPE *hz_gpu_out;

	cudaMalloc((void **)&_fict_gpu, sizeof(DATA_TYPE) * tmax);
	cudaMalloc((void **)&ex_gpu, sizeof(DATA_TYPE) * NX * (NY + 1));
	cudaMalloc((void **)&ey_gpu, sizeof(DATA_TYPE) * (NX + 1) * NY);
	cudaMalloc((void **)&hz_gpu, sizeof(DATA_TYPE) * NX * NY);
	cudaMalloc((void **)&hz_gpu_out, sizeof(DATA_TYPE) * NX * NY);

	cudaEvent_t start, stop;
	float elapsedTimeInMs = 0.0f;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start);

	cudaStream_t streams[NUM_STREAMS];
	for (int i = 0; i < NUM_STREAMS; i++)
	{
		cudaStreamCreate(&(streams[i]));
	}

	dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
	dim3 grid((size_t)ceil(((float)NY) / ((float)block.x)), (size_t)(CHUNK_SIZE));

	//cudaMemcpy(ex_gpu, ex, sizeof(DATA_TYPE) * NX * (NY + 1), cudaMemcpyHostToDevice);
	//cudaMemcpy(ey_gpu, ey, sizeof(DATA_TYPE) * (NX + 1) * NY, cudaMemcpyHostToDevice);
	//cudaMemcpy(hz_gpu, hz, sizeof(DATA_TYPE) * NX * NY, cudaMemcpyHostToDevice);

	for (int t = 0; t < NUM_CHUNK; t++)
	{
		int i_base = CHUNK_SIZE * t;

		// Stage rows [i_base, i_base+CHUNK_SIZE) of each field on this chunk's stream.
		cudaMemcpyAsync(ex_gpu+i_base*(NY+1), ex+i_base*(NY+1), sizeof(DATA_TYPE) * CHUNK_SIZE * (NY + 1), cudaMemcpyHostToDevice, streams[t % NUM_STREAMS]);
		cudaMemcpyAsync(ey_gpu+i_base*NY, ey+i_base*NY, sizeof(DATA_TYPE) * CHUNK_SIZE * NY, cudaMemcpyHostToDevice, streams[t % NUM_STREAMS]);
		cudaMemcpyAsync(hz_gpu+i_base*NY, hz+i_base*NY, sizeof(DATA_TYPE) * CHUNK_SIZE * NY, cudaMemcpyHostToDevice, streams[t % NUM_STREAMS]);

		fdtd_step1_kernel<<<grid, block, 0, streams[t % NUM_STREAMS]>>>(_fict_gpu, ex_gpu, ey_gpu, hz_gpu, i_base);
		fdtd_step2_kernel<<<grid, block, 0, streams[t % NUM_STREAMS]>>>(ex_gpu, ey_gpu, hz_gpu, i_base);

		// Step 3 for a chunk needs ey rows that arrive with the following chunk, so it
		// runs one chunk behind and its hz rows are copied back as they are produced.
		if (t > 0)
		{
			fdtd_step3_kernel<<<grid, block, 0, streams[t % NUM_STREAMS]>>>(ex_gpu, ey_gpu, hz_gpu, i_base-CHUNK_SIZE, hz_gpu_out);
			cudaMemcpyAsync(hz_outputFromGpu+(i_base-CHUNK_SIZE)*NY, hz_gpu_out+(i_base-CHUNK_SIZE)*NY, sizeof(DATA_TYPE) * CHUNK_SIZE * NY, cudaMemcpyDeviceToHost, streams[t % NUM_STREAMS]);
		}
	}

	// Finish step 3 for the last chunk and copy its rows back.
	//fdtd_step3_kernel<<<grid,block,0,0>>>(ex_gpu, ey_gpu, hz_gpu, NX-CHUNK_SIZE, hz_gpu_out);
	fdtd_step3_kernel<<<grid, block, 0, streams[(NUM_CHUNK-1) % NUM_STREAMS]>>>(ex_gpu, ey_gpu, hz_gpu, NX-CHUNK_SIZE, hz_gpu_out);
	cudaMemcpyAsync(hz_outputFromGpu+(NX-CHUNK_SIZE)*NY, hz_gpu_out+(NX-CHUNK_SIZE)*NY, sizeof(DATA_TYPE) * CHUNK_SIZE * NY, cudaMemcpyDeviceToHost, streams[(NUM_CHUNK-1) % NUM_STREAMS]);
	//cudaMemcpyAsync(hz_outputFromGpu+(NX-CHUNK_SIZE)*NY, hz_gpu_out+(NX-CHUNK_SIZE)*NY, sizeof(DATA_TYPE) * CHUNK_SIZE * NY, cudaMemcpyDeviceToHost, 0);
	//cudaMemcpy(hz_outputFromGpu, hz_gpu_out, sizeof(DATA_TYPE) * NX * NY, cudaMemcpyDeviceToHost);

	cudaDeviceSynchronize();

	cudaEventRecord(stop);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&elapsedTimeInMs, start, stop);
	fprintf(stdout, "GPU RunTime= %.1f ms \n", elapsedTimeInMs);

	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	for (int i = 0; i < NUM_STREAMS; i++)
	{
		cudaStreamDestroy(streams[i]);
	}

	cudaFree(_fict_gpu);
	cudaFree(ex_gpu);
	cudaFree(ey_gpu);
	cudaFree(hz_gpu);
	cudaFree(hz_gpu_out);
}

int main()
{
	double t_start, t_end;

	DATA_TYPE* _fict_;
	DATA_TYPE* ex;
	DATA_TYPE* ey;
	DATA_TYPE* hz;
	DATA_TYPE* hz_outputFromGpu;

	/*
	_fict_ = (DATA_TYPE*)malloc(tmax*sizeof(DATA_TYPE));
	ex = (DATA_TYPE*)malloc(NX*(NY+1)*sizeof(DATA_TYPE));
	ey = (DATA_TYPE*)malloc((NX+1)*NY*sizeof(DATA_TYPE));
	hz = (DATA_TYPE*)malloc(NX*NY*sizeof(DATA_TYPE));
	hz_outputFromGpu = (DATA_TYPE*)malloc(NX*NY*sizeof(DATA_TYPE));
	*/

	// Pinned host allocations so the chunked cudaMemcpyAsync calls can overlap with kernels.
	cudaHostAlloc((void **)&_fict_, sizeof(DATA_TYPE) * tmax, cudaHostAllocPortable);
	cudaHostAlloc((void **)&ex, sizeof(DATA_TYPE)*NX*(NY+1), cudaHostAllocPortable);
	cudaHostAlloc((void **)&ey, sizeof(DATA_TYPE)*NX*(NY+1), cudaHostAllocPortable);
	cudaHostAlloc((void **)&hz, sizeof(DATA_TYPE)*NX*NY, cudaHostAllocPortable);
	cudaHostAlloc((void **)&hz_outputFromGpu, sizeof(DATA_TYPE)*NX*NY, cudaHostAllocPortable);

	init_arrays(_fict_, ex, ey, hz);

	GPU_argv_init();
	fdtdCuda(_fict_, ex, ey, hz, hz_outputFromGpu);

	/*
	t_start = rtclock();
	runFdtd(_fict_, ex, ey, hz);
	t_end = rtclock();

	fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);

	compareResults(hz, hz_outputFromGpu);
	*/

	// These buffers came from cudaHostAlloc, so release them with cudaFreeHost.
	cudaFreeHost(_fict_);
	cudaFreeHost(ex);
	cudaFreeHost(ey);
	cudaFreeHost(hz);
	cudaFreeHost(hz_outputFromGpu);

	return 0;
}
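The fdtdCuda routine above overlaps chunked host-to-device copies, per-chunk kernels, and device-to-host copies by rotating work across a small pool of streams. For reference, a minimal self-contained sketch of that copy/compute/copy-back overlap pattern (the process kernel, chunk count, and sizes here are illustrative assumptions, not taken from the file above) looks like this:

// Sketch only: chunked copy/compute/copy-back overlap across two streams.
#include <cuda_runtime.h>

__global__ void process(float *buf, int n)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < n) buf[i] += 1.0f;
}

int main()
{
	const int nChunks = 4, chunkElems = 1 << 20;
	const size_t chunkBytes = chunkElems * sizeof(float);

	float *h_data, *d_data;
	cudaHostAlloc((void **)&h_data, nChunks * chunkBytes, cudaHostAllocPortable); // pinned, so async copies can overlap
	cudaMalloc((void **)&d_data, nChunks * chunkBytes);

	cudaStream_t streams[2];
	for (int s = 0; s < 2; s++) cudaStreamCreate(&streams[s]);

	for (int c = 0; c < nChunks; c++)
	{
		cudaStream_t s = streams[c % 2];
		float *hp = h_data + c * chunkElems;
		float *dp = d_data + c * chunkElems;
		cudaMemcpyAsync(dp, hp, chunkBytes, cudaMemcpyHostToDevice, s);
		process<<<(chunkElems + 255) / 256, 256, 0, s>>>(dp, chunkElems);
		cudaMemcpyAsync(hp, dp, chunkBytes, cudaMemcpyDeviceToHost, s);
	}
	cudaDeviceSynchronize();

	for (int s = 0; s < 2; s++) cudaStreamDestroy(streams[s]);
	cudaFree(d_data);
	cudaFreeHost(h_data);
	return 0;
}

Pinned (cudaHostAlloc) memory is what allows cudaMemcpyAsync on one stream to proceed while kernels run on another, which is why the benchmark above allocates its host arrays that way.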
28393f2ac5ca1b3c080261ce6b7435de3b6d803b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Author: Dai-Ni Hsieh ([email protected]) // Date : 11/17/2020 #include <cmath> #include "besselk.h" #include "polybesselk.h" #include "matvec.h" #include "constants.h" inline void setBesselkCoefficients() { hipMemcpyToSymbol(c_P01Vec, P01Vec, sizeof(double) * (P01Deg + 1), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(c_Q01Vec, Q01Vec, sizeof(double) * (Q01Deg + 1), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(c_P02Vec, P02Vec, sizeof(double) * (P02Deg + 1), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(c_Q02Vec, Q02Vec, sizeof(double) * (Q02Deg + 1), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(c_P03Vec, P03Vec, sizeof(double) * (P03Deg + 1), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(c_Q03Vec, Q03Vec, sizeof(double) * (Q03Deg + 1), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(c_P11Vec, P11Vec, sizeof(double) * (P11Deg + 1), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(c_Q11Vec, Q11Vec, sizeof(double) * (Q11Deg + 1), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(c_P12Vec, P12Vec, sizeof(double) * (P12Deg + 1), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(c_Q12Vec, Q12Vec, sizeof(double) * (Q12Deg + 1), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(c_P13Vec, P13Vec, sizeof(double) * (P13Deg + 1), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(c_Q13Vec, Q13Vec, sizeof(double) * (Q13Deg + 1), 0, hipMemcpyHostToDevice); return; } __global__ void dqGaussian(double *d_dqKMat, double *d_lmkMat, double *d_lftMat, double *d_rgtMat, double knlWidth, double knlWidthSqu, int lmkNum) { int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x; if ( lmkiIdx < lmkNum ) { vector dqKVec = {0.0, 0.0}; vector qiVec, liVec, riVec; getVector(qiVec, d_lmkMat, lmkiIdx, lmkNum); getVector(liVec, d_lftMat, lmkiIdx, lmkNum); getVector(riVec, d_rgtMat, lmkiIdx, lmkNum); for ( int lmkjIdx = 0; lmkjIdx < lmkNum; ++lmkjIdx ) { vector qjVec, ljVec, rjVec; getVector(qjVec, d_lmkMat, lmkjIdx, lmkNum); getVector(ljVec, d_lftMat, lmkjIdx, lmkNum); getVector(rjVec, d_rgtMat, lmkjIdx, lmkNum); vector qijVec; vectorSubtract(qijVec, qiVec, qjVec); double dijSqu = eucnormSqu(qijVec) / knlWidthSqu; double dqKVal = -2.0 / knlWidthSqu * exp(-dijSqu); double lrVal = dotProduct(liVec, rjVec) + dotProduct(ljVec, riVec); dqKVec.x += lrVal * dqKVal * qijVec.x; dqKVec.y += lrVal * dqKVal * qijVec.y; } setVector(d_dqKMat, dqKVec, lmkiIdx, lmkNum); } return; } __global__ void dqiGaussian(double *d_dqiKMat, double *d_lmkiMat, double *d_lmkjMat, double *d_lftMat, double *d_rgtMat, double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum) { int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x; if ( lmkiIdx < lmkiNum ) { vector dqKVec = {0.0, 0.0}; vector qiVec, liVec; getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum); getVector(liVec, d_lftMat, lmkiIdx, lmkiNum); for ( int lmkjIdx = 0; lmkjIdx < lmkjNum; ++lmkjIdx ) { vector qjVec, rjVec; getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum); getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum); vector qijVec; vectorSubtract(qijVec, qiVec, qjVec); double dijSqu = eucnormSqu(qijVec) / knlWidthSqu; double dqKVal = -2.0 / knlWidthSqu * exp(-dijSqu); double lrVal = dotProduct(liVec, rjVec); dqKVec.x += lrVal * dqKVal * qijVec.x; dqKVec.y += lrVal * dqKVal * qijVec.y; } setVector(d_dqiKMat, dqKVec, lmkiIdx, lmkiNum); } return; } __global__ void dqjGaussian(double *d_dqjKMat, double *d_lmkiMat, double *d_lmkjMat, double *d_lftMat, double *d_rgtMat, double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum) { int lmkjIdx = 
blockIdx.x * blockDim.x + threadIdx.x; if ( lmkjIdx < lmkjNum ) { vector dqKVec = {0.0, 0.0}; vector qjVec, rjVec; getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum); getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum); for ( int lmkiIdx = 0; lmkiIdx < lmkiNum; ++lmkiIdx ) { vector qiVec, liVec; getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum); getVector(liVec, d_lftMat, lmkiIdx, lmkiNum); vector qjiVec; vectorSubtract(qjiVec, qjVec, qiVec); double dijSqu = eucnormSqu(qjiVec) / knlWidthSqu; double dqKVal = -2.0 / knlWidthSqu * exp(-dijSqu); double lrVal = dotProduct(liVec, rjVec); dqKVec.x += lrVal * dqKVal * qjiVec.x; dqKVec.y += lrVal * dqKVal * qjiVec.y; } setVector(d_dqjKMat, dqKVec, lmkjIdx, lmkjNum); } return; } __global__ void dqMatern1(double *d_dqKMat, double *d_lmkMat, double *d_lftMat, double *d_rgtMat, double knlWidth, double knlWidthSqu, int lmkNum) { int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x; if ( lmkiIdx < lmkNum ) { vector dqKVec = {0.0, 0.0}; vector qiVec, liVec, riVec; getVector(qiVec, d_lmkMat, lmkiIdx, lmkNum); getVector(liVec, d_lftMat, lmkiIdx, lmkNum); getVector(riVec, d_rgtMat, lmkiIdx, lmkNum); for ( int lmkjIdx = 0; lmkjIdx < lmkNum; ++lmkjIdx ) { vector qjVec, ljVec, rjVec; getVector(qjVec, d_lmkMat, lmkjIdx, lmkNum); getVector(ljVec, d_lftMat, lmkjIdx, lmkNum); getVector(rjVec, d_rgtMat, lmkjIdx, lmkNum); vector qijVec; vectorSubtract(qijVec, qiVec, qjVec); double dijVal = eucnorm(qijVec) / knlWidth; double p1Val; p1Fcn(p1Val, dijVal); double dqKVal = -1.0 / (2.0 * knlWidthSqu) * p1Val; double lrVal = dotProduct(liVec, rjVec) + dotProduct(ljVec, riVec); dqKVec.x += lrVal * dqKVal * qijVec.x; dqKVec.y += lrVal * dqKVal * qijVec.y; } setVector(d_dqKMat, dqKVec, lmkiIdx, lmkNum); } return; } __global__ void dqiMatern1(double *d_dqiKMat, double *d_lmkiMat, double *d_lmkjMat, double *d_lftMat, double *d_rgtMat, double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum) { int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x; if ( lmkiIdx < lmkiNum ) { vector dqKVec = {0.0, 0.0}; vector qiVec, liVec; getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum); getVector(liVec, d_lftMat, lmkiIdx, lmkiNum); for ( int lmkjIdx = 0; lmkjIdx < lmkjNum; ++lmkjIdx ) { vector qjVec, rjVec; getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum); getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum); vector qijVec; vectorSubtract(qijVec, qiVec, qjVec); double dijVal = eucnorm(qijVec) / knlWidth; double p1Val; p1Fcn(p1Val, dijVal); double dqKVal = -1.0 / (2.0 * knlWidthSqu) * p1Val; double lrVal = dotProduct(liVec, rjVec); dqKVec.x += lrVal * dqKVal * qijVec.x; dqKVec.y += lrVal * dqKVal * qijVec.y; } setVector(d_dqiKMat, dqKVec, lmkiIdx, lmkiNum); } return; } __global__ void dqjMatern1(double *d_dqjKMat, double *d_lmkiMat, double *d_lmkjMat, double *d_lftMat, double *d_rgtMat, double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum) { int lmkjIdx = blockIdx.x * blockDim.x + threadIdx.x; if ( lmkjIdx < lmkjNum ) { vector dqKVec = {0.0, 0.0}; vector qjVec, rjVec; getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum); getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum); for ( int lmkiIdx = 0; lmkiIdx < lmkiNum; ++lmkiIdx ) { vector qiVec, liVec; getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum); getVector(liVec, d_lftMat, lmkiIdx, lmkiNum); vector qjiVec; vectorSubtract(qjiVec, qjVec, qiVec); double dijVal = eucnorm(qjiVec) / knlWidth; double p1Val; p1Fcn(p1Val, dijVal); double dqKVal = -1.0 / (2.0 * knlWidthSqu) * p1Val; double lrVal = dotProduct(liVec, rjVec); dqKVec.x += lrVal * dqKVal * qjiVec.x; dqKVec.y += lrVal * dqKVal * 
qjiVec.y; } setVector(d_dqjKMat, dqKVec, lmkjIdx, lmkjNum); } return; } __global__ void dqMatern2(double *d_dqKMat, double *d_lmkMat, double *d_lftMat, double *d_rgtMat, double knlWidth, double knlWidthSqu, int lmkNum) { int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x; if ( lmkiIdx < lmkNum ) { vector dqKVec = {0.0, 0.0}; vector qiVec, liVec, riVec; getVector(qiVec, d_lmkMat, lmkiIdx, lmkNum); getVector(liVec, d_lftMat, lmkiIdx, lmkNum); getVector(riVec, d_rgtMat, lmkiIdx, lmkNum); for ( int lmkjIdx = 0; lmkjIdx < lmkNum; ++lmkjIdx ) { vector qjVec, ljVec, rjVec; getVector(qjVec, d_lmkMat, lmkjIdx, lmkNum); getVector(ljVec, d_lftMat, lmkjIdx, lmkNum); getVector(rjVec, d_rgtMat, lmkjIdx, lmkNum); vector qijVec; vectorSubtract(qijVec, qiVec, qjVec); double dijVal = eucnorm(qijVec) / knlWidth; double p0Val, p1Val; p0Fcn(p0Val, dijVal); p1Fcn(p1Val, dijVal); double dqKVal = -1.0 / (8.0 * knlWidthSqu) * (p0Val + 2.0 * p1Val); double lrVal = dotProduct(liVec, rjVec) + dotProduct(ljVec, riVec); dqKVec.x += lrVal * dqKVal * qijVec.x; dqKVec.y += lrVal * dqKVal * qijVec.y; } setVector(d_dqKMat, dqKVec, lmkiIdx, lmkNum); } return; } __global__ void dqiMatern2(double *d_dqiKMat, double *d_lmkiMat, double *d_lmkjMat, double *d_lftMat, double *d_rgtMat, double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum) { int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x; if ( lmkiIdx < lmkiNum ) { vector dqKVec = {0.0, 0.0}; vector qiVec, liVec; getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum); getVector(liVec, d_lftMat, lmkiIdx, lmkiNum); for ( int lmkjIdx = 0; lmkjIdx < lmkjNum; ++lmkjIdx ) { vector qjVec, rjVec; getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum); getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum); vector qijVec; vectorSubtract(qijVec, qiVec, qjVec); double dijVal = eucnorm(qijVec) / knlWidth; double p0Val, p1Val; p0Fcn(p0Val, dijVal); p1Fcn(p1Val, dijVal); double dqKVal = -1.0 / (8.0 * knlWidthSqu) * (p0Val + 2.0 * p1Val); double lrVal = dotProduct(liVec, rjVec); dqKVec.x += lrVal * dqKVal * qijVec.x; dqKVec.y += lrVal * dqKVal * qijVec.y; } setVector(d_dqiKMat, dqKVec, lmkiIdx, lmkiNum); } return; } __global__ void dqjMatern2(double *d_dqjKMat, double *d_lmkiMat, double *d_lmkjMat, double *d_lftMat, double *d_rgtMat, double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum) { int lmkjIdx = blockIdx.x * blockDim.x + threadIdx.x; if ( lmkjIdx < lmkjNum ) { vector dqKVec = {0.0, 0.0}; vector qjVec, rjVec; getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum); getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum); for ( int lmkiIdx = 0; lmkiIdx < lmkiNum; ++lmkiIdx ) { vector qiVec, liVec; getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum); getVector(liVec, d_lftMat, lmkiIdx, lmkiNum); vector qjiVec; vectorSubtract(qjiVec, qjVec, qiVec); double dijVal = eucnorm(qjiVec) / knlWidth; double p0Val, p1Val; p0Fcn(p0Val, dijVal); p1Fcn(p1Val, dijVal); double dqKVal = -1.0 / (8.0 * knlWidthSqu) * (p0Val + 2.0 * p1Val); double lrVal = dotProduct(liVec, rjVec); dqKVec.x += lrVal * dqKVal * qjiVec.x; dqKVec.y += lrVal * dqKVal * qjiVec.y; } setVector(d_dqjKMat, dqKVec, lmkjIdx, lmkjNum); } return; } __global__ void dqMatern3(double *d_dqKMat, double *d_lmkMat, double *d_lftMat, double *d_rgtMat, double knlWidth, double knlWidthSqu, int lmkNum) { int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x; if ( lmkiIdx < lmkNum ) { vector dqKVec = {0.0, 0.0}; vector qiVec, liVec, riVec; getVector(qiVec, d_lmkMat, lmkiIdx, lmkNum); getVector(liVec, d_lftMat, lmkiIdx, lmkNum); getVector(riVec, d_rgtMat, lmkiIdx, lmkNum); for ( int 
lmkjIdx = 0; lmkjIdx < lmkNum; ++lmkjIdx ) { vector qjVec, ljVec, rjVec; getVector(qjVec, d_lmkMat, lmkjIdx, lmkNum); getVector(ljVec, d_lftMat, lmkjIdx, lmkNum); getVector(rjVec, d_rgtMat, lmkjIdx, lmkNum); vector qijVec; vectorSubtract(qijVec, qiVec, qjVec); double dijVal = eucnorm(qijVec) / knlWidth; double dijSqu = dijVal * dijVal; double p0Val, p1Val; p0Fcn(p0Val, dijVal); p1Fcn(p1Val, dijVal); double dqKVal = -1.0 / (48.0 * knlWidthSqu) * (4.0 * p0Val + (8.0 + dijSqu) * p1Val); double lrVal = dotProduct(liVec, rjVec) + dotProduct(ljVec, riVec); dqKVec.x += lrVal * dqKVal * qijVec.x; dqKVec.y += lrVal * dqKVal * qijVec.y; } setVector(d_dqKMat, dqKVec, lmkiIdx, lmkNum); } return; } __global__ void dqiMatern3(double *d_dqiKMat, double *d_lmkiMat, double *d_lmkjMat, double *d_lftMat, double *d_rgtMat, double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum) { int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x; if ( lmkiIdx < lmkiNum ) { vector dqKVec = {0.0, 0.0}; vector qiVec, liVec; getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum); getVector(liVec, d_lftMat, lmkiIdx, lmkiNum); for ( int lmkjIdx = 0; lmkjIdx < lmkjNum; ++lmkjIdx ) { vector qjVec, rjVec; getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum); getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum); vector qijVec; vectorSubtract(qijVec, qiVec, qjVec); double dijVal = eucnorm(qijVec) / knlWidth; double dijSqu = dijVal * dijVal; double p0Val, p1Val; p0Fcn(p0Val, dijVal); p1Fcn(p1Val, dijVal); double dqKVal = -1.0 / (48.0 * knlWidthSqu) * (4.0 * p0Val + (8.0 + dijSqu) * p1Val); double lrVal = dotProduct(liVec, rjVec); dqKVec.x += lrVal * dqKVal * qijVec.x; dqKVec.y += lrVal * dqKVal * qijVec.y; } setVector(d_dqiKMat, dqKVec, lmkiIdx, lmkiNum); } return; } __global__ void dqjMatern3(double *d_dqjKMat, double *d_lmkiMat, double *d_lmkjMat, double *d_lftMat, double *d_rgtMat, double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum) { int lmkjIdx = blockIdx.x * blockDim.x + threadIdx.x; if ( lmkjIdx < lmkjNum ) { vector dqKVec = {0.0, 0.0}; vector qjVec, rjVec; getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum); getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum); for ( int lmkiIdx = 0; lmkiIdx < lmkiNum; ++lmkiIdx ) { vector qiVec, liVec; getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum); getVector(liVec, d_lftMat, lmkiIdx, lmkiNum); vector qjiVec; vectorSubtract(qjiVec, qjVec, qiVec); double dijVal = eucnorm(qjiVec) / knlWidth; double dijSqu = dijVal * dijVal; double p0Val, p1Val; p0Fcn(p0Val, dijVal); p1Fcn(p1Val, dijVal); double dqKVal = -1.0 / (48.0 * knlWidthSqu) * (4.0 * p0Val + (8.0 + dijSqu) * p1Val); double lrVal = dotProduct(liVec, rjVec); dqKVec.x += lrVal * dqKVal * qjiVec.x; dqKVec.y += lrVal * dqKVal * qjiVec.y; } setVector(d_dqjKMat, dqKVec, lmkjIdx, lmkjNum); } return; } __global__ void dqMatern4(double *d_dqKMat, double *d_lmkMat, double *d_lftMat, double *d_rgtMat, double knlWidth, double knlWidthSqu, int lmkNum) { int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x; if ( lmkiIdx < lmkNum ) { vector dqKVec = {0.0, 0.0}; vector qiVec, liVec, riVec; getVector(qiVec, d_lmkMat, lmkiIdx, lmkNum); getVector(liVec, d_lftMat, lmkiIdx, lmkNum); getVector(riVec, d_rgtMat, lmkiIdx, lmkNum); for ( int lmkjIdx = 0; lmkjIdx < lmkNum; ++lmkjIdx ) { vector qjVec, ljVec, rjVec; getVector(qjVec, d_lmkMat, lmkjIdx, lmkNum); getVector(ljVec, d_lftMat, lmkjIdx, lmkNum); getVector(rjVec, d_rgtMat, lmkjIdx, lmkNum); vector qijVec; vectorSubtract(qijVec, qiVec, qjVec); double dijVal = eucnorm(qijVec) / knlWidth; double dijSqu = dijVal * dijVal; double p0Val, 
p1Val; p0Fcn(p0Val, dijVal); p1Fcn(p1Val, dijVal); double dqKVal = -1.0 / (384.0 * knlWidthSqu) * ((24.0 + dijSqu) * p0Val + 8.0 * (6.0 + dijSqu) * p1Val); double lrVal = dotProduct(liVec, rjVec) + dotProduct(ljVec, riVec); dqKVec.x += lrVal * dqKVal * qijVec.x; dqKVec.y += lrVal * dqKVal * qijVec.y; } setVector(d_dqKMat, dqKVec, lmkiIdx, lmkNum); } return; } __global__ void dqiMatern4(double *d_dqiKMat, double *d_lmkiMat, double *d_lmkjMat, double *d_lftMat, double *d_rgtMat, double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum) { int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x; if ( lmkiIdx < lmkiNum ) { vector dqKVec = {0.0, 0.0}; vector qiVec, liVec; getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum); getVector(liVec, d_lftMat, lmkiIdx, lmkiNum); for ( int lmkjIdx = 0; lmkjIdx < lmkjNum; ++lmkjIdx ) { vector qjVec, rjVec; getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum); getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum); vector qijVec; vectorSubtract(qijVec, qiVec, qjVec); double dijVal = eucnorm(qijVec) / knlWidth; double dijSqu = dijVal * dijVal; double p0Val, p1Val; p0Fcn(p0Val, dijVal); p1Fcn(p1Val, dijVal); double dqKVal = -1.0 / (384.0 * knlWidthSqu) * ((24.0 + dijSqu) * p0Val + 8.0 * (6.0 + dijSqu) * p1Val); double lrVal = dotProduct(liVec, rjVec); dqKVec.x += lrVal * dqKVal * qijVec.x; dqKVec.y += lrVal * dqKVal * qijVec.y; } setVector(d_dqiKMat, dqKVec, lmkiIdx, lmkiNum); } return; } __global__ void dqjMatern4(double *d_dqjKMat, double *d_lmkiMat, double *d_lmkjMat, double *d_lftMat, double *d_rgtMat, double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum) { int lmkjIdx = blockIdx.x * blockDim.x + threadIdx.x; if ( lmkjIdx < lmkjNum ) { vector dqKVec = {0.0, 0.0}; vector qjVec, rjVec; getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum); getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum); for ( int lmkiIdx = 0; lmkiIdx < lmkiNum; ++lmkiIdx ) { vector qiVec, liVec; getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum); getVector(liVec, d_lftMat, lmkiIdx, lmkiNum); vector qjiVec; vectorSubtract(qjiVec, qjVec, qiVec); double dijVal = eucnorm(qjiVec) / knlWidth; double dijSqu = dijVal * dijVal; double p0Val, p1Val; p0Fcn(p0Val, dijVal); p1Fcn(p1Val, dijVal); double dqKVal = -1.0 / (384.0 * knlWidthSqu) * ((24.0 + dijSqu) * p0Val + 8.0 * (6.0 + dijSqu) * p1Val); double lrVal = dotProduct(liVec, rjVec); dqKVec.x += lrVal * dqKVal * qjiVec.x; dqKVec.y += lrVal * dqKVal * qjiVec.y; } setVector(d_dqjKMat, dqKVec, lmkjIdx, lmkjNum); } return; } void dqKernel(double *d_dqKMat, double *d_lmkMat, double *d_lftMat, double *d_rgtMat, int knlOrder, double knlWidth, int lmkNum) { // order 0 to 4: Matern kernel of order 0 to 4 // order -1: Gaussian kernel setBesselkCoefficients(); double knlWidthSqu = knlWidth * knlWidth; int blkNum = (lmkNum - 1) / BLKDIM + 1; switch ( knlOrder ) { case -1: hipLaunchKernelGGL(( dqGaussian) , dim3(blkNum), dim3(BLKDIM), 0, 0, d_dqKMat, d_lmkMat, d_lftMat, d_rgtMat, knlWidth, knlWidthSqu, lmkNum); break; // Matern0 is not differentiable case 1: hipLaunchKernelGGL(( dqMatern1) , dim3(blkNum), dim3(BLKDIM), 0, 0, d_dqKMat, d_lmkMat, d_lftMat, d_rgtMat, knlWidth, knlWidthSqu, lmkNum); break; case 2: hipLaunchKernelGGL(( dqMatern2) , dim3(blkNum), dim3(BLKDIM), 0, 0, d_dqKMat, d_lmkMat, d_lftMat, d_rgtMat, knlWidth, knlWidthSqu, lmkNum); break; case 3: hipLaunchKernelGGL(( dqMatern3) , dim3(blkNum), dim3(BLKDIM), 0, 0, d_dqKMat, d_lmkMat, d_lftMat, d_rgtMat, knlWidth, knlWidthSqu, lmkNum); break; case 4: hipLaunchKernelGGL(( dqMatern4) , dim3(blkNum), dim3(BLKDIM), 0, 0, 
d_dqKMat, d_lmkMat, d_lftMat, d_rgtMat, knlWidth, knlWidthSqu, lmkNum); break; } return; } void dqKernel(double *d_dqiKMat, double *d_dqjKMat, double *d_lmkiMat, double *d_lmkjMat, double *d_lftMat, double *d_rgtMat, int knlOrder, double knlWidth, int lmkiNum, int lmkjNum) { // order 0 to 4: Matern kernel of order 0 to 4 // order -1: Gaussian kernel setBesselkCoefficients(); double knlWidthSqu = knlWidth * knlWidth; int blkiNum = (lmkiNum - 1) / BLKDIM + 1; int blkjNum = (lmkjNum - 1) / BLKDIM + 1; switch ( knlOrder ) { case -1: hipLaunchKernelGGL(( dqiGaussian) , dim3(blkiNum), dim3(BLKDIM), 0, 0, d_dqiKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat, knlWidth, knlWidthSqu, lmkiNum, lmkjNum); hipLaunchKernelGGL(( dqjGaussian) , dim3(blkjNum), dim3(BLKDIM), 0, 0, d_dqjKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat, knlWidth, knlWidthSqu, lmkiNum, lmkjNum); break; // Matern0 is not differentiable case 1: hipLaunchKernelGGL(( dqiMatern1) , dim3(blkiNum), dim3(BLKDIM), 0, 0, d_dqiKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat, knlWidth, knlWidthSqu, lmkiNum, lmkjNum); hipLaunchKernelGGL(( dqjMatern1) , dim3(blkjNum), dim3(BLKDIM), 0, 0, d_dqjKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat, knlWidth, knlWidthSqu, lmkiNum, lmkjNum); break; case 2: hipLaunchKernelGGL(( dqiMatern2) , dim3(blkiNum), dim3(BLKDIM), 0, 0, d_dqiKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat, knlWidth, knlWidthSqu, lmkiNum, lmkjNum); hipLaunchKernelGGL(( dqjMatern2) , dim3(blkjNum), dim3(BLKDIM), 0, 0, d_dqjKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat, knlWidth, knlWidthSqu, lmkiNum, lmkjNum); break; case 3: hipLaunchKernelGGL(( dqiMatern3) , dim3(blkiNum), dim3(BLKDIM), 0, 0, d_dqiKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat, knlWidth, knlWidthSqu, lmkiNum, lmkjNum); hipLaunchKernelGGL(( dqjMatern3) , dim3(blkjNum), dim3(BLKDIM), 0, 0, d_dqjKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat, knlWidth, knlWidthSqu, lmkiNum, lmkjNum); break; case 4: hipLaunchKernelGGL(( dqiMatern4) , dim3(blkiNum), dim3(BLKDIM), 0, 0, d_dqiKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat, knlWidth, knlWidthSqu, lmkiNum, lmkjNum); hipLaunchKernelGGL(( dqjMatern4) , dim3(blkjNum), dim3(BLKDIM), 0, 0, d_dqjKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat, knlWidth, knlWidthSqu, lmkiNum, lmkjNum); break; } return; }
28393f2ac5ca1b3c080261ce6b7435de3b6d803b.cu
// Author: Dai-Ni Hsieh ([email protected]) // Date : 11/17/2020 #include <cmath> #include "besselk.h" #include "polybesselk.h" #include "matvec.h" #include "constants.h" inline void setBesselkCoefficients() { cudaMemcpyToSymbol(c_P01Vec, P01Vec, sizeof(double) * (P01Deg + 1), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(c_Q01Vec, Q01Vec, sizeof(double) * (Q01Deg + 1), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(c_P02Vec, P02Vec, sizeof(double) * (P02Deg + 1), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(c_Q02Vec, Q02Vec, sizeof(double) * (Q02Deg + 1), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(c_P03Vec, P03Vec, sizeof(double) * (P03Deg + 1), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(c_Q03Vec, Q03Vec, sizeof(double) * (Q03Deg + 1), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(c_P11Vec, P11Vec, sizeof(double) * (P11Deg + 1), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(c_Q11Vec, Q11Vec, sizeof(double) * (Q11Deg + 1), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(c_P12Vec, P12Vec, sizeof(double) * (P12Deg + 1), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(c_Q12Vec, Q12Vec, sizeof(double) * (Q12Deg + 1), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(c_P13Vec, P13Vec, sizeof(double) * (P13Deg + 1), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(c_Q13Vec, Q13Vec, sizeof(double) * (Q13Deg + 1), 0, cudaMemcpyHostToDevice); return; } __global__ void dqGaussian(double *d_dqKMat, double *d_lmkMat, double *d_lftMat, double *d_rgtMat, double knlWidth, double knlWidthSqu, int lmkNum) { int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x; if ( lmkiIdx < lmkNum ) { vector dqKVec = {0.0, 0.0}; vector qiVec, liVec, riVec; getVector(qiVec, d_lmkMat, lmkiIdx, lmkNum); getVector(liVec, d_lftMat, lmkiIdx, lmkNum); getVector(riVec, d_rgtMat, lmkiIdx, lmkNum); for ( int lmkjIdx = 0; lmkjIdx < lmkNum; ++lmkjIdx ) { vector qjVec, ljVec, rjVec; getVector(qjVec, d_lmkMat, lmkjIdx, lmkNum); getVector(ljVec, d_lftMat, lmkjIdx, lmkNum); getVector(rjVec, d_rgtMat, lmkjIdx, lmkNum); vector qijVec; vectorSubtract(qijVec, qiVec, qjVec); double dijSqu = eucnormSqu(qijVec) / knlWidthSqu; double dqKVal = -2.0 / knlWidthSqu * exp(-dijSqu); double lrVal = dotProduct(liVec, rjVec) + dotProduct(ljVec, riVec); dqKVec.x += lrVal * dqKVal * qijVec.x; dqKVec.y += lrVal * dqKVal * qijVec.y; } setVector(d_dqKMat, dqKVec, lmkiIdx, lmkNum); } return; } __global__ void dqiGaussian(double *d_dqiKMat, double *d_lmkiMat, double *d_lmkjMat, double *d_lftMat, double *d_rgtMat, double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum) { int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x; if ( lmkiIdx < lmkiNum ) { vector dqKVec = {0.0, 0.0}; vector qiVec, liVec; getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum); getVector(liVec, d_lftMat, lmkiIdx, lmkiNum); for ( int lmkjIdx = 0; lmkjIdx < lmkjNum; ++lmkjIdx ) { vector qjVec, rjVec; getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum); getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum); vector qijVec; vectorSubtract(qijVec, qiVec, qjVec); double dijSqu = eucnormSqu(qijVec) / knlWidthSqu; double dqKVal = -2.0 / knlWidthSqu * exp(-dijSqu); double lrVal = dotProduct(liVec, rjVec); dqKVec.x += lrVal * dqKVal * qijVec.x; dqKVec.y += lrVal * dqKVal * qijVec.y; } setVector(d_dqiKMat, dqKVec, lmkiIdx, lmkiNum); } return; } __global__ void dqjGaussian(double *d_dqjKMat, double *d_lmkiMat, double *d_lmkjMat, double *d_lftMat, double *d_rgtMat, double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum) { int lmkjIdx = blockIdx.x * blockDim.x + threadIdx.x; if ( lmkjIdx < lmkjNum ) { 
vector dqKVec = {0.0, 0.0}; vector qjVec, rjVec; getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum); getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum); for ( int lmkiIdx = 0; lmkiIdx < lmkiNum; ++lmkiIdx ) { vector qiVec, liVec; getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum); getVector(liVec, d_lftMat, lmkiIdx, lmkiNum); vector qjiVec; vectorSubtract(qjiVec, qjVec, qiVec); double dijSqu = eucnormSqu(qjiVec) / knlWidthSqu; double dqKVal = -2.0 / knlWidthSqu * exp(-dijSqu); double lrVal = dotProduct(liVec, rjVec); dqKVec.x += lrVal * dqKVal * qjiVec.x; dqKVec.y += lrVal * dqKVal * qjiVec.y; } setVector(d_dqjKMat, dqKVec, lmkjIdx, lmkjNum); } return; } __global__ void dqMatern1(double *d_dqKMat, double *d_lmkMat, double *d_lftMat, double *d_rgtMat, double knlWidth, double knlWidthSqu, int lmkNum) { int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x; if ( lmkiIdx < lmkNum ) { vector dqKVec = {0.0, 0.0}; vector qiVec, liVec, riVec; getVector(qiVec, d_lmkMat, lmkiIdx, lmkNum); getVector(liVec, d_lftMat, lmkiIdx, lmkNum); getVector(riVec, d_rgtMat, lmkiIdx, lmkNum); for ( int lmkjIdx = 0; lmkjIdx < lmkNum; ++lmkjIdx ) { vector qjVec, ljVec, rjVec; getVector(qjVec, d_lmkMat, lmkjIdx, lmkNum); getVector(ljVec, d_lftMat, lmkjIdx, lmkNum); getVector(rjVec, d_rgtMat, lmkjIdx, lmkNum); vector qijVec; vectorSubtract(qijVec, qiVec, qjVec); double dijVal = eucnorm(qijVec) / knlWidth; double p1Val; p1Fcn(p1Val, dijVal); double dqKVal = -1.0 / (2.0 * knlWidthSqu) * p1Val; double lrVal = dotProduct(liVec, rjVec) + dotProduct(ljVec, riVec); dqKVec.x += lrVal * dqKVal * qijVec.x; dqKVec.y += lrVal * dqKVal * qijVec.y; } setVector(d_dqKMat, dqKVec, lmkiIdx, lmkNum); } return; } __global__ void dqiMatern1(double *d_dqiKMat, double *d_lmkiMat, double *d_lmkjMat, double *d_lftMat, double *d_rgtMat, double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum) { int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x; if ( lmkiIdx < lmkiNum ) { vector dqKVec = {0.0, 0.0}; vector qiVec, liVec; getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum); getVector(liVec, d_lftMat, lmkiIdx, lmkiNum); for ( int lmkjIdx = 0; lmkjIdx < lmkjNum; ++lmkjIdx ) { vector qjVec, rjVec; getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum); getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum); vector qijVec; vectorSubtract(qijVec, qiVec, qjVec); double dijVal = eucnorm(qijVec) / knlWidth; double p1Val; p1Fcn(p1Val, dijVal); double dqKVal = -1.0 / (2.0 * knlWidthSqu) * p1Val; double lrVal = dotProduct(liVec, rjVec); dqKVec.x += lrVal * dqKVal * qijVec.x; dqKVec.y += lrVal * dqKVal * qijVec.y; } setVector(d_dqiKMat, dqKVec, lmkiIdx, lmkiNum); } return; } __global__ void dqjMatern1(double *d_dqjKMat, double *d_lmkiMat, double *d_lmkjMat, double *d_lftMat, double *d_rgtMat, double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum) { int lmkjIdx = blockIdx.x * blockDim.x + threadIdx.x; if ( lmkjIdx < lmkjNum ) { vector dqKVec = {0.0, 0.0}; vector qjVec, rjVec; getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum); getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum); for ( int lmkiIdx = 0; lmkiIdx < lmkiNum; ++lmkiIdx ) { vector qiVec, liVec; getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum); getVector(liVec, d_lftMat, lmkiIdx, lmkiNum); vector qjiVec; vectorSubtract(qjiVec, qjVec, qiVec); double dijVal = eucnorm(qjiVec) / knlWidth; double p1Val; p1Fcn(p1Val, dijVal); double dqKVal = -1.0 / (2.0 * knlWidthSqu) * p1Val; double lrVal = dotProduct(liVec, rjVec); dqKVec.x += lrVal * dqKVal * qjiVec.x; dqKVec.y += lrVal * dqKVal * qjiVec.y; } setVector(d_dqjKMat, dqKVec, lmkjIdx, lmkjNum); } 
return; } __global__ void dqMatern2(double *d_dqKMat, double *d_lmkMat, double *d_lftMat, double *d_rgtMat, double knlWidth, double knlWidthSqu, int lmkNum) { int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x; if ( lmkiIdx < lmkNum ) { vector dqKVec = {0.0, 0.0}; vector qiVec, liVec, riVec; getVector(qiVec, d_lmkMat, lmkiIdx, lmkNum); getVector(liVec, d_lftMat, lmkiIdx, lmkNum); getVector(riVec, d_rgtMat, lmkiIdx, lmkNum); for ( int lmkjIdx = 0; lmkjIdx < lmkNum; ++lmkjIdx ) { vector qjVec, ljVec, rjVec; getVector(qjVec, d_lmkMat, lmkjIdx, lmkNum); getVector(ljVec, d_lftMat, lmkjIdx, lmkNum); getVector(rjVec, d_rgtMat, lmkjIdx, lmkNum); vector qijVec; vectorSubtract(qijVec, qiVec, qjVec); double dijVal = eucnorm(qijVec) / knlWidth; double p0Val, p1Val; p0Fcn(p0Val, dijVal); p1Fcn(p1Val, dijVal); double dqKVal = -1.0 / (8.0 * knlWidthSqu) * (p0Val + 2.0 * p1Val); double lrVal = dotProduct(liVec, rjVec) + dotProduct(ljVec, riVec); dqKVec.x += lrVal * dqKVal * qijVec.x; dqKVec.y += lrVal * dqKVal * qijVec.y; } setVector(d_dqKMat, dqKVec, lmkiIdx, lmkNum); } return; } __global__ void dqiMatern2(double *d_dqiKMat, double *d_lmkiMat, double *d_lmkjMat, double *d_lftMat, double *d_rgtMat, double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum) { int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x; if ( lmkiIdx < lmkiNum ) { vector dqKVec = {0.0, 0.0}; vector qiVec, liVec; getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum); getVector(liVec, d_lftMat, lmkiIdx, lmkiNum); for ( int lmkjIdx = 0; lmkjIdx < lmkjNum; ++lmkjIdx ) { vector qjVec, rjVec; getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum); getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum); vector qijVec; vectorSubtract(qijVec, qiVec, qjVec); double dijVal = eucnorm(qijVec) / knlWidth; double p0Val, p1Val; p0Fcn(p0Val, dijVal); p1Fcn(p1Val, dijVal); double dqKVal = -1.0 / (8.0 * knlWidthSqu) * (p0Val + 2.0 * p1Val); double lrVal = dotProduct(liVec, rjVec); dqKVec.x += lrVal * dqKVal * qijVec.x; dqKVec.y += lrVal * dqKVal * qijVec.y; } setVector(d_dqiKMat, dqKVec, lmkiIdx, lmkiNum); } return; } __global__ void dqjMatern2(double *d_dqjKMat, double *d_lmkiMat, double *d_lmkjMat, double *d_lftMat, double *d_rgtMat, double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum) { int lmkjIdx = blockIdx.x * blockDim.x + threadIdx.x; if ( lmkjIdx < lmkjNum ) { vector dqKVec = {0.0, 0.0}; vector qjVec, rjVec; getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum); getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum); for ( int lmkiIdx = 0; lmkiIdx < lmkiNum; ++lmkiIdx ) { vector qiVec, liVec; getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum); getVector(liVec, d_lftMat, lmkiIdx, lmkiNum); vector qjiVec; vectorSubtract(qjiVec, qjVec, qiVec); double dijVal = eucnorm(qjiVec) / knlWidth; double p0Val, p1Val; p0Fcn(p0Val, dijVal); p1Fcn(p1Val, dijVal); double dqKVal = -1.0 / (8.0 * knlWidthSqu) * (p0Val + 2.0 * p1Val); double lrVal = dotProduct(liVec, rjVec); dqKVec.x += lrVal * dqKVal * qjiVec.x; dqKVec.y += lrVal * dqKVal * qjiVec.y; } setVector(d_dqjKMat, dqKVec, lmkjIdx, lmkjNum); } return; } __global__ void dqMatern3(double *d_dqKMat, double *d_lmkMat, double *d_lftMat, double *d_rgtMat, double knlWidth, double knlWidthSqu, int lmkNum) { int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x; if ( lmkiIdx < lmkNum ) { vector dqKVec = {0.0, 0.0}; vector qiVec, liVec, riVec; getVector(qiVec, d_lmkMat, lmkiIdx, lmkNum); getVector(liVec, d_lftMat, lmkiIdx, lmkNum); getVector(riVec, d_rgtMat, lmkiIdx, lmkNum); for ( int lmkjIdx = 0; lmkjIdx < lmkNum; ++lmkjIdx ) { vector qjVec, ljVec, 
rjVec; getVector(qjVec, d_lmkMat, lmkjIdx, lmkNum); getVector(ljVec, d_lftMat, lmkjIdx, lmkNum); getVector(rjVec, d_rgtMat, lmkjIdx, lmkNum); vector qijVec; vectorSubtract(qijVec, qiVec, qjVec); double dijVal = eucnorm(qijVec) / knlWidth; double dijSqu = dijVal * dijVal; double p0Val, p1Val; p0Fcn(p0Val, dijVal); p1Fcn(p1Val, dijVal); double dqKVal = -1.0 / (48.0 * knlWidthSqu) * (4.0 * p0Val + (8.0 + dijSqu) * p1Val); double lrVal = dotProduct(liVec, rjVec) + dotProduct(ljVec, riVec); dqKVec.x += lrVal * dqKVal * qijVec.x; dqKVec.y += lrVal * dqKVal * qijVec.y; } setVector(d_dqKMat, dqKVec, lmkiIdx, lmkNum); } return; } __global__ void dqiMatern3(double *d_dqiKMat, double *d_lmkiMat, double *d_lmkjMat, double *d_lftMat, double *d_rgtMat, double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum) { int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x; if ( lmkiIdx < lmkiNum ) { vector dqKVec = {0.0, 0.0}; vector qiVec, liVec; getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum); getVector(liVec, d_lftMat, lmkiIdx, lmkiNum); for ( int lmkjIdx = 0; lmkjIdx < lmkjNum; ++lmkjIdx ) { vector qjVec, rjVec; getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum); getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum); vector qijVec; vectorSubtract(qijVec, qiVec, qjVec); double dijVal = eucnorm(qijVec) / knlWidth; double dijSqu = dijVal * dijVal; double p0Val, p1Val; p0Fcn(p0Val, dijVal); p1Fcn(p1Val, dijVal); double dqKVal = -1.0 / (48.0 * knlWidthSqu) * (4.0 * p0Val + (8.0 + dijSqu) * p1Val); double lrVal = dotProduct(liVec, rjVec); dqKVec.x += lrVal * dqKVal * qijVec.x; dqKVec.y += lrVal * dqKVal * qijVec.y; } setVector(d_dqiKMat, dqKVec, lmkiIdx, lmkiNum); } return; } __global__ void dqjMatern3(double *d_dqjKMat, double *d_lmkiMat, double *d_lmkjMat, double *d_lftMat, double *d_rgtMat, double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum) { int lmkjIdx = blockIdx.x * blockDim.x + threadIdx.x; if ( lmkjIdx < lmkjNum ) { vector dqKVec = {0.0, 0.0}; vector qjVec, rjVec; getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum); getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum); for ( int lmkiIdx = 0; lmkiIdx < lmkiNum; ++lmkiIdx ) { vector qiVec, liVec; getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum); getVector(liVec, d_lftMat, lmkiIdx, lmkiNum); vector qjiVec; vectorSubtract(qjiVec, qjVec, qiVec); double dijVal = eucnorm(qjiVec) / knlWidth; double dijSqu = dijVal * dijVal; double p0Val, p1Val; p0Fcn(p0Val, dijVal); p1Fcn(p1Val, dijVal); double dqKVal = -1.0 / (48.0 * knlWidthSqu) * (4.0 * p0Val + (8.0 + dijSqu) * p1Val); double lrVal = dotProduct(liVec, rjVec); dqKVec.x += lrVal * dqKVal * qjiVec.x; dqKVec.y += lrVal * dqKVal * qjiVec.y; } setVector(d_dqjKMat, dqKVec, lmkjIdx, lmkjNum); } return; } __global__ void dqMatern4(double *d_dqKMat, double *d_lmkMat, double *d_lftMat, double *d_rgtMat, double knlWidth, double knlWidthSqu, int lmkNum) { int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x; if ( lmkiIdx < lmkNum ) { vector dqKVec = {0.0, 0.0}; vector qiVec, liVec, riVec; getVector(qiVec, d_lmkMat, lmkiIdx, lmkNum); getVector(liVec, d_lftMat, lmkiIdx, lmkNum); getVector(riVec, d_rgtMat, lmkiIdx, lmkNum); for ( int lmkjIdx = 0; lmkjIdx < lmkNum; ++lmkjIdx ) { vector qjVec, ljVec, rjVec; getVector(qjVec, d_lmkMat, lmkjIdx, lmkNum); getVector(ljVec, d_lftMat, lmkjIdx, lmkNum); getVector(rjVec, d_rgtMat, lmkjIdx, lmkNum); vector qijVec; vectorSubtract(qijVec, qiVec, qjVec); double dijVal = eucnorm(qijVec) / knlWidth; double dijSqu = dijVal * dijVal; double p0Val, p1Val; p0Fcn(p0Val, dijVal); p1Fcn(p1Val, dijVal); double dqKVal = 
-1.0 / (384.0 * knlWidthSqu) * ((24.0 + dijSqu) * p0Val + 8.0 * (6.0 + dijSqu) * p1Val); double lrVal = dotProduct(liVec, rjVec) + dotProduct(ljVec, riVec); dqKVec.x += lrVal * dqKVal * qijVec.x; dqKVec.y += lrVal * dqKVal * qijVec.y; } setVector(d_dqKMat, dqKVec, lmkiIdx, lmkNum); } return; } __global__ void dqiMatern4(double *d_dqiKMat, double *d_lmkiMat, double *d_lmkjMat, double *d_lftMat, double *d_rgtMat, double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum) { int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x; if ( lmkiIdx < lmkiNum ) { vector dqKVec = {0.0, 0.0}; vector qiVec, liVec; getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum); getVector(liVec, d_lftMat, lmkiIdx, lmkiNum); for ( int lmkjIdx = 0; lmkjIdx < lmkjNum; ++lmkjIdx ) { vector qjVec, rjVec; getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum); getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum); vector qijVec; vectorSubtract(qijVec, qiVec, qjVec); double dijVal = eucnorm(qijVec) / knlWidth; double dijSqu = dijVal * dijVal; double p0Val, p1Val; p0Fcn(p0Val, dijVal); p1Fcn(p1Val, dijVal); double dqKVal = -1.0 / (384.0 * knlWidthSqu) * ((24.0 + dijSqu) * p0Val + 8.0 * (6.0 + dijSqu) * p1Val); double lrVal = dotProduct(liVec, rjVec); dqKVec.x += lrVal * dqKVal * qijVec.x; dqKVec.y += lrVal * dqKVal * qijVec.y; } setVector(d_dqiKMat, dqKVec, lmkiIdx, lmkiNum); } return; } __global__ void dqjMatern4(double *d_dqjKMat, double *d_lmkiMat, double *d_lmkjMat, double *d_lftMat, double *d_rgtMat, double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum) { int lmkjIdx = blockIdx.x * blockDim.x + threadIdx.x; if ( lmkjIdx < lmkjNum ) { vector dqKVec = {0.0, 0.0}; vector qjVec, rjVec; getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum); getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum); for ( int lmkiIdx = 0; lmkiIdx < lmkiNum; ++lmkiIdx ) { vector qiVec, liVec; getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum); getVector(liVec, d_lftMat, lmkiIdx, lmkiNum); vector qjiVec; vectorSubtract(qjiVec, qjVec, qiVec); double dijVal = eucnorm(qjiVec) / knlWidth; double dijSqu = dijVal * dijVal; double p0Val, p1Val; p0Fcn(p0Val, dijVal); p1Fcn(p1Val, dijVal); double dqKVal = -1.0 / (384.0 * knlWidthSqu) * ((24.0 + dijSqu) * p0Val + 8.0 * (6.0 + dijSqu) * p1Val); double lrVal = dotProduct(liVec, rjVec); dqKVec.x += lrVal * dqKVal * qjiVec.x; dqKVec.y += lrVal * dqKVal * qjiVec.y; } setVector(d_dqjKMat, dqKVec, lmkjIdx, lmkjNum); } return; } void dqKernel(double *d_dqKMat, double *d_lmkMat, double *d_lftMat, double *d_rgtMat, int knlOrder, double knlWidth, int lmkNum) { // order 0 to 4: Matern kernel of order 0 to 4 // order -1: Gaussian kernel setBesselkCoefficients(); double knlWidthSqu = knlWidth * knlWidth; int blkNum = (lmkNum - 1) / BLKDIM + 1; switch ( knlOrder ) { case -1: dqGaussian <<<blkNum, BLKDIM>>> (d_dqKMat, d_lmkMat, d_lftMat, d_rgtMat, knlWidth, knlWidthSqu, lmkNum); break; // Matern0 is not differentiable case 1: dqMatern1 <<<blkNum, BLKDIM>>> (d_dqKMat, d_lmkMat, d_lftMat, d_rgtMat, knlWidth, knlWidthSqu, lmkNum); break; case 2: dqMatern2 <<<blkNum, BLKDIM>>> (d_dqKMat, d_lmkMat, d_lftMat, d_rgtMat, knlWidth, knlWidthSqu, lmkNum); break; case 3: dqMatern3 <<<blkNum, BLKDIM>>> (d_dqKMat, d_lmkMat, d_lftMat, d_rgtMat, knlWidth, knlWidthSqu, lmkNum); break; case 4: dqMatern4 <<<blkNum, BLKDIM>>> (d_dqKMat, d_lmkMat, d_lftMat, d_rgtMat, knlWidth, knlWidthSqu, lmkNum); break; } return; } void dqKernel(double *d_dqiKMat, double *d_dqjKMat, double *d_lmkiMat, double *d_lmkjMat, double *d_lftMat, double *d_rgtMat, int knlOrder, double knlWidth, int 
lmkiNum, int lmkjNum) { // order 0 to 4: Matern kernel of order 0 to 4 // order -1: Gaussian kernel setBesselkCoefficients(); double knlWidthSqu = knlWidth * knlWidth; int blkiNum = (lmkiNum - 1) / BLKDIM + 1; int blkjNum = (lmkjNum - 1) / BLKDIM + 1; switch ( knlOrder ) { case -1: dqiGaussian <<<blkiNum, BLKDIM>>> (d_dqiKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat, knlWidth, knlWidthSqu, lmkiNum, lmkjNum); dqjGaussian <<<blkjNum, BLKDIM>>> (d_dqjKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat, knlWidth, knlWidthSqu, lmkiNum, lmkjNum); break; // Matern0 is not differentiable case 1: dqiMatern1 <<<blkiNum, BLKDIM>>> (d_dqiKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat, knlWidth, knlWidthSqu, lmkiNum, lmkjNum); dqjMatern1 <<<blkjNum, BLKDIM>>> (d_dqjKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat, knlWidth, knlWidthSqu, lmkiNum, lmkjNum); break; case 2: dqiMatern2 <<<blkiNum, BLKDIM>>> (d_dqiKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat, knlWidth, knlWidthSqu, lmkiNum, lmkjNum); dqjMatern2 <<<blkjNum, BLKDIM>>> (d_dqjKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat, knlWidth, knlWidthSqu, lmkiNum, lmkjNum); break; case 3: dqiMatern3 <<<blkiNum, BLKDIM>>> (d_dqiKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat, knlWidth, knlWidthSqu, lmkiNum, lmkjNum); dqjMatern3 <<<blkjNum, BLKDIM>>> (d_dqjKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat, knlWidth, knlWidthSqu, lmkiNum, lmkjNum); break; case 4: dqiMatern4 <<<blkiNum, BLKDIM>>> (d_dqiKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat, knlWidth, knlWidthSqu, lmkiNum, lmkjNum); dqjMatern4 <<<blkjNum, BLKDIM>>> (d_dqjKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat, knlWidth, knlWidthSqu, lmkiNum, lmkjNum); break; } return; }
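Both versions of this file dispatch on knlOrder to Matern kernels of order 1 to 4 or, for order -1, to a Gaussian kernel whose per-pair gradient factor is -2/w^2 * exp(-|q_i - q_j|^2 / w^2). For checking results on small inputs, a plain host-side reference for the Gaussian branch of dqGaussian could look like the sketch below; the Vec2 struct and function name are illustrative assumptions, not part of the matvec.h API.

// Host-side reference for the Gaussian branch of dqGaussian above (sketch, 2-D points).
#include <cmath>
#include <cstddef>
#include <vector>

struct Vec2 { double x, y; };

// dqK[i] = sum_j (l_i.r_j + l_j.r_i) * (-2/w^2) * exp(-|q_i - q_j|^2 / w^2) * (q_i - q_j)
std::vector<Vec2> dqGaussianHost(const std::vector<Vec2> &q,
                                 const std::vector<Vec2> &l,
                                 const std::vector<Vec2> &r,
                                 double knlWidth)
{
	const double w2 = knlWidth * knlWidth;
	std::vector<Vec2> out(q.size(), Vec2{0.0, 0.0});
	for (std::size_t i = 0; i < q.size(); ++i)
	{
		for (std::size_t j = 0; j < q.size(); ++j)
		{
			Vec2 d = {q[i].x - q[j].x, q[i].y - q[j].y};
			double dqK = -2.0 / w2 * std::exp(-(d.x * d.x + d.y * d.y) / w2);
			double lr  = l[i].x * r[j].x + l[i].y * r[j].y
			           + l[j].x * r[i].x + l[j].y * r[i].y;
			out[i].x += lr * dqK * d.x;
			out[i].y += lr * dqK * d.y;
		}
	}
	return out;
}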
b08ba6e384b86b2ee7bb70cb28851d386ac0bbee.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

#define QUEENS 10

__device__ void kiir(char *A)
{
	char s[QUEENS*21 + 1];
	int k = 0;
	for(int i = 0; i < QUEENS; i++)
	{
		for(int j = 0; j < QUEENS; j++)
		{
			if(A[i] == j) s[k++] = 'Q';
			else s[k++] = '.';
			s[k++] = ' ';
		}
		s[k++] = '\n';
	}
	s[k] = '\0';
	printf("%s\n", s);
}

__global__ void queen(int *db, const int n)
{
	{
		bool B[QUEENS];
		for(int i = 0; i < QUEENS; i++) B[i] = 0;
		B[threadIdx.x] = 1;
		B[threadIdx.y] = 1;
		B[threadIdx.z] = 1;
		B[blockIdx.x/10] = 1;
		B[blockIdx.x%10] = 1;
		B[blockIdx.y/10] = 1;
		B[blockIdx.y%10] = 1;
		B[blockIdx.z/10] = 1;
		B[blockIdx.z%10] = 1;
		B[n] = 1;
		for(int i = 0; i < QUEENS; i++) if(B[i] == 0) return;
	}

	char A[QUEENS];
	A[0] = threadIdx.x;
	A[1] = threadIdx.y;
	A[2] = threadIdx.z;
	A[3] = blockIdx.x/10;
	A[4] = blockIdx.x%10;
	A[5] = blockIdx.y/10;
	A[6] = blockIdx.y%10;
	A[7] = blockIdx.z/10;
	A[8] = blockIdx.z%10;
	A[9] = n;

	for(int i = 0; i < QUEENS - 1; i++)
		for(int j = i + 1; j < QUEENS; j++)
			if(abs(i - j) == abs(A[i] - A[j])) return;

	atomicAdd(db, 1);
	printf("%d.\n", *db);
	kiir(A);
}

int main()
{
	int h = 0, *d;
	hipMalloc((void**) &d, sizeof(int));
	hipMemcpy(d, &h, sizeof(int), hipMemcpyHostToDevice);

	dim3 blocksPerGrid(100, 100, 100);
	dim3 threadsPerBlock(10, 10, 10);
	for(int i = 0; i < QUEENS; i++)
		hipLaunchKernelGGL(( queen), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d, i);

	hipMemcpy(&h, d, sizeof(int), hipMemcpyDeviceToHost);
	hipFree(d);
	hipDeviceReset();

	hipError_t error = hipGetLastError();
	if(error != hipSuccess)
	{
		fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(error));
		return -1;
	}

	fprintf(stderr, "Solutions: %d\n", h);
	fprintf(stderr, "\nDone\n");
	return 0;
}
b08ba6e384b86b2ee7bb70cb28851d386ac0bbee.cu
#include <stdio.h>

#define QUEENS 10

__device__ void kiir(char *A)
{
	char s[QUEENS*21 + 1];
	int k = 0;
	for(int i = 0; i < QUEENS; i++)
	{
		for(int j = 0; j < QUEENS; j++)
		{
			if(A[i] == j) s[k++] = 'Q';
			else s[k++] = '.';
			s[k++] = ' ';
		}
		s[k++] = '\n';
	}
	s[k] = '\0';
	printf("%s\n", s);
}

__global__ void queen(int *db, const int n)
{
	{
		bool B[QUEENS];
		for(int i = 0; i < QUEENS; i++) B[i] = 0;
		B[threadIdx.x] = 1;
		B[threadIdx.y] = 1;
		B[threadIdx.z] = 1;
		B[blockIdx.x/10] = 1;
		B[blockIdx.x%10] = 1;
		B[blockIdx.y/10] = 1;
		B[blockIdx.y%10] = 1;
		B[blockIdx.z/10] = 1;
		B[blockIdx.z%10] = 1;
		B[n] = 1;
		for(int i = 0; i < QUEENS; i++) if(B[i] == 0) return;
	}

	char A[QUEENS];
	A[0] = threadIdx.x;
	A[1] = threadIdx.y;
	A[2] = threadIdx.z;
	A[3] = blockIdx.x/10;
	A[4] = blockIdx.x%10;
	A[5] = blockIdx.y/10;
	A[6] = blockIdx.y%10;
	A[7] = blockIdx.z/10;
	A[8] = blockIdx.z%10;
	A[9] = n;

	for(int i = 0; i < QUEENS - 1; i++)
		for(int j = i + 1; j < QUEENS; j++)
			if(abs(i - j) == abs(A[i] - A[j])) return;

	atomicAdd(db, 1);
	printf("%d.\n", *db);
	kiir(A);
}

int main()
{
	int h = 0, *d;
	cudaMalloc((void**) &d, sizeof(int));
	cudaMemcpy(d, &h, sizeof(int), cudaMemcpyHostToDevice);

	dim3 blocksPerGrid(100, 100, 100);
	dim3 threadsPerBlock(10, 10, 10);
	for(int i = 0; i < QUEENS; i++)
		queen<<<blocksPerGrid, threadsPerBlock>>>(d, i);

	cudaMemcpy(&h, d, sizeof(int), cudaMemcpyDeviceToHost);
	cudaFree(d);
	cudaDeviceReset();

	cudaError_t error = cudaGetLastError();
	if(error != cudaSuccess)
	{
		fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(error));
		return -1;
	}

	fprintf(stderr, "Solutions: %d\n", h);
	fprintf(stderr, "\nDone\n");
	return 0;
}
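The queen kernel above maps one candidate board per thread: the 100x100x100 grid times the 10x10x10 block gives 10^9 column assignments for queens 0 through 8 per launch, and the host loop over i fixes queen 9, so all 10^10 candidate boards are enumerated across the ten launches. A host-side check equivalent to the two device-side filters (column uniqueness via the B array, then the diagonal test) is sketched below for reference; it is not part of the original file.

// Host-side equivalent of the two device-side filters in queen() above (sketch).
#include <cstdlib>

#define QUEENS 10

bool isValidBoard(const char A[QUEENS])
{
	for (int i = 0; i < QUEENS - 1; i++)
	{
		for (int j = i + 1; j < QUEENS; j++)
		{
			if (A[i] == A[j]) return false;                        // two queens share a column
			if (std::abs(i - j) == std::abs(A[i] - A[j])) return false; // two queens share a diagonal
		}
	}
	return true;
}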
3c6858cbdad17a84f0260fff9a659ef4d4397610.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
#include <iostream>

using namespace std;

__global__ void kernelFunction()
{
	return;
}

//extern "C"
void CudaMain()
{
	int threads = 32;
	dim3 gridSize(1, 1, 1);
	dim3 blockSize(threads, 1, 1);
	kernelFunction<<<gridSize, blockSize>>>();
}
*/
3c6858cbdad17a84f0260fff9a659ef4d4397610.cu
/*
#include <iostream>

using namespace std;

__global__ void kernelFunction()
{
	return;
}

//extern "C"
void CudaMain()
{
	int threads = 32;
	dim3 gridSize(1, 1, 1);
	dim3 blockSize(threads, 1, 1);
	kernelFunction<<<gridSize, blockSize>>>();
}
*/
6d4558a5b3ec490a61721ce48437c7fdf826de50.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cuDBSCAN.hpp" __global__ void assignInitialClusters(KernelObject *cudbscan); __global__ void mergeInto(KernelObject *cudbscan, int clusterIndex); cuDBSCAN::cuDBSCAN(std::vector<Point2> &points, float eps, int mnpts) { ko.pointCount = points.size(); ko.eps = eps; ko.mnpts = mnpts; ko.allocate(); for (int i = 0; i < points.size(); i++) labels[i] = -99; setData(points); } void cuDBSCAN::setData(std::vector<Point2> points) { cuda_call(hipMemcpy(ko.data, &points[0], points.size() * sizeof(Point2), hipMemcpyHostToDevice)); cuda_call(hipMemcpy(ko.devPtr, &ko, sizeof(KernelObject), hipMemcpyHostToDevice)); } int cuDBSCAN::run() { int blockSize = 32; int nBlocks = (int) ::ceil(ko.pointCount / (float) blockSize); hipLaunchKernelGGL(( assignInitialClusters) , dim3(nBlocks), dim3(blockSize) , 0, 0, ko.devPtr); cuda_call(hipPeekAtLastError()); cuda_call(hipDeviceSynchronize()); // Now merge the clusters for (int i = 0; i < ko.pointCount; i++) while (merge(i)); // Keep merging all clusters into this one // Copy back the results int *clusterMatrix = new int[ko.pointCount * ko.pointCount]; int *valid = new int[ko.pointCount]; cuda_call(hipMemcpy(clusterMatrix, ko.clusterMatrix, ko.pointCount * ko.pointCount * sizeof(int), hipMemcpyDeviceToHost)); cuda_call(hipMemcpy(valid, ko.valid, ko.pointCount * sizeof(int), hipMemcpyDeviceToHost)); int runningClusterIndex = 0; for (int col = 0; col < ko.pointCount; col++) { if (!valid[col]) continue; std::vector<int> assignedIndices; for (int row = 0; row < ko.pointCount; row++) if (clusterMatrix[row * ko.pointCount + col]) assignedIndices.push_back(row); if (assignedIndices.size() > ko.mnpts) clusters[runningClusterIndex++] = assignedIndices; } for (auto &c : clusters) for (auto &index : c.second) labels[index] = c.first; delete[] clusterMatrix; delete[] valid; return clusters.size(); } bool cuDBSCAN::merge(int clusterIndex) { int blockSize = 32; int nBlocks = (int) ::ceil(ko.pointCount / (float) blockSize); // Set cluster unmerged! cuda_call(hipMemset(ko.mergeHappened, 0, sizeof(int))); hipLaunchKernelGGL(( mergeInto) , dim3(nBlocks), dim3(blockSize) , 0, 0, ko.devPtr, clusterIndex); cuda_call(hipPeekAtLastError()); cuda_call(hipDeviceSynchronize()); // Did a merge happen? int merged = -1; cuda_call(hipMemcpy(&merged, ko.mergeHappened, sizeof(int), hipMemcpyDeviceToHost)); assert(merged != -1); bool ret = merged > 0; return ret; } __global__ void assignInitialClusters(KernelObject *cudbscan) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= cudbscan->pointCount) return; for (int i = 0; i < cudbscan->pointCount; i++) { // Same elem is always 1 if (i == tid) { cudbscan->set(i, tid); continue; } // Compute the distance if (cudbscan->computeDistance(i, tid) <= cudbscan->eps) cudbscan->set(i, tid); } cudbscan->valid[tid] = 1; cudbscan->reroute[tid] = -1; } __global__ void mergeInto(KernelObject *cudbscan, int clusterIndex) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= cudbscan->pointCount || !cudbscan->isValid(tid) || // Am I valid? tid == clusterIndex) // Am I trying to merge myself into myself? return; int targetCluster = cudbscan->getReroutedIndex(clusterIndex); // Don't merge me into my old self! 
if (tid == targetCluster) return; bool doMerge = false; for (int i = 0; i < cudbscan->pointCount; i++) { if (cudbscan->get(i, tid) && cudbscan->get(i, targetCluster)) { doMerge = true; break; } } if (doMerge) { // Invalidate and set reroute cudbscan->valid[tid] = 0; cudbscan->reroute[tid] = targetCluster; *cudbscan->mergeHappened = 1; // Do the merge for (int i = 0; i < cudbscan->pointCount; i++) { if (cudbscan->get(i, tid)) cudbscan->set(i, targetCluster); } } } cuDBSCAN::~cuDBSCAN() { ko.free(); }
6d4558a5b3ec490a61721ce48437c7fdf826de50.cu
#include "cuDBSCAN.hpp" __global__ void assignInitialClusters(KernelObject *cudbscan); __global__ void mergeInto(KernelObject *cudbscan, int clusterIndex); cuDBSCAN::cuDBSCAN(std::vector<Point2> &points, float eps, int mnpts) { ko.pointCount = points.size(); ko.eps = eps; ko.mnpts = mnpts; ko.allocate(); for (int i = 0; i < points.size(); i++) labels[i] = -99; setData(points); } void cuDBSCAN::setData(std::vector<Point2> points) { cuda_call(cudaMemcpy(ko.data, &points[0], points.size() * sizeof(Point2), cudaMemcpyHostToDevice)); cuda_call(cudaMemcpy(ko.devPtr, &ko, sizeof(KernelObject), cudaMemcpyHostToDevice)); } int cuDBSCAN::run() { int blockSize = 32; int nBlocks = (int) std::ceil(ko.pointCount / (float) blockSize); assignInitialClusters <<< nBlocks, blockSize >>> (ko.devPtr); cuda_call(cudaPeekAtLastError()); cuda_call(cudaDeviceSynchronize()); // Now merge the clusters for (int i = 0; i < ko.pointCount; i++) while (merge(i)); // Keep merging all clusters into this one // Copy back the results int *clusterMatrix = new int[ko.pointCount * ko.pointCount]; int *valid = new int[ko.pointCount]; cuda_call(cudaMemcpy(clusterMatrix, ko.clusterMatrix, ko.pointCount * ko.pointCount * sizeof(int), cudaMemcpyDeviceToHost)); cuda_call(cudaMemcpy(valid, ko.valid, ko.pointCount * sizeof(int), cudaMemcpyDeviceToHost)); int runningClusterIndex = 0; for (int col = 0; col < ko.pointCount; col++) { if (!valid[col]) continue; std::vector<int> assignedIndices; for (int row = 0; row < ko.pointCount; row++) if (clusterMatrix[row * ko.pointCount + col]) assignedIndices.push_back(row); if (assignedIndices.size() > ko.mnpts) clusters[runningClusterIndex++] = assignedIndices; } for (auto &c : clusters) for (auto &index : c.second) labels[index] = c.first; delete[] clusterMatrix; delete[] valid; return clusters.size(); } bool cuDBSCAN::merge(int clusterIndex) { int blockSize = 32; int nBlocks = (int) std::ceil(ko.pointCount / (float) blockSize); // Set cluster unmerged! cuda_call(cudaMemset(ko.mergeHappened, 0, sizeof(int))); mergeInto <<< nBlocks, blockSize >>> (ko.devPtr, clusterIndex); cuda_call(cudaPeekAtLastError()); cuda_call(cudaDeviceSynchronize()); // Did a merge happen? int merged = -1; cuda_call(cudaMemcpy(&merged, ko.mergeHappened, sizeof(int), cudaMemcpyDeviceToHost)); assert(merged != -1); bool ret = merged > 0; return ret; } __global__ void assignInitialClusters(KernelObject *cudbscan) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= cudbscan->pointCount) return; for (int i = 0; i < cudbscan->pointCount; i++) { // Same elem is always 1 if (i == tid) { cudbscan->set(i, tid); continue; } // Compute the distance if (cudbscan->computeDistance(i, tid) <= cudbscan->eps) cudbscan->set(i, tid); } cudbscan->valid[tid] = 1; cudbscan->reroute[tid] = -1; } __global__ void mergeInto(KernelObject *cudbscan, int clusterIndex) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= cudbscan->pointCount || !cudbscan->isValid(tid) || // Am I valid? tid == clusterIndex) // Am I trying to merge myself into myself? return; int targetCluster = cudbscan->getReroutedIndex(clusterIndex); // Don't merge me into my old self! 
if (tid == targetCluster) return; bool doMerge = false; for (int i = 0; i < cudbscan->pointCount; i++) { if (cudbscan->get(i, tid) && cudbscan->get(i, targetCluster)) { doMerge = true; break; } } if (doMerge) { // Invalidate and set reroute cudbscan->valid[tid] = 0; cudbscan->reroute[tid] = targetCluster; *cudbscan->mergeHappened = 1; // Do the merge for (int i = 0; i < cudbscan->pointCount; i++) { if (cudbscan->get(i, tid)) cudbscan->set(i, targetCluster); } } } cuDBSCAN::~cuDBSCAN() { ko.free(); }
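The cuDBSCAN class above builds an N-by-N reachability matrix on the GPU, then repeatedly merges overlapping clusters until no merge happens, keeping only clusters with more than mnpts members. A hypothetical driver might look like the sketch below; the Point2 layout ({float x, y;}) and the exact container types behind the labels and clusters members are declared in cuDBSCAN.hpp and are assumptions here, not taken from this file.

// Hypothetical usage sketch for cuDBSCAN (assumes Point2 is an {x, y} float pair).
#include <cstdio>
#include <vector>
#include "cuDBSCAN.hpp"

int main()
{
	// Two well-separated groups of three points each.
	std::vector<Point2> pts = { {0.0f, 0.0f}, {0.1f, 0.0f}, {0.0f, 0.1f},
	                            {5.0f, 5.0f}, {5.1f, 5.0f}, {5.0f, 5.1f} };

	cuDBSCAN scan(pts, /* eps = */ 0.5f, /* mnpts = */ 2);
	int nClusters = scan.run();

	printf("clusters found: %d\n", nClusters);
	for (auto &c : scan.clusters)
		printf("cluster %d has %zu points\n", c.first, c.second.size());
	return 0;
}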
dbab2feaa4b4ad87b1a6654b1bdb9ff49bbb3327.hip
// !!! This is a file automatically generated by hipify!!! /* * University of Illinois Open Source License * Copyright 2010-2018 Luthey-Schulten Group, * All rights reserved. * * Developed by: Luthey-Schulten Group * University of Illinois at Urbana-Champaign * http://www.scs.uiuc.edu/~schulten * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the Software), to deal with * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished to * do so, subject to the following conditions: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimers. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimers in the documentation * and/or other materials provided with the distribution. * * - Neither the names of the Luthey-Schulten Group, University of Illinois at * Urbana-Champaign, nor the names of its contributors may be used to endorse or * promote products derived from this Software without specific prior written * permission. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS WITH THE SOFTWARE. * * Author(s): Elijah Roberts */ #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "config.h" /* Required flags: LS_WORDS_PER_SITE LS_APRON_SIZE LS_XYZ_BLOCK_X_SIZE LS_XYZ_BLOCK_Y_SIZE LS_XYZ_BLOCK_Z_SIZE Optional flags: LS_BOUNDARY_PERIODIC - defined if the lattice should laod with periodic boundary conditions LS_BOUNDARY_VALUE - the value to laod into lattice boundary sites (default 0) LS_PACKED_SITES - defined if objects are stored in lattice sites in such a way that if one object position in a site is empty all higher words are empty LS_PACKED_LAST_OBJECT_MASK - or mask for the last object in a word LS_LATTICE_SIZE_POWER_TWO - defined if lattice x, y, and z sizes are enforced to be a power of two */ #if !defined LS_WORDS_PER_SITE #error "Must define the number of 32-bit words per lattice site." #endif #if !defined LS_APRON_SIZE #error "Must define the number of apron sites for the lattice window." #endif #if !defined LS_XYZ_BLOCK_X_SIZE || !defined LS_XYZ_BLOCK_Y_SIZE || !defined LS_XYZ_BLOCK_Z_SIZE #error "Must define the x, y, and z dimensions of a block for xyz window copying." #endif #if !defined LS_BOUNDARY_VALUE #define LS_BOUNDARY_VALUE 0 #endif #if defined LS_PACKED_SITES && !defined LS_PACKED_LAST_OBJECT_MASK #error "Must set the last object mask when using packed sites." 
#endif #define LS_XYZ_WINDOW_X_SIZE (LS_XYZ_BLOCK_X_SIZE+2*LS_APRON_SIZE) #define LS_XYZ_WINDOW_Y_SIZE (LS_XYZ_BLOCK_Y_SIZE+2*LS_APRON_SIZE) #define LS_XYZ_WINDOW_Z_SIZE (LS_XYZ_BLOCK_Z_SIZE+2*LS_APRON_SIZE) #define LS_XYZ_WINDOW_SIZE (LS_XYZ_WINDOW_X_SIZE*LS_XYZ_WINDOW_Y_SIZE*LS_XYZ_WINDOW_Y_SIZE) inline bool calculateLaunchParameters(unsigned int * gridXSize, dim3 * gridSize, dim3 * threadBlockSize, const unsigned int blockXSize, const unsigned int blockYSize, const unsigned int blockZSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeZSize, const unsigned int xThreads, const unsigned int yThreads, const unsigned int zThreads) { if (latticeXSize%32 != 0 || latticeXSize%blockXSize != 0 || latticeYSize%blockYSize != 0 || latticeZSize%blockZSize != 0) return false; // Calculate the grid size. *gridXSize = latticeXSize/blockXSize; gridSize->x = (*gridXSize)*(latticeYSize/blockYSize); gridSize->y = latticeZSize/blockZSize; gridSize->z = 1; threadBlockSize->x = blockXSize; threadBlockSize->y = blockYSize+2*apronSize; threadBlockSize->z = 1; return true; } /** * Calculates the x, y, and z indices of the current thread block. When the lattice size is not * guaranteed to be a power of two, the integer divide is only performed once in the first thread * for optimal performance. */ inline __device__ void calculateBlockIndices(unsigned int * bx, unsigned int * by, unsigned int * bz, unsigned int * gx, unsigned int * gy, unsigned int gridXSize) { if (threadIdx.x == 0 && threadIdx.y == 0) { *by = blockIdx.x/gridXSize; *bx = blockIdx.x-gridXSize*(*by); *bz = blockIdx.y; *gx = gridXSize; *gy = gridDim.x/gridXSize; } __syncthreads(); } inline __device__ void calculateXYZThreadIndices(const unsigned int bx, const unsigned int by, const unsigned int bz, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeXYSize, int * blockXIndex, int * blockYIndex, int * blockZIndex, int * latticeXIndex, int * latticeYIndex, int * latticeZIndex, unsigned int * latticeIndex, unsigned int * windowXIndex, unsigned int * windowYIndex, unsigned int * windowZIndex, unsigned int * windowIndex) { // Figure out the index of this thread in the block. *blockXIndex = threadIdx.x; *blockYIndex = threadIdx.y-LS_APRON_SIZE; *blockZIndex = -LS_APRON_SIZE; // Figure out the index of this thread in the lattice. *latticeXIndex = (bx*LS_XYZ_BLOCK_X_SIZE) + *blockXIndex; *latticeYIndex = (by*LS_XYZ_BLOCK_Y_SIZE) + *blockYIndex; *latticeZIndex = (bz*LS_XYZ_BLOCK_Z_SIZE) + *blockZIndex; int latticeYWrap=0; if (*latticeYIndex < 0) latticeYWrap = latticeYSize; else if (*latticeYIndex >= latticeYSize) latticeYWrap = -latticeYSize; *latticeIndex = (bz*latticeXYSize) + (unsigned int)(((*latticeYIndex+latticeYWrap)*latticeXSize) + *latticeXIndex); // Figure out the index of this thread in the window. *windowXIndex = threadIdx.x+LS_APRON_SIZE; *windowYIndex = threadIdx.y; *windowZIndex = 0; *windowIndex = (*windowYIndex*LS_XYZ_WINDOW_X_SIZE) + *windowXIndex; } // On architectures that support warp voting and if sites are packed, skip loading the next word if no threads in the warp had a completely filled previous word. 
#if defined LS_PACKED_SITES && __CUDA_ARCH__ >= 120 #define _LS_COND_LOAD_SITE(window, window2, windowIndex, value)\ if (__any(window[windowIndex]&LS_PACKED_LAST_OBJECT_MASK)) window2[windowIndex] = value;\ else window2[windowIndex] = 0; #else #define _LS_COND_LOAD_SITE(window, window2, windowIndex, value) window2[windowIndex] = value; #endif /** * Copies a window of a lattice from device memory to shared memory. */ inline __device__ void copyXYZWindowFromLattice(const unsigned int* lattice, unsigned int* window, const unsigned int origLatticeIndex, const int latticeYIndex, const int origLatticeZIndex, const unsigned int latticeYSize, const unsigned int latticeZSize, const unsigned int latticeXYSize, const unsigned int latticeXYZSize, const unsigned int origWindowIndex, const unsigned int windowXIndex, const unsigned int bx, const unsigned int gx) { //Create any needed pointer aliases. #if LS_WORDS_PER_SITE >= 2 const unsigned int* lattice2 = lattice+latticeXYZSize; unsigned int* window2 = window+LS_XYZ_WINDOW_SIZE; #endif #if !defined LS_BOUNDARY_PERIODIC int isYBoundary = (latticeYIndex < 0 || latticeYIndex >= latticeYSize); #endif // Loop through each z plane in the window. unsigned int latticeIndex = origLatticeIndex; int latticeZIndex = origLatticeZIndex; unsigned int windowIndex = origWindowIndex; for (int windowZIndex=0; windowZIndex<LS_XYZ_WINDOW_Z_SIZE; latticeIndex+=latticeXYSize, latticeZIndex++, windowIndex+=LS_XYZ_WINDOW_X_SIZE*LS_XYZ_WINDOW_Y_SIZE, windowZIndex++) { #if defined LS_BOUNDARY_PERIODIC #error "Unimplemented." #else int isZBoundary = (latticeZIndex < 0 || latticeZIndex >= latticeZSize); #endif #if LS_APRON_SIZE > 0 // If this thread is one that needs to load part of the leading apron, load it. if (windowXIndex >= LS_XY_BLOCK_X_SIZE) { #if defined LS_BOUNDARY_PERIODIC window[windowIndex-LS_XYZ_BLOCK_X_SIZE] = (bx>0)?lattice[latticeIndex-LS_XYZ_BLOCK_X_SIZE]:lattice[latticeIndex-LS_XYZ_BLOCK_X_SIZE+latticeXSize]; #if LS_WORDS_PER_SITE >= 2 _LS_COND_LOAD_SITE(window, window2, windowIndex-LS_XYZ_BLOCK_X_SIZE, ((bx>0)?lattice2[latticeIndex-LS_XYZ_BLOCK_X_SIZE]:lattice2[latticeIndex-LS_XYZ_BLOCK_X_SIZE+latticeXSize])); #endif #else int isBoundary = (bx == 0 || isYBoundary || isZBoundary); window[windowIndex-LS_XYZ_BLOCK_X_SIZE] = (!isBoundary)?lattice[latticeIndex-LS_XYZ_BLOCK_X_SIZE]:LS_BOUNDARY_VALUE; #if LS_WORDS_PER_SITE >= 2 _LS_COND_LOAD_SITE(window, window2, windowIndex-LS_XYZ_BLOCK_X_SIZE, ((!isBoundary)?lattice2[latticeIndex-LS_XYZ_BLOCK_X_SIZE]:LS_BOUNDARY_VALUE)); #endif #endif } #endif // Load the sites. #if defined LS_BOUNDARY_PERIODIC window[windowIndex] = lattice[latticeIndex]; #if LS_WORDS_PER_SITE >= 2 _LS_COND_LOAD_SITE(window, window2, windowIndex, lattice2[latticeIndex]); #endif #else window[windowIndex] = (!isYBoundary && !isZBoundary)?lattice[latticeIndex]:LS_BOUNDARY_VALUE; #if LS_WORDS_PER_SITE >= 2 _LS_COND_LOAD_SITE(window, window2, windowIndex, ((!isYBoundary && !isZBoundary)?lattice2[latticeIndex]:LS_BOUNDARY_VALUE)); #endif #endif // Load the trailing apron. #if LS_APRON_SIZE > 0 // If this thread is one that needs to load part of the leading apron, load it. 
if (windowXIndex < 2*LS_APRON_SIZE) { #if defined LS_BOUNDARY_PERIODIC window[windowIndex+LS_XYZ_BLOCK_X_SIZE] = (bx<gx-1)?lattice[latticeIndex+LS_XYZ_BLOCK_X_SIZE]:lattice[latticeIndex+LS_XYZ_BLOCK_X_SIZE-latticeXSize]; #if LS_WORDS_PER_SITE >= 2 _LS_COND_LOAD_SITE(window, window2, windowIndex+LS_XYZ_BLOCK_X_SIZE, ((bx<gx-1)?lattice2[latticeIndex+LS_XYZ_BLOCK_X_SIZE]:lattice2[latticeIndex+LS_XYZ_BLOCK_X_SIZE-latticeXSize])); #endif #else int isBoundary = (bx == gx-1 || isYBoundary || isZBoundary); window[windowIndex+LS_XYZ_BLOCK_X_SIZE] = (!isBoundary)?lattice[latticeIndex+LS_XYZ_BLOCK_X_SIZE]:LS_BOUNDARY_VALUE; #if LS_WORDS_PER_SITE >= 2 _LS_COND_LOAD_SITE(window, window2, windowIndex+LS_XYZ_BLOCK_X_SIZE, ((!isBoundary)?lattice2[latticeIndex+LS_XYZ_BLOCK_X_SIZE]:LS_BOUNDARY_VALUE)); #endif #endif } #endif } } inline __device__ void copyXYZWindowToLattice(unsigned int* lattice, const unsigned int* window, const unsigned int latticeIndex, const unsigned int latticeXYSize, const unsigned int latticeXYZSize, const unsigned int windowIndex) { //Create any needed pointer aliases. #if LS_WORDS_PER_SITE >= 2 unsigned int* lattice2 = lattice+latticeXYZSize; const unsigned int* window2 = window+LS_XYZ_WINDOW_SIZE; #endif // Load the block. for (int i=0; i<LS_XYZ_Z_LOOPS; i++) { //int loopLatticeZIndex = LS_Z_LOOP_Z_INDEX(latticeZIndex,i); int loopLatticeIndex = LS_Z_LOOP_LATTICE_INDEX(latticeIndex,i,latticeXYSize); unsigned int loopWindowIndex = LS_Z_LOOP_WINDOW_INDEX(windowIndex,i); // Copy the site for the block. lattice[loopLatticeIndex] = window[loopWindowIndex]; // Load the next part of the site, if it exists. #if LS_WORDS_PER_SITE >= 2 lattice2[loopLatticeIndex] = window2[loopWindowIndex]; #endif } }
dbab2feaa4b4ad87b1a6654b1bdb9ff49bbb3327.cu
/* * University of Illinois Open Source License * Copyright 2010-2018 Luthey-Schulten Group, * All rights reserved. * * Developed by: Luthey-Schulten Group * University of Illinois at Urbana-Champaign * http://www.scs.uiuc.edu/~schulten * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the Software), to deal with * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished to * do so, subject to the following conditions: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimers. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimers in the documentation * and/or other materials provided with the distribution. * * - Neither the names of the Luthey-Schulten Group, University of Illinois at * Urbana-Champaign, nor the names of its contributors may be used to endorse or * promote products derived from this Software without specific prior written * permission. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS WITH THE SOFTWARE. * * Author(s): Elijah Roberts */ #include <cuda.h> #include <cuda_runtime.h> #include "config.h" /* Required flags: LS_WORDS_PER_SITE LS_APRON_SIZE LS_XYZ_BLOCK_X_SIZE LS_XYZ_BLOCK_Y_SIZE LS_XYZ_BLOCK_Z_SIZE Optional flags: LS_BOUNDARY_PERIODIC - defined if the lattice should laod with periodic boundary conditions LS_BOUNDARY_VALUE - the value to laod into lattice boundary sites (default 0) LS_PACKED_SITES - defined if objects are stored in lattice sites in such a way that if one object position in a site is empty all higher words are empty LS_PACKED_LAST_OBJECT_MASK - or mask for the last object in a word LS_LATTICE_SIZE_POWER_TWO - defined if lattice x, y, and z sizes are enforced to be a power of two */ #if !defined LS_WORDS_PER_SITE #error "Must define the number of 32-bit words per lattice site." #endif #if !defined LS_APRON_SIZE #error "Must define the number of apron sites for the lattice window." #endif #if !defined LS_XYZ_BLOCK_X_SIZE || !defined LS_XYZ_BLOCK_Y_SIZE || !defined LS_XYZ_BLOCK_Z_SIZE #error "Must define the x, y, and z dimensions of a block for xyz window copying." #endif #if !defined LS_BOUNDARY_VALUE #define LS_BOUNDARY_VALUE 0 #endif #if defined LS_PACKED_SITES && !defined LS_PACKED_LAST_OBJECT_MASK #error "Must set the last object mask when using packed sites." 
#endif #define LS_XYZ_WINDOW_X_SIZE (LS_XYZ_BLOCK_X_SIZE+2*LS_APRON_SIZE) #define LS_XYZ_WINDOW_Y_SIZE (LS_XYZ_BLOCK_Y_SIZE+2*LS_APRON_SIZE) #define LS_XYZ_WINDOW_Z_SIZE (LS_XYZ_BLOCK_Z_SIZE+2*LS_APRON_SIZE) #define LS_XYZ_WINDOW_SIZE (LS_XYZ_WINDOW_X_SIZE*LS_XYZ_WINDOW_Y_SIZE*LS_XYZ_WINDOW_Y_SIZE) inline bool calculateLaunchParameters(unsigned int * gridXSize, dim3 * gridSize, dim3 * threadBlockSize, const unsigned int blockXSize, const unsigned int blockYSize, const unsigned int blockZSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeZSize, const unsigned int xThreads, const unsigned int yThreads, const unsigned int zThreads) { if (latticeXSize%32 != 0 || latticeXSize%blockXSize != 0 || latticeYSize%blockYSize != 0 || latticeZSize%blockZSize != 0) return false; // Calculate the grid size. *gridXSize = latticeXSize/blockXSize; gridSize->x = (*gridXSize)*(latticeYSize/blockYSize); gridSize->y = latticeZSize/blockZSize; gridSize->z = 1; threadBlockSize->x = blockXSize; threadBlockSize->y = blockYSize+2*apronSize; threadBlockSize->z = 1; return true; } /** * Calculates the x, y, and z indices of the current thread block. When the lattice size is not * guaranteed to be a power of two, the integer divide is only performed once in the first thread * for optimal performance. */ inline __device__ void calculateBlockIndices(unsigned int * bx, unsigned int * by, unsigned int * bz, unsigned int * gx, unsigned int * gy, unsigned int gridXSize) { if (threadIdx.x == 0 && threadIdx.y == 0) { *by = blockIdx.x/gridXSize; *bx = blockIdx.x-gridXSize*(*by); *bz = blockIdx.y; *gx = gridXSize; *gy = gridDim.x/gridXSize; } __syncthreads(); } inline __device__ void calculateXYZThreadIndices(const unsigned int bx, const unsigned int by, const unsigned int bz, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeXYSize, int * blockXIndex, int * blockYIndex, int * blockZIndex, int * latticeXIndex, int * latticeYIndex, int * latticeZIndex, unsigned int * latticeIndex, unsigned int * windowXIndex, unsigned int * windowYIndex, unsigned int * windowZIndex, unsigned int * windowIndex) { // Figure out the index of this thread in the block. *blockXIndex = threadIdx.x; *blockYIndex = threadIdx.y-LS_APRON_SIZE; *blockZIndex = -LS_APRON_SIZE; // Figure out the index of this thread in the lattice. *latticeXIndex = (bx*LS_XYZ_BLOCK_X_SIZE) + *blockXIndex; *latticeYIndex = (by*LS_XYZ_BLOCK_Y_SIZE) + *blockYIndex; *latticeZIndex = (bz*LS_XYZ_BLOCK_Z_SIZE) + *blockZIndex; int latticeYWrap=0; if (*latticeYIndex < 0) latticeYWrap = latticeYSize; else if (*latticeYIndex >= latticeYSize) latticeYWrap = -latticeYSize; *latticeIndex = (bz*latticeXYSize) + (unsigned int)(((*latticeYIndex+latticeYWrap)*latticeXSize) + *latticeXIndex); // Figure out the index of this thread in the window. *windowXIndex = threadIdx.x+LS_APRON_SIZE; *windowYIndex = threadIdx.y; *windowZIndex = 0; *windowIndex = (*windowYIndex*LS_XYZ_WINDOW_X_SIZE) + *windowXIndex; } // On architectures that support warp voting and if sites are packed, skip loading the next word if no threads in the warp had a completely filled previous word. 
#if defined LS_PACKED_SITES && __CUDA_ARCH__ >= 120 #define _LS_COND_LOAD_SITE(window, window2, windowIndex, value)\ if (__any(window[windowIndex]&LS_PACKED_LAST_OBJECT_MASK)) window2[windowIndex] = value;\ else window2[windowIndex] = 0; #else #define _LS_COND_LOAD_SITE(window, window2, windowIndex, value) window2[windowIndex] = value; #endif /** * Copies a window of a lattice from device memory to shared memory. */ inline __device__ void copyXYZWindowFromLattice(const unsigned int* lattice, unsigned int* window, const unsigned int origLatticeIndex, const int latticeYIndex, const int origLatticeZIndex, const unsigned int latticeYSize, const unsigned int latticeZSize, const unsigned int latticeXYSize, const unsigned int latticeXYZSize, const unsigned int origWindowIndex, const unsigned int windowXIndex, const unsigned int bx, const unsigned int gx) { //Create any needed pointer aliases. #if LS_WORDS_PER_SITE >= 2 const unsigned int* lattice2 = lattice+latticeXYZSize; unsigned int* window2 = window+LS_XYZ_WINDOW_SIZE; #endif #if !defined LS_BOUNDARY_PERIODIC int isYBoundary = (latticeYIndex < 0 || latticeYIndex >= latticeYSize); #endif // Loop through each z plane in the window. unsigned int latticeIndex = origLatticeIndex; int latticeZIndex = origLatticeZIndex; unsigned int windowIndex = origWindowIndex; for (int windowZIndex=0; windowZIndex<LS_XYZ_WINDOW_Z_SIZE; latticeIndex+=latticeXYSize, latticeZIndex++, windowIndex+=LS_XYZ_WINDOW_X_SIZE*LS_XYZ_WINDOW_Y_SIZE, windowZIndex++) { #if defined LS_BOUNDARY_PERIODIC #error "Unimplemented." #else int isZBoundary = (latticeZIndex < 0 || latticeZIndex >= latticeZSize); #endif #if LS_APRON_SIZE > 0 // If this thread is one that needs to load part of the leading apron, load it. if (windowXIndex >= LS_XY_BLOCK_X_SIZE) { #if defined LS_BOUNDARY_PERIODIC window[windowIndex-LS_XYZ_BLOCK_X_SIZE] = (bx>0)?lattice[latticeIndex-LS_XYZ_BLOCK_X_SIZE]:lattice[latticeIndex-LS_XYZ_BLOCK_X_SIZE+latticeXSize]; #if LS_WORDS_PER_SITE >= 2 _LS_COND_LOAD_SITE(window, window2, windowIndex-LS_XYZ_BLOCK_X_SIZE, ((bx>0)?lattice2[latticeIndex-LS_XYZ_BLOCK_X_SIZE]:lattice2[latticeIndex-LS_XYZ_BLOCK_X_SIZE+latticeXSize])); #endif #else int isBoundary = (bx == 0 || isYBoundary || isZBoundary); window[windowIndex-LS_XYZ_BLOCK_X_SIZE] = (!isBoundary)?lattice[latticeIndex-LS_XYZ_BLOCK_X_SIZE]:LS_BOUNDARY_VALUE; #if LS_WORDS_PER_SITE >= 2 _LS_COND_LOAD_SITE(window, window2, windowIndex-LS_XYZ_BLOCK_X_SIZE, ((!isBoundary)?lattice2[latticeIndex-LS_XYZ_BLOCK_X_SIZE]:LS_BOUNDARY_VALUE)); #endif #endif } #endif // Load the sites. #if defined LS_BOUNDARY_PERIODIC window[windowIndex] = lattice[latticeIndex]; #if LS_WORDS_PER_SITE >= 2 _LS_COND_LOAD_SITE(window, window2, windowIndex, lattice2[latticeIndex]); #endif #else window[windowIndex] = (!isYBoundary && !isZBoundary)?lattice[latticeIndex]:LS_BOUNDARY_VALUE; #if LS_WORDS_PER_SITE >= 2 _LS_COND_LOAD_SITE(window, window2, windowIndex, ((!isYBoundary && !isZBoundary)?lattice2[latticeIndex]:LS_BOUNDARY_VALUE)); #endif #endif // Load the trailing apron. #if LS_APRON_SIZE > 0 // If this thread is one that needs to load part of the leading apron, load it. 
if (windowXIndex < 2*LS_APRON_SIZE) { #if defined LS_BOUNDARY_PERIODIC window[windowIndex+LS_XYZ_BLOCK_X_SIZE] = (bx<gx-1)?lattice[latticeIndex+LS_XYZ_BLOCK_X_SIZE]:lattice[latticeIndex+LS_XYZ_BLOCK_X_SIZE-latticeXSize]; #if LS_WORDS_PER_SITE >= 2 _LS_COND_LOAD_SITE(window, window2, windowIndex+LS_XYZ_BLOCK_X_SIZE, ((bx<gx-1)?lattice2[latticeIndex+LS_XYZ_BLOCK_X_SIZE]:lattice2[latticeIndex+LS_XYZ_BLOCK_X_SIZE-latticeXSize])); #endif #else int isBoundary = (bx == gx-1 || isYBoundary || isZBoundary); window[windowIndex+LS_XYZ_BLOCK_X_SIZE] = (!isBoundary)?lattice[latticeIndex+LS_XYZ_BLOCK_X_SIZE]:LS_BOUNDARY_VALUE; #if LS_WORDS_PER_SITE >= 2 _LS_COND_LOAD_SITE(window, window2, windowIndex+LS_XYZ_BLOCK_X_SIZE, ((!isBoundary)?lattice2[latticeIndex+LS_XYZ_BLOCK_X_SIZE]:LS_BOUNDARY_VALUE)); #endif #endif } #endif } } inline __device__ void copyXYZWindowToLattice(unsigned int* lattice, const unsigned int* window, const unsigned int latticeIndex, const unsigned int latticeXYSize, const unsigned int latticeXYZSize, const unsigned int windowIndex) { //Create any needed pointer aliases. #if LS_WORDS_PER_SITE >= 2 unsigned int* lattice2 = lattice+latticeXYZSize; const unsigned int* window2 = window+LS_XYZ_WINDOW_SIZE; #endif // Load the block. for (int i=0; i<LS_XYZ_Z_LOOPS; i++) { //int loopLatticeZIndex = LS_Z_LOOP_Z_INDEX(latticeZIndex,i); int loopLatticeIndex = LS_Z_LOOP_LATTICE_INDEX(latticeIndex,i,latticeXYSize); unsigned int loopWindowIndex = LS_Z_LOOP_WINDOW_INDEX(windowIndex,i); // Copy the site for the block. lattice[loopLatticeIndex] = window[loopWindowIndex]; // Load the next part of the site, if it exists. #if LS_WORDS_PER_SITE >= 2 lattice2[loopLatticeIndex] = window2[loopWindowIndex]; #endif } }
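// ---------------------------------------------------------------------------
// The pair above stages a lattice block plus an apron (halo) of neighbouring
// sites into shared memory before working on it. The snippet below is a
// minimal, self-contained 1-D sketch of that halo-loading pattern, written for
// illustration only: BLOCK, APRON, and the kernel name are hypothetical and do
// not appear in the file above.
#include <cuda_runtime.h>

#define BLOCK  128
#define APRON  1
#define WINDOW (BLOCK + 2 * APRON)

__global__ void halo_copy_1d(const unsigned int* in, unsigned int* out, int n)
{
    __shared__ unsigned int window[WINDOW];

    int gid = blockIdx.x * blockDim.x + threadIdx.x;   // global lattice index
    int wid = threadIdx.x + APRON;                     // index inside the shared window

    // Core sites: one element per thread; out-of-range sites get a boundary value.
    window[wid] = (gid < n) ? in[gid] : 0u;

    // Leading halo: the first APRON threads also fetch the sites just before the block.
    if (threadIdx.x < APRON) {
        int g = gid - APRON;
        window[threadIdx.x] = (g >= 0) ? in[g] : 0u;
    }
    // Trailing halo: the last APRON threads also fetch the sites just after the block.
    if (threadIdx.x >= blockDim.x - APRON) {
        int g = gid + APRON;
        window[wid + APRON] = (g < n) ? in[g] : 0u;
    }
    __syncthreads();

    // Example use of the halo-extended window: a 3-point neighbourhood sum.
    // Launched as halo_copy_1d<<<(n + BLOCK - 1) / BLOCK, BLOCK>>>(d_in, d_out, n);
    if (gid < n)
        out[gid] = window[wid - 1] + window[wid] + window[wid + 1];
}
// ---------------------------------------------------------------------------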
6888c66d7f9a36716c6bf0e2460d6b425da34fc0.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <assert.h> #include <math.h> #include <hip/hip_runtime.h> #include "common.h" #define NUM_THREADS 256 #define b(x, y, p, n) (bins[(x)+(y)*(n)+(p)*(n)*(n)]) #define c(x, y, n) (counts[(x)+(y)*(n)]) // check if there are particles in a bin int bs; // bin size int np; // number of max particles per bin int* bins; int* counts; int* d_bins; int* d_counts; /* hipMalloc((void**) &d_bins, bs * bs * np * sizeof(int)); hipMalloc((void**) &d_counts, bs * bs * sizeof(int)); hipMemset(d_counts, 0, bs * bs * sizeof(int)); */ extern double size; int get_binsize() { return (int)(size / cutoff) + 1; } // // benchmarking program // __global__ void assign_bins_gpu(particle_t * particles, int * bins, int * counts, int n, int bs) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid >= n) return; int x = (int)(particles[tid].x/cutoff); int y = (int)(particles[tid].y/cutoff); int new_c = atomicAdd(&c(x, y, bs), 1); b(x, y, new_c, bs) = tid; } __device__ void apply_force_gpu(particle_t &particle, particle_t &neighbor) { double dx = neighbor.x - particle.x; double dy = neighbor.y - particle.y; double r2 = dx * dx + dy * dy; if( r2 > cutoff*cutoff ) return; //r2 = fmax( r2, min_r*min_r ); r2 = (r2 > min_r*min_r) ? r2 : min_r*min_r; double r = sqrt( r2 ); // // very simple short-range repulsive force // double coef = ( 1 - cutoff / r ) / r2 / mass; particle.ax += coef * dx; particle.ay += coef * dy; } __global__ void compute_forces_gpu(particle_t * particles, int n) { int id = threadIdx.x + blockIdx.x * blockDim.x; if(id >= bs*bs) return; // return if out of bounds int bx = id % bs; // get the x bin position int by = id / bs; // get the y bin position int nx, ny; // Get thread (particle) ID int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid >= n) return; particles[tid].ax = particles[tid].ay = 0; for(int j = 0 ; j < n ; j++) apply_force_gpu(particles[tid], particles[j]); } __global__ void compute_forces_bin_gpu(particle_t * particles, int bs, int * bins, int * counts) { int id = threadIdx.x + blockIdx.x * blockDim.x; if(id >= bs*bs) return; // return if out of bounds int bx = id % bs; // get the x bin position int by = id / bs; // get the y bin position int nx, ny; // work goes here } __global__ void move_gpu (particle_t * particles, int n, double size) { // Get thread (particle) ID int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid >= n) return; particle_t * p = &particles[tid]; // // slightly simplified Velocity Verlet integration // conserves energy better than explicit Euler method // p->vx += p->ax * dt; p->vy += p->ay * dt; p->x += p->vx * dt; p->y += p->vy * dt; // // bounce from walls // while( p->x < 0 || p->x > size ) { p->x = p->x < 0 ? -(p->x) : 2*size-p->x; p->vx = -(p->vx); } while( p->y < 0 || p->y > size ) { p->y = p->y < 0 ? -(p->y) : 2*size-p->y; p->vy = -(p->vy); } } int main( int argc, char **argv ) { bs = get_binsize(); np = bs / 10; // This takes a few seconds to initialize the runtime hipDeviceSynchronize(); if( find_option( argc, argv, "-h" ) >= 0 ) { printf( "Options:\n" ); printf( "-h to see this help\n" ); printf( "-n <int> to set the number of particles\n" ); printf( "-o <filename> to specify the output file name\n" ); return 0; } int n = read_int( argc, argv, "-n", 1000 ); char *savename = read_string( argc, argv, "-o", NULL ); FILE *fsave = savename ? 
fopen( savename, "w" ) : NULL; particle_t *particles = (particle_t*) malloc( n * sizeof(particle_t) ); // GPU particle data structure particle_t * d_particles; hipMalloc((void **) &d_particles, n * sizeof(particle_t)); set_size( n ); init_particles( n, particles ); hipDeviceSynchronize(); double copy_time = read_timer( ); // Copy the particles to the GPU hipMemcpy(d_particles, particles, n * sizeof(particle_t), hipMemcpyHostToDevice); hipDeviceSynchronize(); copy_time = read_timer( ) - copy_time; // // simulate a number of time steps // hipDeviceSynchronize(); double simulation_time = read_timer( ); // assign bins for( int step = 0; step < NSTEPS; step++ ) { // assign bins hipLaunchKernelGGL(( assign_bins_gpu) , dim3(blks), dim3(NUM_THREADS) , 0, 0, d_particles, d_bins, d_counts, n, bs); hipLaunchKernelGGL(( clear_accel_gpu) , dim3(blks), dim3(NUM_THREADS) , 0, 0, d_particles, n); // // compute forces // int bin_blks = (bs * bs + NUM_THREADS - 1) / NUM_THREADS; int blks = (n + NUM_THREADS - 1) / NUM_THREADS; hipLaunchKernelGGL(( compute_forces_gpu) , dim3(blks), dim3(NUM_THREADS) , 0, 0, d_particles, n); // // move particles // hipLaunchKernelGGL(( move_gpu) , dim3(blks), dim3(NUM_THREADS) , 0, 0, d_particles, n, size); int bin_blks = (bs * bs + NUM_THREADS - 1) / NUM_THREADS; hipLaunchKernelGGL(( compute_forces_bin_gpu) , dim3(bin_blks), dim3(NUM_THREADS) , 0, 0, d_particles, bs, d_bins, d_counts); hipLaunchKernelGGL(( move_gpu) , dim3(blks), dim3(NUM_THREADS) , 0, 0, d_particles, n, size); // // save if necessary // if( fsave && (step%SAVEFREQ) == 0 ) { // Copy the particles back to the CPU hipMemcpy(particles, d_particles, n * sizeof(particle_t), hipMemcpyDeviceToHost); save( fsave, n, particles); } } hipDeviceSynchronize(); simulation_time = read_timer( ) - simulation_time; printf( "CPU-GPU copy time = %g seconds\n", copy_time); printf( "n = %d, simulation time = %g seconds\n", n, simulation_time ); free( particles ); hipFree(d_particles); if( fsave ) fclose( fsave ); return 0; }
6888c66d7f9a36716c6bf0e2460d6b425da34fc0.cu
#include <stdlib.h> #include <stdio.h> #include <assert.h> #include <math.h> #include <cuda.h> #include "common.h" #define NUM_THREADS 256 #define b(x, y, p, n) (bins[(x)+(y)*(n)+(p)*(n)*(n)]) #define c(x, y, n) (counts[(x)+(y)*(n)]) // check if there are particles in a bin int bs; // bin size int np; // number of max particles per bin int* bins; int* counts; int* d_bins; int* d_counts; /* cudaMalloc((void**) &d_bins, bs * bs * np * sizeof(int)); cudaMalloc((void**) &d_counts, bs * bs * sizeof(int)); cudaMemset(d_counts, 0, bs * bs * sizeof(int)); */ extern double size; int get_binsize() { return (int)(size / cutoff) + 1; } // // benchmarking program // __global__ void assign_bins_gpu(particle_t * particles, int * bins, int * counts, int n, int bs) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid >= n) return; int x = (int)(particles[tid].x/cutoff); int y = (int)(particles[tid].y/cutoff); int new_c = atomicAdd(&c(x, y, bs), 1); b(x, y, new_c, bs) = tid; } __device__ void apply_force_gpu(particle_t &particle, particle_t &neighbor) { double dx = neighbor.x - particle.x; double dy = neighbor.y - particle.y; double r2 = dx * dx + dy * dy; if( r2 > cutoff*cutoff ) return; //r2 = fmax( r2, min_r*min_r ); r2 = (r2 > min_r*min_r) ? r2 : min_r*min_r; double r = sqrt( r2 ); // // very simple short-range repulsive force // double coef = ( 1 - cutoff / r ) / r2 / mass; particle.ax += coef * dx; particle.ay += coef * dy; } __global__ void compute_forces_gpu(particle_t * particles, int n) { int id = threadIdx.x + blockIdx.x * blockDim.x; if(id >= bs*bs) return; // return if out of bounds int bx = id % bs; // get the x bin position int by = id / bs; // get the y bin position int nx, ny; // Get thread (particle) ID int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid >= n) return; particles[tid].ax = particles[tid].ay = 0; for(int j = 0 ; j < n ; j++) apply_force_gpu(particles[tid], particles[j]); } __global__ void compute_forces_bin_gpu(particle_t * particles, int bs, int * bins, int * counts) { int id = threadIdx.x + blockIdx.x * blockDim.x; if(id >= bs*bs) return; // return if out of bounds int bx = id % bs; // get the x bin position int by = id / bs; // get the y bin position int nx, ny; // … work goes here } __global__ void move_gpu (particle_t * particles, int n, double size) { // Get thread (particle) ID int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid >= n) return; particle_t * p = &particles[tid]; // // slightly simplified Velocity Verlet integration // conserves energy better than explicit Euler method // p->vx += p->ax * dt; p->vy += p->ay * dt; p->x += p->vx * dt; p->y += p->vy * dt; // // bounce from walls // while( p->x < 0 || p->x > size ) { p->x = p->x < 0 ? -(p->x) : 2*size-p->x; p->vx = -(p->vx); } while( p->y < 0 || p->y > size ) { p->y = p->y < 0 ? -(p->y) : 2*size-p->y; p->vy = -(p->vy); } } int main( int argc, char **argv ) { bs = get_binsize(); np = bs / 10; // This takes a few seconds to initialize the runtime cudaThreadSynchronize(); if( find_option( argc, argv, "-h" ) >= 0 ) { printf( "Options:\n" ); printf( "-h to see this help\n" ); printf( "-n <int> to set the number of particles\n" ); printf( "-o <filename> to specify the output file name\n" ); return 0; } int n = read_int( argc, argv, "-n", 1000 ); char *savename = read_string( argc, argv, "-o", NULL ); FILE *fsave = savename ? 
fopen( savename, "w" ) : NULL; particle_t *particles = (particle_t*) malloc( n * sizeof(particle_t) ); // GPU particle data structure particle_t * d_particles; cudaMalloc((void **) &d_particles, n * sizeof(particle_t)); set_size( n ); init_particles( n, particles ); cudaThreadSynchronize(); double copy_time = read_timer( ); // Copy the particles to the GPU cudaMemcpy(d_particles, particles, n * sizeof(particle_t), cudaMemcpyHostToDevice); cudaThreadSynchronize(); copy_time = read_timer( ) - copy_time; // // simulate a number of time steps // cudaThreadSynchronize(); double simulation_time = read_timer( ); // assign bins for( int step = 0; step < NSTEPS; step++ ) { // assign bins assign_bins_gpu <<< blks, NUM_THREADS >>> (d_particles, d_bins, d_counts, n, bs); clear_accel_gpu <<< blks, NUM_THREADS >>> (d_particles, n); // // compute forces // int bin_blks = (bs * bs + NUM_THREADS - 1) / NUM_THREADS; int blks = (n + NUM_THREADS - 1) / NUM_THREADS; compute_forces_gpu <<< blks, NUM_THREADS >>> (d_particles, n); // // move particles // move_gpu <<< blks, NUM_THREADS >>> (d_particles, n, size); int bin_blks = (bs * bs + NUM_THREADS - 1) / NUM_THREADS; compute_forces_bin_gpu <<< bin_blks, NUM_THREADS >>> (d_particles, bs, d_bins, d_counts); move_gpu <<< blks, NUM_THREADS >>> (d_particles, n, size); // // save if necessary // if( fsave && (step%SAVEFREQ) == 0 ) { // Copy the particles back to the CPU cudaMemcpy(particles, d_particles, n * sizeof(particle_t), cudaMemcpyDeviceToHost); save( fsave, n, particles); } } cudaThreadSynchronize(); simulation_time = read_timer( ) - simulation_time; printf( "CPU-GPU copy time = %g seconds\n", copy_time); printf( "n = %d, simulation time = %g seconds\n", n, simulation_time ); free( particles ); cudaFree(d_particles); if( fsave ) fclose( fsave ); return 0; }
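// ---------------------------------------------------------------------------
// In the pair above, assign_bins_gpu reserves a slot in a fixed-capacity bin by
// doing an atomicAdd on that bin's counter and using the returned value as the
// slot index. Below is a minimal standalone sketch of the same pattern;
// MAX_PER_BIN, the kernel name, and the overflow guard are illustrative
// assumptions, not part of the assignment skeleton above.
#include <cuda_runtime.h>

#define MAX_PER_BIN 16

__global__ void bin_points(const float* x, const float* y, int n,
                           float cell, int nbins,   // nbins x nbins uniform grid
                           int* counts,             // nbins*nbins counters, zeroed before launch
                           int* bins)               // nbins*nbins*MAX_PER_BIN index slots
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;

    // Clamp the cell coordinates so stray points cannot index outside the grid.
    int bx = min(max((int)(x[i] / cell), 0), nbins - 1);
    int by = min(max((int)(y[i] / cell), 0), nbins - 1);
    int bin = by * nbins + bx;

    // atomicAdd returns the previous count, which doubles as this point's slot.
    int slot = atomicAdd(&counts[bin], 1);
    if (slot < MAX_PER_BIN)                 // drop overflowing points rather than corrupt memory
        bins[bin * MAX_PER_BIN + slot] = i;
}
// ---------------------------------------------------------------------------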
f7a5f6ac7481f6ccb42fc865e6818551bd3f349c.hip
// !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>

#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>

#include <vector>
#include <iostream>

namespace {
template <typename scalar_t>
__global__ void binactive_cuda_forward_kernel(
    const scalar_t* __restrict__ input,
    scalar_t* __restrict__ output,
    const int32_t size) {
  const int32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < size) {
    // output[idx] = fminf(fmaxf(delta*floor((input[idx] / delta) + 0.5), minv), maxv);
    // Binarize: sign(x) with sign(0) == 0.
    output[idx] = (input[idx]>0)?1:(input[idx]<0?-1:0);
  }
}

template <typename scalar_t>
__global__ void binactive_cuda_backward_kernel(
    scalar_t* __restrict__ grad_h,
    const scalar_t* __restrict__ input,
    const int32_t size) {
  const int32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < size) {
    // Read the input only after the bounds check to avoid an out-of-range access,
    // then pass the gradient through only where |input| < 1.
    const scalar_t inp = input[idx];
    grad_h[idx] *= (inp > -1)*(inp < 1);
  }
}
} // namespace

at::Tensor binactive_cuda_forward(
    const at::Tensor input) {
  const auto size = input.size(0)*input.size(1)*input.size(2)*input.size(3);
  auto output = at::zeros_like(input);
  const int32_t threads = 1024;
  const int32_t blocks = (size + threads - 1) / threads;
  AT_DISPATCH_FLOATING_TYPES(input.type(), "binactive_forward_cuda", ([&] {
    hipLaunchKernelGGL(( binactive_cuda_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
        input.data<scalar_t>(),
        output.data<scalar_t>(),
        size);
  }));

  return output;
}

at::Tensor binactive_cuda_backward(
    at::Tensor grad_h,
    const at::Tensor input) {
  const auto size = input.size(0)*input.size(1)*input.size(2)*input.size(3);
  const int threads = 1024;
  const int blocks = (size + threads - 1) / threads;
  AT_DISPATCH_FLOATING_TYPES(input.type(), "binactive_backward_cuda", ([&] {
    hipLaunchKernelGGL(( binactive_cuda_backward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
        grad_h.data<scalar_t>(),
        input.data<scalar_t>(),
        size);
  }));

  return grad_h;
}
f7a5f6ac7481f6ccb42fc865e6818551bd3f349c.cu
#include <ATen/ATen.h>

#include <cuda.h>
#include <cuda_runtime.h>

#include <vector>
#include <iostream>

namespace {
template <typename scalar_t>
__global__ void binactive_cuda_forward_kernel(
    const scalar_t* __restrict__ input,
    scalar_t* __restrict__ output,
    const int32_t size) {
  const int32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < size) {
    // output[idx] = fminf(fmaxf(delta*floor((input[idx] / delta) + 0.5), minv), maxv);
    // Binarize: sign(x) with sign(0) == 0.
    output[idx] = (input[idx]>0)?1:(input[idx]<0?-1:0);
  }
}

template <typename scalar_t>
__global__ void binactive_cuda_backward_kernel(
    scalar_t* __restrict__ grad_h,
    const scalar_t* __restrict__ input,
    const int32_t size) {
  const int32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < size) {
    // Read the input only after the bounds check to avoid an out-of-range access,
    // then pass the gradient through only where |input| < 1.
    const scalar_t inp = input[idx];
    grad_h[idx] *= (inp > -1)*(inp < 1);
  }
}
} // namespace

at::Tensor binactive_cuda_forward(
    const at::Tensor input) {
  const auto size = input.size(0)*input.size(1)*input.size(2)*input.size(3);
  auto output = at::zeros_like(input);
  const int32_t threads = 1024;
  const int32_t blocks = (size + threads - 1) / threads;
  AT_DISPATCH_FLOATING_TYPES(input.type(), "binactive_forward_cuda", ([&] {
    binactive_cuda_forward_kernel<scalar_t><<<blocks, threads>>>(
        input.data<scalar_t>(),
        output.data<scalar_t>(),
        size);
  }));

  return output;
}

at::Tensor binactive_cuda_backward(
    at::Tensor grad_h,
    const at::Tensor input) {
  const auto size = input.size(0)*input.size(1)*input.size(2)*input.size(3);
  const int threads = 1024;
  const int blocks = (size + threads - 1) / threads;
  AT_DISPATCH_FLOATING_TYPES(input.type(), "binactive_backward_cuda", ([&] {
    binactive_cuda_backward_kernel<scalar_t><<<blocks, threads>>>(
        grad_h.data<scalar_t>(),
        input.data<scalar_t>(),
        size);
  }));

  return grad_h;
}
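// ---------------------------------------------------------------------------
// The extension above binarizes activations with sign(x) in the forward pass and,
// in the backward pass, lets the incoming gradient through only where |x| < 1
// (a straight-through-style estimator). The program below is a simplified,
// standalone version of those two kernels without the ATen dispatch machinery;
// all names here are illustrative.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void sign_forward(const float* x, float* y, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) y[i] = (x[i] > 0.f) ? 1.f : (x[i] < 0.f ? -1.f : 0.f);
}

__global__ void sign_backward(float* grad, const float* x, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) grad[i] *= (x[i] > -1.f && x[i] < 1.f) ? 1.f : 0.f;  // hard-tanh style gate
}

int main()
{
    const int n = 8;
    float hx[n] = {-2.f, -0.5f, 0.f, 0.3f, 1.5f, -1.f, 0.9f, 2.f};
    float hy[n], hg[n];
    for (int i = 0; i < n; ++i) hg[i] = 1.f;   // pretend the upstream gradient is all ones

    float *dx, *dy, *dg;
    cudaMalloc(&dx, n * sizeof(float));
    cudaMalloc(&dy, n * sizeof(float));
    cudaMalloc(&dg, n * sizeof(float));
    cudaMemcpy(dx, hx, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dg, hg, n * sizeof(float), cudaMemcpyHostToDevice);

    sign_forward<<<1, 32>>>(dx, dy, n);
    sign_backward<<<1, 32>>>(dg, dx, n);

    cudaMemcpy(hy, dy, n * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(hg, dg, n * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; ++i)
        printf("x=% .1f  sign(x)=% .0f  gated grad=% .0f\n", hx[i], hy[i], hg[i]);

    cudaFree(dx); cudaFree(dy); cudaFree(dg);
    return 0;
}
// ---------------------------------------------------------------------------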
a0dbe465dcd356b08d564a919d9d69d88fcd8f27.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/null_mask.hpp> #include <cudf/detail/null_mask.hpp> #include <cudf/column/column_device_view.cuh> #include <cudf/utilities/bit.hpp> #include <cudf/utilities/error.hpp> #include <cudf/detail/utilities/integer_utils.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <thrust/copy.h> #include <thrust/device_ptr.h> #include <thrust/extrema.h> #include <thrust/binary_search.h> #include <hipcub/hipcub.hpp> #include <rmm/device_buffer.hpp> #include <rmm/device_scalar.hpp> #include <rmm/mr/device_memory_resource.hpp> #include <rmm/thrust_rmm_allocator.h> #include <algorithm> #include <numeric> #include <type_traits> namespace cudf { size_type state_null_count(mask_state state, size_type size) { switch (state) { case UNALLOCATED: return 0; case UNINITIALIZED: return UNKNOWN_NULL_COUNT; case ALL_NULL: return size; case ALL_VALID: return 0; default: CUDF_FAIL("Invalid null mask state."); } } // Computes required allocation size of a bitmask std::size_t bitmask_allocation_size_bytes(size_type number_of_bits, std::size_t padding_boundary) { CUDF_EXPECTS(padding_boundary > 0, "Invalid padding boundary"); auto necessary_bytes = cudf::util::div_rounding_up_safe<size_type>(number_of_bits, CHAR_BIT); auto padded_bytes = padding_boundary * cudf::util::div_rounding_up_safe<size_type>( necessary_bytes, padding_boundary); return padded_bytes; } // Computes number of *actual* bitmask_type elements needed size_type num_bitmask_words(size_type number_of_bits) { return cudf::util::div_rounding_up_safe<size_type>( number_of_bits, detail::size_in_bits<bitmask_type>()); } // Create a device_buffer for a null mask rmm::device_buffer create_null_mask(size_type size, mask_state state, hipStream_t stream, rmm::mr::device_memory_resource *mr) { size_type mask_size{0}; if (state != UNALLOCATED) { mask_size = bitmask_allocation_size_bytes(size); } rmm::device_buffer mask(mask_size, stream, mr); if (state != UNINITIALIZED) { uint8_t fill_value = (state == ALL_VALID) ? 0xff : 0x00; CUDA_TRY(hipMemsetAsync(static_cast<bitmask_type *>(mask.data()), fill_value, mask_size, stream)); } return mask; } namespace { /**---------------------------------------------------------------------------* * @brief Counts the number of non-zero bits in a bitmask in the range * `[first_bit_index, last_bit_index]`. * * Expects `0 <= first_bit_index <= last_bit_index`. * * @param[in] bitmask The bitmask whose non-zero bits will be counted. 
* @param[in] first_bit_index The index (inclusive) of the first bit to count * @param[in] last_bit_index The index (inclusive) of the last bit to count * @param[out] global_count The number of non-zero bits in the specified range *---------------------------------------------------------------------------**/ template <size_type block_size> __global__ void count_set_bits_kernel(bitmask_type const *bitmask, size_type first_bit_index, size_type last_bit_index, size_type *global_count) { constexpr auto const word_size{detail::size_in_bits<bitmask_type>()}; auto const first_word_index{word_index(first_bit_index)}; auto const last_word_index{word_index(last_bit_index)}; auto const tid = threadIdx.x + blockIdx.x * blockDim.x; auto thread_word_index = tid + first_word_index; size_type thread_count{0}; // First, just count the bits in all words while (thread_word_index <= last_word_index) { thread_count += __popc(bitmask[thread_word_index]); thread_word_index += blockDim.x * gridDim.x; } // Subtract any slack bits counted from the first and last word // Two threads handle this -- one for first word, one for last if (tid < 2) { bool const first{tid == 0}; bool const last{not first}; size_type bit_index = (first) ? first_bit_index : last_bit_index; size_type word_index = (first) ? first_word_index : last_word_index; size_type num_slack_bits = bit_index % word_size; if (last) { num_slack_bits = word_size - num_slack_bits - 1; } if (num_slack_bits > 0) { bitmask_type word = bitmask[word_index]; auto slack_mask = (first) ? set_least_significant_bits(num_slack_bits) : set_most_significant_bits(num_slack_bits); thread_count -= __popc(word & slack_mask); } } using BlockReduce = hipcub::BlockReduce<size_type, block_size>; __shared__ typename BlockReduce::TempStorage temp_storage; size_type block_count{BlockReduce(temp_storage).Sum(thread_count)}; if (threadIdx.x == 0) { atomicAdd(global_count, block_count); } } /**---------------------------------------------------------------------------* * @brief Copies the bits starting at the specified offset from a source * bitmask into the destination bitmask. * * Bit `i` in `destination` will be equal to bit `i + offset` from `source`. 
* * @param destination The mask to copy into * @param source The mask to copy from * @param source_begin_bit The offset into `source` from which to begin the copy * @param source_end_bit The offset into `source` till which copying is done *---------------------------------------------------------------------------**/ __global__ void copy_offset_bitmask(bitmask_type *__restrict__ destination, bitmask_type const *__restrict__ source, size_type source_begin_bit, size_type source_end_bit, size_type number_of_mask_words) { for (size_type destination_word_index = threadIdx.x + blockIdx.x * blockDim.x; destination_word_index < number_of_mask_words; destination_word_index += blockDim.x * gridDim.x) { size_type source_word_index = destination_word_index + word_index(source_begin_bit); bitmask_type curr_word = source[source_word_index]; bitmask_type next_word = 0; if ((word_index(source_begin_bit) != 0) && (word_index(source_end_bit) > word_index(source_begin_bit + destination_word_index * detail::size_in_bits<bitmask_type>()))) { next_word = source[source_word_index + 1]; } bitmask_type write_word = __funnelshift_r(curr_word, next_word, source_begin_bit); destination[destination_word_index] = write_word; } } /**---------------------------------------------------------------------------* * @brief Concatenates the null mask bits of all the column device views in the * `views` array to the destination bitmask. * * @param views Array of column_device_view * @param output_offsets Prefix sum of sizes of elements of `views` * @param number_of_views Size of `views` array * @param dest_mask The output buffer to copy null masks into * @param number_of_mask_bits The total number of null masks bits that are being * copied *---------------------------------------------------------------------------**/ __global__ void concatenate_masks_kernel( column_device_view* views, size_type* output_offsets, size_type number_of_views, bitmask_type* dest_mask, size_type number_of_mask_bits) { size_type mask_index = threadIdx.x + blockIdx.x * blockDim.x; auto active_mask = __ballot_sync(0xFFFF'FFFF, mask_index < number_of_mask_bits); while (mask_index < number_of_mask_bits) { size_type source_view_index = thrust::upper_bound(thrust::seq, output_offsets, output_offsets + number_of_views, mask_index) - output_offsets - 1; bool bit_is_set = 1; if (source_view_index < number_of_views) { size_type column_element_index = mask_index - output_offsets[source_view_index]; bit_is_set = views[source_view_index].is_valid(column_element_index); } bitmask_type const new_word = __ballot_sync(active_mask, bit_is_set); if (threadIdx.x % experimental::detail::warp_size == 0) { dest_mask[word_index(mask_index)] = new_word; } mask_index += blockDim.x * gridDim.x; active_mask = __ballot_sync(active_mask, mask_index < number_of_mask_bits); } } } // namespace namespace detail { cudf::size_type count_set_bits(bitmask_type const *bitmask, size_type start, size_type stop, hipStream_t stream = 0) { if (nullptr == bitmask) { return 0; } CUDF_EXPECTS(start >= 0, "Invalid range."); CUDF_EXPECTS(start <= stop, "Invalid bit range."); std::size_t num_bits_to_count = stop - start; if (num_bits_to_count == 0) { return 0; } auto num_words = cudf::util::div_rounding_up_safe( num_bits_to_count, detail::size_in_bits<bitmask_type>()); constexpr size_type block_size{256}; cudf::experimental::detail::grid_1d grid(num_words, block_size); rmm::device_scalar<size_type> non_zero_count(0, stream); hipLaunchKernelGGL(( count_set_bits_kernel<block_size>) , 
dim3(grid.num_blocks), dim3(grid.num_threads_per_block), 0, stream, bitmask, start, stop - 1, non_zero_count.data()); return non_zero_count.value(); } cudf::size_type count_unset_bits(bitmask_type const *bitmask, size_type start, size_type stop, hipStream_t stream = 0) { if (nullptr == bitmask) { return 0; } auto num_bits = (stop - start); return (num_bits - detail::count_set_bits(bitmask, start, stop, stream)); } // Create a bitmask from a vector of column views void concatenate_masks(std::vector<column_view> const &views, bitmask_type * dest_mask, hipStream_t stream) { using CDViewPtr = decltype(column_device_view::create(std::declval<column_view>(), std::declval<hipStream_t>())); std::vector<CDViewPtr> cols; thrust::host_vector<column_device_view> device_views; thrust::host_vector<size_type> view_offsets(1, 0); for (auto &v : views) { cols.emplace_back(column_device_view::create(v, stream)); device_views.push_back(*(cols.back())); view_offsets.push_back(v.size()); } thrust::inclusive_scan(thrust::host, view_offsets.begin(), view_offsets.end(), view_offsets.begin()); rmm::device_vector<column_device_view> d_views{device_views}; rmm::device_vector<size_type> d_offsets{view_offsets}; auto number_of_mask_bits = view_offsets.back(); constexpr size_type block_size{256}; cudf::experimental::detail::grid_1d config(number_of_mask_bits, block_size); hipLaunchKernelGGL(( concatenate_masks_kernel), dim3(config.num_blocks), dim3(config.num_threads_per_block), 0, stream, d_views.data().get(), d_offsets.data().get(), static_cast<size_type>(d_views.size()), dest_mask, number_of_mask_bits); } } // namespace detail // Count non-zero bits in the specified range cudf::size_type count_set_bits(bitmask_type const *bitmask, size_type start, size_type stop) { return detail::count_set_bits(bitmask, start, stop); } // Count zero bits in the specified range cudf::size_type count_unset_bits(bitmask_type const *bitmask, size_type start, size_type stop) { return detail::count_unset_bits(bitmask, start, stop); } // Create a bitmask from a specific range rmm::device_buffer copy_bitmask(bitmask_type const *mask, size_type begin_bit, size_type end_bit, hipStream_t stream, rmm::mr::device_memory_resource *mr) { CUDF_EXPECTS(begin_bit >= 0, "Invalid range."); CUDF_EXPECTS(begin_bit <= end_bit, "Invalid bit range."); rmm::device_buffer dest_mask{}; auto num_bytes = bitmask_allocation_size_bytes(end_bit - begin_bit); if ((mask == nullptr) || (num_bytes == 0)) { return dest_mask; } if (begin_bit == 0) { dest_mask = rmm::device_buffer{static_cast<void const *>(mask), num_bytes, stream, mr}; } else { auto number_of_mask_words = cudf::util::div_rounding_up_safe( static_cast<size_t>(end_bit - begin_bit), detail::size_in_bits<bitmask_type>()); dest_mask = rmm::device_buffer{num_bytes, stream, mr}; cudf::experimental::detail::grid_1d config(number_of_mask_words, 256); hipLaunchKernelGGL(( copy_offset_bitmask), dim3(config.num_blocks), dim3(config.num_threads_per_block), 0, stream, static_cast<bitmask_type *>(dest_mask.data()), mask, begin_bit, end_bit, number_of_mask_words); CUDA_CHECK_LAST(); } return dest_mask; } // Create a bitmask from a column view rmm::device_buffer copy_bitmask(column_view const &view, hipStream_t stream, rmm::mr::device_memory_resource *mr) { rmm::device_buffer null_mask{}; if (view.nullable()) { null_mask = copy_bitmask(view.null_mask(), view.offset(), view.offset() + view.size(), stream, mr); } return null_mask; } // Create a bitmask from a vector of column views rmm::device_buffer 
concatenate_masks(std::vector<column_view> const &views, rmm::mr::device_memory_resource *mr, hipStream_t stream) { rmm::device_buffer null_mask{}; bool has_nulls = std::any_of(views.begin(), views.end(), [](const column_view col) { return col.has_nulls(); }); if (has_nulls) { size_type total_element_count = std::accumulate(views.begin(), views.end(), 0, [](auto accumulator, auto const& v) { return accumulator + v.size(); }); null_mask = create_null_mask(total_element_count, UNINITIALIZED, stream, mr); detail::concatenate_masks( views, static_cast<bitmask_type *>(null_mask.data()), stream); } return null_mask; } } // namespace cudf
a0dbe465dcd356b08d564a919d9d69d88fcd8f27.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/null_mask.hpp> #include <cudf/detail/null_mask.hpp> #include <cudf/column/column_device_view.cuh> #include <cudf/utilities/bit.hpp> #include <cudf/utilities/error.hpp> #include <cudf/detail/utilities/integer_utils.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <thrust/copy.h> #include <thrust/device_ptr.h> #include <thrust/extrema.h> #include <thrust/binary_search.h> #include <cub/cub.cuh> #include <rmm/device_buffer.hpp> #include <rmm/device_scalar.hpp> #include <rmm/mr/device_memory_resource.hpp> #include <rmm/thrust_rmm_allocator.h> #include <algorithm> #include <numeric> #include <type_traits> namespace cudf { size_type state_null_count(mask_state state, size_type size) { switch (state) { case UNALLOCATED: return 0; case UNINITIALIZED: return UNKNOWN_NULL_COUNT; case ALL_NULL: return size; case ALL_VALID: return 0; default: CUDF_FAIL("Invalid null mask state."); } } // Computes required allocation size of a bitmask std::size_t bitmask_allocation_size_bytes(size_type number_of_bits, std::size_t padding_boundary) { CUDF_EXPECTS(padding_boundary > 0, "Invalid padding boundary"); auto necessary_bytes = cudf::util::div_rounding_up_safe<size_type>(number_of_bits, CHAR_BIT); auto padded_bytes = padding_boundary * cudf::util::div_rounding_up_safe<size_type>( necessary_bytes, padding_boundary); return padded_bytes; } // Computes number of *actual* bitmask_type elements needed size_type num_bitmask_words(size_type number_of_bits) { return cudf::util::div_rounding_up_safe<size_type>( number_of_bits, detail::size_in_bits<bitmask_type>()); } // Create a device_buffer for a null mask rmm::device_buffer create_null_mask(size_type size, mask_state state, cudaStream_t stream, rmm::mr::device_memory_resource *mr) { size_type mask_size{0}; if (state != UNALLOCATED) { mask_size = bitmask_allocation_size_bytes(size); } rmm::device_buffer mask(mask_size, stream, mr); if (state != UNINITIALIZED) { uint8_t fill_value = (state == ALL_VALID) ? 0xff : 0x00; CUDA_TRY(cudaMemsetAsync(static_cast<bitmask_type *>(mask.data()), fill_value, mask_size, stream)); } return mask; } namespace { /**---------------------------------------------------------------------------* * @brief Counts the number of non-zero bits in a bitmask in the range * `[first_bit_index, last_bit_index]`. * * Expects `0 <= first_bit_index <= last_bit_index`. * * @param[in] bitmask The bitmask whose non-zero bits will be counted. 
* @param[in] first_bit_index The index (inclusive) of the first bit to count * @param[in] last_bit_index The index (inclusive) of the last bit to count * @param[out] global_count The number of non-zero bits in the specified range *---------------------------------------------------------------------------**/ template <size_type block_size> __global__ void count_set_bits_kernel(bitmask_type const *bitmask, size_type first_bit_index, size_type last_bit_index, size_type *global_count) { constexpr auto const word_size{detail::size_in_bits<bitmask_type>()}; auto const first_word_index{word_index(first_bit_index)}; auto const last_word_index{word_index(last_bit_index)}; auto const tid = threadIdx.x + blockIdx.x * blockDim.x; auto thread_word_index = tid + first_word_index; size_type thread_count{0}; // First, just count the bits in all words while (thread_word_index <= last_word_index) { thread_count += __popc(bitmask[thread_word_index]); thread_word_index += blockDim.x * gridDim.x; } // Subtract any slack bits counted from the first and last word // Two threads handle this -- one for first word, one for last if (tid < 2) { bool const first{tid == 0}; bool const last{not first}; size_type bit_index = (first) ? first_bit_index : last_bit_index; size_type word_index = (first) ? first_word_index : last_word_index; size_type num_slack_bits = bit_index % word_size; if (last) { num_slack_bits = word_size - num_slack_bits - 1; } if (num_slack_bits > 0) { bitmask_type word = bitmask[word_index]; auto slack_mask = (first) ? set_least_significant_bits(num_slack_bits) : set_most_significant_bits(num_slack_bits); thread_count -= __popc(word & slack_mask); } } using BlockReduce = cub::BlockReduce<size_type, block_size>; __shared__ typename BlockReduce::TempStorage temp_storage; size_type block_count{BlockReduce(temp_storage).Sum(thread_count)}; if (threadIdx.x == 0) { atomicAdd(global_count, block_count); } } /**---------------------------------------------------------------------------* * @brief Copies the bits starting at the specified offset from a source * bitmask into the destination bitmask. * * Bit `i` in `destination` will be equal to bit `i + offset` from `source`. 
* * @param destination The mask to copy into * @param source The mask to copy from * @param source_begin_bit The offset into `source` from which to begin the copy * @param source_end_bit The offset into `source` till which copying is done *---------------------------------------------------------------------------**/ __global__ void copy_offset_bitmask(bitmask_type *__restrict__ destination, bitmask_type const *__restrict__ source, size_type source_begin_bit, size_type source_end_bit, size_type number_of_mask_words) { for (size_type destination_word_index = threadIdx.x + blockIdx.x * blockDim.x; destination_word_index < number_of_mask_words; destination_word_index += blockDim.x * gridDim.x) { size_type source_word_index = destination_word_index + word_index(source_begin_bit); bitmask_type curr_word = source[source_word_index]; bitmask_type next_word = 0; if ((word_index(source_begin_bit) != 0) && (word_index(source_end_bit) > word_index(source_begin_bit + destination_word_index * detail::size_in_bits<bitmask_type>()))) { next_word = source[source_word_index + 1]; } bitmask_type write_word = __funnelshift_r(curr_word, next_word, source_begin_bit); destination[destination_word_index] = write_word; } } /**---------------------------------------------------------------------------* * @brief Concatenates the null mask bits of all the column device views in the * `views` array to the destination bitmask. * * @param views Array of column_device_view * @param output_offsets Prefix sum of sizes of elements of `views` * @param number_of_views Size of `views` array * @param dest_mask The output buffer to copy null masks into * @param number_of_mask_bits The total number of null masks bits that are being * copied *---------------------------------------------------------------------------**/ __global__ void concatenate_masks_kernel( column_device_view* views, size_type* output_offsets, size_type number_of_views, bitmask_type* dest_mask, size_type number_of_mask_bits) { size_type mask_index = threadIdx.x + blockIdx.x * blockDim.x; auto active_mask = __ballot_sync(0xFFFF'FFFF, mask_index < number_of_mask_bits); while (mask_index < number_of_mask_bits) { size_type source_view_index = thrust::upper_bound(thrust::seq, output_offsets, output_offsets + number_of_views, mask_index) - output_offsets - 1; bool bit_is_set = 1; if (source_view_index < number_of_views) { size_type column_element_index = mask_index - output_offsets[source_view_index]; bit_is_set = views[source_view_index].is_valid(column_element_index); } bitmask_type const new_word = __ballot_sync(active_mask, bit_is_set); if (threadIdx.x % experimental::detail::warp_size == 0) { dest_mask[word_index(mask_index)] = new_word; } mask_index += blockDim.x * gridDim.x; active_mask = __ballot_sync(active_mask, mask_index < number_of_mask_bits); } } } // namespace namespace detail { cudf::size_type count_set_bits(bitmask_type const *bitmask, size_type start, size_type stop, cudaStream_t stream = 0) { if (nullptr == bitmask) { return 0; } CUDF_EXPECTS(start >= 0, "Invalid range."); CUDF_EXPECTS(start <= stop, "Invalid bit range."); std::size_t num_bits_to_count = stop - start; if (num_bits_to_count == 0) { return 0; } auto num_words = cudf::util::div_rounding_up_safe( num_bits_to_count, detail::size_in_bits<bitmask_type>()); constexpr size_type block_size{256}; cudf::experimental::detail::grid_1d grid(num_words, block_size); rmm::device_scalar<size_type> non_zero_count(0, stream); count_set_bits_kernel<block_size> <<<grid.num_blocks, 
grid.num_threads_per_block, 0, stream>>>( bitmask, start, stop - 1, non_zero_count.data()); return non_zero_count.value(); } cudf::size_type count_unset_bits(bitmask_type const *bitmask, size_type start, size_type stop, cudaStream_t stream = 0) { if (nullptr == bitmask) { return 0; } auto num_bits = (stop - start); return (num_bits - detail::count_set_bits(bitmask, start, stop, stream)); } // Create a bitmask from a vector of column views void concatenate_masks(std::vector<column_view> const &views, bitmask_type * dest_mask, cudaStream_t stream) { using CDViewPtr = decltype(column_device_view::create(std::declval<column_view>(), std::declval<cudaStream_t>())); std::vector<CDViewPtr> cols; thrust::host_vector<column_device_view> device_views; thrust::host_vector<size_type> view_offsets(1, 0); for (auto &v : views) { cols.emplace_back(column_device_view::create(v, stream)); device_views.push_back(*(cols.back())); view_offsets.push_back(v.size()); } thrust::inclusive_scan(thrust::host, view_offsets.begin(), view_offsets.end(), view_offsets.begin()); rmm::device_vector<column_device_view> d_views{device_views}; rmm::device_vector<size_type> d_offsets{view_offsets}; auto number_of_mask_bits = view_offsets.back(); constexpr size_type block_size{256}; cudf::experimental::detail::grid_1d config(number_of_mask_bits, block_size); concatenate_masks_kernel<<<config.num_blocks, config.num_threads_per_block, 0, stream>>>( d_views.data().get(), d_offsets.data().get(), static_cast<size_type>(d_views.size()), dest_mask, number_of_mask_bits); } } // namespace detail // Count non-zero bits in the specified range cudf::size_type count_set_bits(bitmask_type const *bitmask, size_type start, size_type stop) { return detail::count_set_bits(bitmask, start, stop); } // Count zero bits in the specified range cudf::size_type count_unset_bits(bitmask_type const *bitmask, size_type start, size_type stop) { return detail::count_unset_bits(bitmask, start, stop); } // Create a bitmask from a specific range rmm::device_buffer copy_bitmask(bitmask_type const *mask, size_type begin_bit, size_type end_bit, cudaStream_t stream, rmm::mr::device_memory_resource *mr) { CUDF_EXPECTS(begin_bit >= 0, "Invalid range."); CUDF_EXPECTS(begin_bit <= end_bit, "Invalid bit range."); rmm::device_buffer dest_mask{}; auto num_bytes = bitmask_allocation_size_bytes(end_bit - begin_bit); if ((mask == nullptr) || (num_bytes == 0)) { return dest_mask; } if (begin_bit == 0) { dest_mask = rmm::device_buffer{static_cast<void const *>(mask), num_bytes, stream, mr}; } else { auto number_of_mask_words = cudf::util::div_rounding_up_safe( static_cast<size_t>(end_bit - begin_bit), detail::size_in_bits<bitmask_type>()); dest_mask = rmm::device_buffer{num_bytes, stream, mr}; cudf::experimental::detail::grid_1d config(number_of_mask_words, 256); copy_offset_bitmask<<<config.num_blocks, config.num_threads_per_block, 0, stream>>>( static_cast<bitmask_type *>(dest_mask.data()), mask, begin_bit, end_bit, number_of_mask_words); CUDA_CHECK_LAST(); } return dest_mask; } // Create a bitmask from a column view rmm::device_buffer copy_bitmask(column_view const &view, cudaStream_t stream, rmm::mr::device_memory_resource *mr) { rmm::device_buffer null_mask{}; if (view.nullable()) { null_mask = copy_bitmask(view.null_mask(), view.offset(), view.offset() + view.size(), stream, mr); } return null_mask; } // Create a bitmask from a vector of column views rmm::device_buffer concatenate_masks(std::vector<column_view> const &views, rmm::mr::device_memory_resource *mr, 
cudaStream_t stream) { rmm::device_buffer null_mask{}; bool has_nulls = std::any_of(views.begin(), views.end(), [](const column_view col) { return col.has_nulls(); }); if (has_nulls) { size_type total_element_count = std::accumulate(views.begin(), views.end(), 0, [](auto accumulator, auto const& v) { return accumulator + v.size(); }); null_mask = create_null_mask(total_element_count, UNINITIALIZED, stream, mr); detail::concatenate_masks( views, static_cast<bitmask_type *>(null_mask.data()), stream); } return null_mask; } } // namespace cudf
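// ---------------------------------------------------------------------------
// count_set_bits_kernel above combines __popc on each 32-bit bitmask word with a
// cub::BlockReduce and a correction for the partial ("slack") first and last words.
// The sketch below keeps only the core idea -- __popc per word plus a plain
// atomicAdd -- and counts whole words only, so it is an illustration of the
// technique rather than a drop-in replacement for the range-aware version above.
#include <cuda_runtime.h>

__global__ void popcount_words(const unsigned int* words, int num_words, int* total)
{
    int count = 0;
    // Grid-stride loop over the bitmask words.
    for (int w = blockIdx.x * blockDim.x + threadIdx.x; w < num_words;
         w += blockDim.x * gridDim.x)
        count += __popc(words[w]);          // number of set bits in this word

    if (count) atomicAdd(total, count);     // *total must be zeroed before launch
}
// ---------------------------------------------------------------------------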
2bb53c6b12cd7db9641dfee5d87449e003c9f7e2.hip
// !!! This is a file automatically generated by hipify!!! #include "Matrix_hip.cuh" #include <random> Matrix::Matrix() { } Matrix::Matrix(int _numRows, int _numCols, bool isRandom) { numRows = _numRows; numCols = _numCols; double r = 0.00; vector<double> colValues; for (int i = 0; i < numRows; i++) { for (int j = 0; j < numCols; j++) { if (isRandom) { r = generateRandomNumber(); } colValues.push_back(r); } values.push_back(colValues); colValues.clear(); } } double Matrix::generateRandomNumber() { std::random_device rd; std::mt19937 gen(rd()); std::uniform_real_distribution<> dis(0, 1); return dis(gen); } void Matrix::printToConsole() { for (int i = 0; i < numRows; i++) { for (int j = 0; j < numCols; j++) { cout << this->values.at(i).at(j) << "\t\t"; } cout << endl; } } Matrix Matrix::transpose() { Matrix m = Matrix(numCols, numRows, false); for (int i = 0; i < numRows; ++i) { for (int j = 0; j < numCols; ++j) { m.setValue(j, i, getValue(i, j)); } } return m; }
2bb53c6b12cd7db9641dfee5d87449e003c9f7e2.cu
#include "Matrix.cuh" #include <random> Matrix::Matrix() { } Matrix::Matrix(int _numRows, int _numCols, bool isRandom) { numRows = _numRows; numCols = _numCols; double r = 0.00; vector<double> colValues; for (int i = 0; i < numRows; i++) { for (int j = 0; j < numCols; j++) { if (isRandom) { r = generateRandomNumber(); } colValues.push_back(r); } values.push_back(colValues); colValues.clear(); } } double Matrix::generateRandomNumber() { std::random_device rd; std::mt19937 gen(rd()); std::uniform_real_distribution<> dis(0, 1); return dis(gen); } void Matrix::printToConsole() { for (int i = 0; i < numRows; i++) { for (int j = 0; j < numCols; j++) { cout << this->values.at(i).at(j) << "\t\t"; } cout << endl; } } Matrix Matrix::transpose() { Matrix m = Matrix(numCols, numRows, false); for (int i = 0; i < numRows; ++i) { for (int j = 0; j < numCols; ++j) { m.setValue(j, i, getValue(i, j)); } } return m; }
4ca2827bd7ad26aa9a72fd45524fe30e0c5eb033.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // sturg_cuda_post_proc_kernel.cu // sturgRender // // Created by Dilip Patlolla on 2/18/17. // Copyright (c) 2015-2025 STURFEE INC ALL RIGHTS RESERVED // #ifndef _STURG_CUDA_POST_PROC_KERNEL_ #define _STURG_CUDA_POST_PROC_KERNEL_ #define CONV_KERNEL_RADIUS 1 // compute the edges or mask // from input rgb raster, using only one band // // \param[in] d_raster input imagery raster rgb // \param[out] d_surface_norm placeholder for surface normals // \param[in] width raster width // \param[in] height raster height // \param[in] num_bands the count of rgb i.e 3 // // TO DO: further optimize this raw implementation __global__ void gpuGetUpdatedSurfaceNorm(const float* d_raster, float3* d_surface_norm, const int width, const int height, int num_output_bands) { unsigned int tidx = threadIdx.x; unsigned int tidy = threadIdx.y; // for horizontal and vertical unsigned int x_tid_h; unsigned int y_tid_h; unsigned int x_tid_v; unsigned int y_tid_v; // calculate normalized texture coordinates unsigned int x = blockIdx.x * blockDim.x + tidx; unsigned int y = blockIdx.y * blockDim.y + tidy; unsigned int xindex = (y * width + x) * 4; unsigned int surf_norm_xindex = y * width + x; unsigned int rgb_xindex = xindex; float norm = 0; float near = 0.01; float far = 2000; float eq_const_1 = (2 * near * far); float eq_const_2 = (far / 2.0 - near); float eq_const_3 = (far / 2.0 + near); float min = -1.0f; float max = 1.0f; __shared__ float sh_mem_raster_horz[BLOCK_SIZE_W + 2][BLOCK_SIZE_H]; __shared__ float sh_mem_raster_vert[BLOCK_SIZE_W][BLOCK_SIZE_H + 2]; if ((x < width) && (y < height)) { // Load ghost array required into shared mem // ghost pixels required to compute edges x_tid_h = threadIdx.x + 1 * CONV_KERNEL_RADIUS; y_tid_h = threadIdx.y; sh_mem_raster_horz[x_tid_h][y_tid_h] = (eq_const_1 / (eq_const_3 - d_raster[xindex + 3] * eq_const_2)); __syncthreads(); // load left Halo if (tidx == 0) { if (x == 0) { sh_mem_raster_horz[tidx][tidy] = sh_mem_raster_horz[x_tid_h][y_tid_h]; } else { sh_mem_raster_horz[tidx][tidy] = (eq_const_1 / (eq_const_3 - d_raster[xindex - 1 * 4 + 3] * eq_const_2)); } } __syncthreads(); // load right Halo if (x == (width - 1)) { sh_mem_raster_horz[tidx + 2 * CONV_KERNEL_RADIUS][tidy] = (eq_const_1 / (eq_const_3 - d_raster[xindex + 3] * eq_const_2)); } else if ((tidx == blockDim.x - 1)) { rgb_xindex = xindex + 1 * 4 + 3; sh_mem_raster_horz[tidx + 2 * CONV_KERNEL_RADIUS][tidy] = (eq_const_1 / (eq_const_3 - d_raster[rgb_xindex] * eq_const_2)); } __syncthreads(); // update core part to vertical sh mem x_tid_v = threadIdx.x; y_tid_v = threadIdx.y + 1 * CONV_KERNEL_RADIUS; sh_mem_raster_vert[x_tid_v][y_tid_v] = sh_mem_raster_horz[x_tid_h][y_tid_h]; __syncthreads(); // load top halo if (tidy == 0) { if (y == 0) { sh_mem_raster_vert[tidx][tidy] = sh_mem_raster_vert[x_tid_v][y_tid_v]; } else { sh_mem_raster_vert[tidx][tidy] = (eq_const_1 / (eq_const_3 - d_raster[((y - 1) * width + x) * 4 + 3] * eq_const_2)); } } __syncthreads(); // load bottom Halo if (y == (height - 1)) { sh_mem_raster_vert[tidx][tidy + 2 * CONV_KERNEL_RADIUS] = (eq_const_1 / (eq_const_3 - d_raster[xindex + 3] * eq_const_2)); } else if ((tidy == blockDim.y - 1)) { rgb_xindex = ((y + 1) * width + x) * 4 + 3; sh_mem_raster_vert[tidx][tidy + 2 * CONV_KERNEL_RADIUS] = (eq_const_1 / (eq_const_3 - d_raster[rgb_xindex] * eq_const_2)); } __syncthreads(); // compute dx float temp_horz = (sh_mem_raster_horz[x_tid_h + 1][y_tid_h] - 
sh_mem_raster_horz[x_tid_h - 1][y_tid_h]) / 2.0; // compute dy float temp_vert = (sh_mem_raster_vert[x_tid_v][y_tid_v + 1] - sh_mem_raster_vert[x_tid_v][y_tid_v - 1]) / 2.0; // compute norm norm = sqrtf(temp_horz * temp_horz + temp_vert * temp_vert + 1.0f); if (d_raster[xindex + 3] == 0.0f) { d_surface_norm[surf_norm_xindex].z = 127; d_surface_norm[surf_norm_xindex].y = 127; d_surface_norm[surf_norm_xindex].x = 127; } else { // // version 1 d_surface_norm[surf_norm_xindex].x = 255 * ((1.0f / norm) - min) / (max - min); d_surface_norm[surf_norm_xindex].y = 255 * ((-temp_horz / norm) - min) / (max - min); d_surface_norm[surf_norm_xindex].z = 255 * ((-temp_vert / norm) - min) / (max - min); } } __syncthreads(); } // TO DO: further optimize this raw implementation // TO DO: optimize the access of shared mem to remove bank conflicts __global__ void scaleRaster(float3* d_raster, const double min, const double max, const int width, const int height, int num_output_bands) { unsigned int tidx = threadIdx.x; unsigned int tidy = threadIdx.y; // calculate normalized texture coordinates unsigned int x = blockIdx.x * blockDim.x + tidx; unsigned int y = blockIdx.y * blockDim.y + tidy; unsigned int xindex = y * width + x; if ((x < width) && (y < height)) { // Load ghost array required into shared mem // ghost pixels required to compute edges if (d_raster[xindex].x == 0.0f && d_raster[xindex].y == 0.0f && d_raster[xindex].z == 0.0f) { d_raster[xindex].x = 127; d_raster[xindex].y = 127; d_raster[xindex].z = 127; } else if (min != max) { d_raster[xindex].x = 255 * (d_raster[xindex].x - min) / (max - min); d_raster[xindex].y = 255 * (d_raster[xindex].y - min) / (max - min); d_raster[xindex].z = 255 * (d_raster[xindex].z - min) / (max - min); } else { d_raster[xindex].x = 127; d_raster[xindex].y = 127; d_raster[xindex].z = 127; } } __syncthreads(); } // //// TO DO: further optimize this raw implementation //// TO DO: optimize the access of shared mem to remove bank conflicts //__global__ void updateGlobalImage(float3* d_surfnorm_raster, float4* d_image_raster, // float* d_global_raster, const unsigned int yaw_count, // const unsigned int pitch_count, const int width, const int height, // const int num_bands) { // unsigned int tidx = threadIdx.x; // unsigned int tidy = threadIdx.y; // // // calculate normalized texture coordinates // unsigned int x = blockIdx.x * blockDim.x + tidx; // unsigned int y = blockIdx.y * blockDim.y + tidy; // // unsigned int raster_xindex = y * width + x + width / 2 + height / 2 * width; // unsigned int global_xindex = // (y + pitch_count * 36) * STITCHED_IMAGE_WIDTH + x + yaw_count * 36 / 2; // // // Load ghost array required into shared mem // // ghost pixels required to compute edges // if (pitch_count == 0) { // if (blockIdx.y == 1) { // if ((x < width) && (y < height) && // ((y + (pitch_count - 0.5) * 36) < STITCHED_IMAGE_HEIGHT) && // ((x + yaw_count * 36) < STITCHED_IMAGE_WIDTH)) { // global_xindex = // (y + (pitch_count - 0.5) * 36) * STITCHED_IMAGE_WIDTH + x + yaw_count * 36; // // d_global_raster[global_xindex * num_bands + 0] = // d_image_raster[raster_xindex].x - B_MEAN; // d_global_raster[global_xindex * num_bands + 1] = // d_image_raster[raster_xindex].y - G_MEAN; // d_global_raster[global_xindex * num_bands + 2] = // d_image_raster[raster_xindex].z - R_MEAN; // d_global_raster[global_xindex * num_bands + 3] = // d_surfnorm_raster[raster_xindex].x - B_MEAN; // d_global_raster[global_xindex * num_bands + 4] = // d_surfnorm_raster[raster_xindex].y - G_MEAN; // 
d_global_raster[global_xindex * num_bands + 5] = // d_surfnorm_raster[raster_xindex].z - R_MEAN; // } // } // } else { // if ((x < width) && (y < height) && // ((y + (pitch_count - 0.5) * 36) < STITCHED_IMAGE_HEIGHT) && // ((x + yaw_count * 36) < STITCHED_IMAGE_WIDTH)) { // global_xindex = // (y + (pitch_count - 0.5) * 36) * STITCHED_IMAGE_WIDTH + x + yaw_count * 36; // d_global_raster[global_xindex * num_bands + 0] = // d_image_raster[raster_xindex].x - B_MEAN; // d_global_raster[global_xindex * num_bands + 1] = // d_image_raster[raster_xindex].y - G_MEAN; // d_global_raster[global_xindex * num_bands + 2] = // d_image_raster[raster_xindex].z - R_MEAN; // d_global_raster[global_xindex * num_bands + 3] = // d_surfnorm_raster[raster_xindex].x - B_MEAN; // d_global_raster[global_xindex * num_bands + 4] = // d_surfnorm_raster[raster_xindex].y - G_MEAN; // d_global_raster[global_xindex * num_bands + 5] = // d_surfnorm_raster[raster_xindex].z - R_MEAN; // } // } // __syncthreads(); //} #endif // #ifndef _STURG_CUDA_POST_PROC_KERNEL_
4ca2827bd7ad26aa9a72fd45524fe30e0c5eb033.cu
// // sturg_cuda_post_proc_kernel.cu // sturgRender // // Created by Dilip Patlolla on 2/18/17. // Copyright (c) 2015-2025 STURFEE INC ALL RIGHTS RESERVED // #ifndef _STURG_CUDA_POST_PROC_KERNEL_ #define _STURG_CUDA_POST_PROC_KERNEL_ #define CONV_KERNEL_RADIUS 1 // compute the edges or mask // from input rgb raster, using only one band // // \param[in] d_raster input imagery raster rgb // \param[out] d_surface_norm placeholder for surface normals // \param[in] width raster width // \param[in] height raster height // \param[in] num_bands the count of rgb i.e 3 // // TO DO: further optimize this raw implementation __global__ void gpuGetUpdatedSurfaceNorm(const float* d_raster, float3* d_surface_norm, const int width, const int height, int num_output_bands) { unsigned int tidx = threadIdx.x; unsigned int tidy = threadIdx.y; // for horizontal and vertical unsigned int x_tid_h; unsigned int y_tid_h; unsigned int x_tid_v; unsigned int y_tid_v; // calculate normalized texture coordinates unsigned int x = blockIdx.x * blockDim.x + tidx; unsigned int y = blockIdx.y * blockDim.y + tidy; unsigned int xindex = (y * width + x) * 4; unsigned int surf_norm_xindex = y * width + x; unsigned int rgb_xindex = xindex; float norm = 0; float near = 0.01; float far = 2000; float eq_const_1 = (2 * near * far); float eq_const_2 = (far / 2.0 - near); float eq_const_3 = (far / 2.0 + near); float min = -1.0f; float max = 1.0f; __shared__ float sh_mem_raster_horz[BLOCK_SIZE_W + 2][BLOCK_SIZE_H]; __shared__ float sh_mem_raster_vert[BLOCK_SIZE_W][BLOCK_SIZE_H + 2]; if ((x < width) && (y < height)) { // Load ghost array required into shared mem // ghost pixels required to compute edges x_tid_h = threadIdx.x + 1 * CONV_KERNEL_RADIUS; y_tid_h = threadIdx.y; sh_mem_raster_horz[x_tid_h][y_tid_h] = (eq_const_1 / (eq_const_3 - d_raster[xindex + 3] * eq_const_2)); __syncthreads(); // load left Halo if (tidx == 0) { if (x == 0) { sh_mem_raster_horz[tidx][tidy] = sh_mem_raster_horz[x_tid_h][y_tid_h]; } else { sh_mem_raster_horz[tidx][tidy] = (eq_const_1 / (eq_const_3 - d_raster[xindex - 1 * 4 + 3] * eq_const_2)); } } __syncthreads(); // load right Halo if (x == (width - 1)) { sh_mem_raster_horz[tidx + 2 * CONV_KERNEL_RADIUS][tidy] = (eq_const_1 / (eq_const_3 - d_raster[xindex + 3] * eq_const_2)); } else if ((tidx == blockDim.x - 1)) { rgb_xindex = xindex + 1 * 4 + 3; sh_mem_raster_horz[tidx + 2 * CONV_KERNEL_RADIUS][tidy] = (eq_const_1 / (eq_const_3 - d_raster[rgb_xindex] * eq_const_2)); } __syncthreads(); // update core part to vertical sh mem x_tid_v = threadIdx.x; y_tid_v = threadIdx.y + 1 * CONV_KERNEL_RADIUS; sh_mem_raster_vert[x_tid_v][y_tid_v] = sh_mem_raster_horz[x_tid_h][y_tid_h]; __syncthreads(); // load top halo if (tidy == 0) { if (y == 0) { sh_mem_raster_vert[tidx][tidy] = sh_mem_raster_vert[x_tid_v][y_tid_v]; } else { sh_mem_raster_vert[tidx][tidy] = (eq_const_1 / (eq_const_3 - d_raster[((y - 1) * width + x) * 4 + 3] * eq_const_2)); } } __syncthreads(); // load bottom Halo if (y == (height - 1)) { sh_mem_raster_vert[tidx][tidy + 2 * CONV_KERNEL_RADIUS] = (eq_const_1 / (eq_const_3 - d_raster[xindex + 3] * eq_const_2)); } else if ((tidy == blockDim.y - 1)) { rgb_xindex = ((y + 1) * width + x) * 4 + 3; sh_mem_raster_vert[tidx][tidy + 2 * CONV_KERNEL_RADIUS] = (eq_const_1 / (eq_const_3 - d_raster[rgb_xindex] * eq_const_2)); } __syncthreads(); // compute dx float temp_horz = (sh_mem_raster_horz[x_tid_h + 1][y_tid_h] - sh_mem_raster_horz[x_tid_h - 1][y_tid_h]) / 2.0; // compute dy float temp_vert = 
(sh_mem_raster_vert[x_tid_v][y_tid_v + 1] - sh_mem_raster_vert[x_tid_v][y_tid_v - 1]) / 2.0; // compute norm norm = sqrtf(temp_horz * temp_horz + temp_vert * temp_vert + 1.0f); if (d_raster[xindex + 3] == 0.0f) { d_surface_norm[surf_norm_xindex].z = 127; d_surface_norm[surf_norm_xindex].y = 127; d_surface_norm[surf_norm_xindex].x = 127; } else { // // version 1 d_surface_norm[surf_norm_xindex].x = 255 * ((1.0f / norm) - min) / (max - min); d_surface_norm[surf_norm_xindex].y = 255 * ((-temp_horz / norm) - min) / (max - min); d_surface_norm[surf_norm_xindex].z = 255 * ((-temp_vert / norm) - min) / (max - min); } } __syncthreads(); } // TO DO: further optimize this raw implementation // TO DO: optimize the access of shared mem to remove bank conflicts __global__ void scaleRaster(float3* d_raster, const double min, const double max, const int width, const int height, int num_output_bands) { unsigned int tidx = threadIdx.x; unsigned int tidy = threadIdx.y; // calculate normalized texture coordinates unsigned int x = blockIdx.x * blockDim.x + tidx; unsigned int y = blockIdx.y * blockDim.y + tidy; unsigned int xindex = y * width + x; if ((x < width) && (y < height)) { // Load ghost array required into shared mem // ghost pixels required to compute edges if (d_raster[xindex].x == 0.0f && d_raster[xindex].y == 0.0f && d_raster[xindex].z == 0.0f) { d_raster[xindex].x = 127; d_raster[xindex].y = 127; d_raster[xindex].z = 127; } else if (min != max) { d_raster[xindex].x = 255 * (d_raster[xindex].x - min) / (max - min); d_raster[xindex].y = 255 * (d_raster[xindex].y - min) / (max - min); d_raster[xindex].z = 255 * (d_raster[xindex].z - min) / (max - min); } else { d_raster[xindex].x = 127; d_raster[xindex].y = 127; d_raster[xindex].z = 127; } } __syncthreads(); } // //// TO DO: further optimize this raw implementation //// TO DO: optimize the access of shared mem to remove bank conflicts //__global__ void updateGlobalImage(float3* d_surfnorm_raster, float4* d_image_raster, // float* d_global_raster, const unsigned int yaw_count, // const unsigned int pitch_count, const int width, const int height, // const int num_bands) { // unsigned int tidx = threadIdx.x; // unsigned int tidy = threadIdx.y; // // // calculate normalized texture coordinates // unsigned int x = blockIdx.x * blockDim.x + tidx; // unsigned int y = blockIdx.y * blockDim.y + tidy; // // unsigned int raster_xindex = y * width + x + width / 2 + height / 2 * width; // unsigned int global_xindex = // (y + pitch_count * 36) * STITCHED_IMAGE_WIDTH + x + yaw_count * 36 / 2; // // // Load ghost array required into shared mem // // ghost pixels required to compute edges // if (pitch_count == 0) { // if (blockIdx.y == 1) { // if ((x < width) && (y < height) && // ((y + (pitch_count - 0.5) * 36) < STITCHED_IMAGE_HEIGHT) && // ((x + yaw_count * 36) < STITCHED_IMAGE_WIDTH)) { // global_xindex = // (y + (pitch_count - 0.5) * 36) * STITCHED_IMAGE_WIDTH + x + yaw_count * 36; // // d_global_raster[global_xindex * num_bands + 0] = // d_image_raster[raster_xindex].x - B_MEAN; // d_global_raster[global_xindex * num_bands + 1] = // d_image_raster[raster_xindex].y - G_MEAN; // d_global_raster[global_xindex * num_bands + 2] = // d_image_raster[raster_xindex].z - R_MEAN; // d_global_raster[global_xindex * num_bands + 3] = // d_surfnorm_raster[raster_xindex].x - B_MEAN; // d_global_raster[global_xindex * num_bands + 4] = // d_surfnorm_raster[raster_xindex].y - G_MEAN; // d_global_raster[global_xindex * num_bands + 5] = // d_surfnorm_raster[raster_xindex].z - 
R_MEAN; // } // } // } else { // if ((x < width) && (y < height) && // ((y + (pitch_count - 0.5) * 36) < STITCHED_IMAGE_HEIGHT) && // ((x + yaw_count * 36) < STITCHED_IMAGE_WIDTH)) { // global_xindex = // (y + (pitch_count - 0.5) * 36) * STITCHED_IMAGE_WIDTH + x + yaw_count * 36; // d_global_raster[global_xindex * num_bands + 0] = // d_image_raster[raster_xindex].x - B_MEAN; // d_global_raster[global_xindex * num_bands + 1] = // d_image_raster[raster_xindex].y - G_MEAN; // d_global_raster[global_xindex * num_bands + 2] = // d_image_raster[raster_xindex].z - R_MEAN; // d_global_raster[global_xindex * num_bands + 3] = // d_surfnorm_raster[raster_xindex].x - B_MEAN; // d_global_raster[global_xindex * num_bands + 4] = // d_surfnorm_raster[raster_xindex].y - G_MEAN; // d_global_raster[global_xindex * num_bands + 5] = // d_surfnorm_raster[raster_xindex].z - R_MEAN; // } // } // __syncthreads(); //} #endif // #ifndef _STURG_CUDA_POST_PROC_KERNEL_
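gpuGetUpdatedSurfaceNorm above linearizes depth from the raster's fourth channel, takes central differences in x and y through shared-memory halos, and packs the unit normal (1, -dx, -dy) / norm from [-1, 1] into [0, 255]. The host-only sketch below walks the same per-pixel math on a made-up tilted-plane depth image; it skips the halos, the zero-depth special case, and the near/far linearization, so it shows only the differencing and packing step.

#include <cmath>
#include <cstdio>

int main() {
  const int W = 4, H = 4;
  // Stand-in for the linear depth the kernel derives from the alpha channel.
  float depth[H][W];
  for (int y = 0; y < H; ++y)
    for (int x = 0; x < W; ++x) depth[y][x] = 10.0f + 0.5f * x + 0.25f * y;  // tilted plane

  const float min = -1.0f, max = 1.0f;
  for (int y = 1; y < H - 1; ++y) {
    for (int x = 1; x < W - 1; ++x) {
      float dx = (depth[y][x + 1] - depth[y][x - 1]) / 2.0f;  // central difference in x
      float dy = (depth[y + 1][x] - depth[y - 1][x]) / 2.0f;  // central difference in y
      float norm = std::sqrt(dx * dx + dy * dy + 1.0f);
      // Same packing as the kernel: map each normal component from [-1, 1] to [0, 255].
      float nx = 255.0f * ((1.0f / norm) - min) / (max - min);
      float ny = 255.0f * ((-dx / norm) - min) / (max - min);
      float nz = 255.0f * ((-dy / norm) - min) / (max - min);
      std::printf("pixel (%d,%d): %.0f %.0f %.0f\n", x, y, nx, ny, nz);
    }
  }
  return 0;
}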
68c05ba05c40f0939a3d6b2ff5742fb1cd24a142.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef USE_OPENCV #include <opencv2/core/core.hpp> #include <opencv2/imgproc/imgproc.hpp> #endif // USE_OPENCV #include <algorithm> #include <fstream> // NOLINT(readability/streams) #include <map> #include <string> #include <utility> #include <vector> #include <iostream> #include "boost/filesystem.hpp" #include "boost/foreach.hpp" #include "caffe/layers/concatnew_layer.hpp" #include "caffe/util/math_functions.hpp" using namespace std; namespace caffe { template <typename Dtype> __global__ void ConcatNew(const int nthreads, const Dtype* in_data, const bool forward, const int num_concats, const int concat_size, const int top_concat_axis, const int bottom_concat_axis, const int offset_concat_axis, Dtype* out_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int total_concat_size = concat_size * bottom_concat_axis; const int concat_num = index / total_concat_size; const int concat_index = index % total_concat_size; const int top_index = concat_index + (concat_num * top_concat_axis + offset_concat_axis) * concat_size; if (forward) { out_data[top_index] = in_data[index]; } else { out_data[index] = in_data[top_index]; } } } template <typename Dtype> void ConcatNewLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { if (visualize_) { #ifdef USE_OPENCV vector<cv::Mat> cv_imgs,cv_seg,cv_imgseg; this->data_transformer_->TransformInv(bottom[0], &cv_imgs); this->data_transformer_->TransformInv(bottom[1], &cv_seg); this->data_transformer_->TransformInv(bottom[3], &cv_imgseg); //vector<cv::Mat> new_imgs = AddChannels(cv_imgs, cv_seg); vector<cv::Scalar> colors = GetColors(label_to_display_name_.size()); VisualizeBBox(cv_imgs, cv_seg, cv_imgseg, bottom[2], visualize_threshold_, colors, label_to_display_name_, save_file_); #endif // USE_OPENCV } } template <typename Dtype> void ConcatNewLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { NOT_IMPLEMENTED; } INSTANTIATE_LAYER_GPU_FUNCS(ConcatNewLayer); } // namespace caffe
68c05ba05c40f0939a3d6b2ff5742fb1cd24a142.cu
#ifdef USE_OPENCV #include <opencv2/core/core.hpp> #include <opencv2/imgproc/imgproc.hpp> #endif // USE_OPENCV #include <algorithm> #include <fstream> // NOLINT(readability/streams) #include <map> #include <string> #include <utility> #include <vector> #include <iostream> #include "boost/filesystem.hpp" #include "boost/foreach.hpp" #include "caffe/layers/concatnew_layer.hpp" #include "caffe/util/math_functions.hpp" using namespace std; namespace caffe { template <typename Dtype> __global__ void ConcatNew(const int nthreads, const Dtype* in_data, const bool forward, const int num_concats, const int concat_size, const int top_concat_axis, const int bottom_concat_axis, const int offset_concat_axis, Dtype* out_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int total_concat_size = concat_size * bottom_concat_axis; const int concat_num = index / total_concat_size; const int concat_index = index % total_concat_size; const int top_index = concat_index + (concat_num * top_concat_axis + offset_concat_axis) * concat_size; if (forward) { out_data[top_index] = in_data[index]; } else { out_data[index] = in_data[top_index]; } } } template <typename Dtype> void ConcatNewLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { if (visualize_) { #ifdef USE_OPENCV vector<cv::Mat> cv_imgs,cv_seg,cv_imgseg; this->data_transformer_->TransformInv(bottom[0], &cv_imgs); this->data_transformer_->TransformInv(bottom[1], &cv_seg); this->data_transformer_->TransformInv(bottom[3], &cv_imgseg); //vector<cv::Mat> new_imgs = AddChannels(cv_imgs, cv_seg); vector<cv::Scalar> colors = GetColors(label_to_display_name_.size()); VisualizeBBox(cv_imgs, cv_seg, cv_imgseg, bottom[2], visualize_threshold_, colors, label_to_display_name_, save_file_); #endif // USE_OPENCV } } template <typename Dtype> void ConcatNewLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { NOT_IMPLEMENTED; } INSTANTIATE_LAYER_GPU_FUNCS(ConcatNewLayer); } // namespace caffe
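The ConcatNew kernel above is pure index arithmetic: each flat element of one bottom blob is mapped into that blob's slice of the top blob along the concatenation axis. The host-only sketch below runs the forward direction of exactly that mapping with small, hypothetical sizes and no Caffe types.

#include <cstdio>
#include <vector>

int main() {
  const int num_concats = 2;         // batch size times the dims before the concat axis
  const int concat_size = 3;         // product of the dims after the concat axis
  const int bottom_concat_axis = 2;  // this bottom's extent along the concat axis
  const int top_concat_axis = 5;     // total extent along the concat axis
  const int offset_concat_axis = 1;  // where this bottom starts inside the top

  const int nthreads = num_concats * bottom_concat_axis * concat_size;
  std::vector<float> bottom(nthreads);
  for (int i = 0; i < nthreads; ++i) bottom[i] = static_cast<float>(i);

  const int top_count = num_concats * top_concat_axis * concat_size;
  std::vector<float> top(top_count, -1.0f);

  for (int index = 0; index < nthreads; ++index) {  // serial stand-in for CUDA_KERNEL_LOOP
    const int total_concat_size = concat_size * bottom_concat_axis;
    const int concat_num = index / total_concat_size;
    const int concat_index = index % total_concat_size;
    const int top_index =
        concat_index + (concat_num * top_concat_axis + offset_concat_axis) * concat_size;
    top[top_index] = bottom[index];  // the forward == true branch
  }

  // Untouched positions stay -1, showing where the other bottoms would land.
  for (int i = 0; i < top_count; ++i)
    std::printf("%5.1f%s", top[i], (i + 1) % (top_concat_axis * concat_size) ? " " : "\n");
  return 0;
}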
584cef063aacda47f509d0cb7b230e1b57343d3e.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** * Vector addition: C = A + B. * * This sample is a very basic sample that implements element by element * vector addition. It is the same as the sample illustrating Chapter 2 * of the programming guide with some additions like error checking. */ #include <stdio.h> #include <cassert> // For the CUDA runtime routines (prefixed with "cuda_") #include <hip/hip_runtime.h> #include <unistd.h> #include <helper_cuda.h> #include <pthread.h> #include <iostream> using namespace std; #define DEBUG 1 #define NUM_THREADS 1 inline hipError_t checkCuda(hipError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != hipSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result)); assert(result == hipSuccess); } #endif return result; } float *x, *y, *z; int numElements = 2; int nStreams = 4; /** * CUDA Kernel Device code * * Computes the vector addition of A and B into C. The 3 vectors have the same * number of elements numElements. */ __global__ void vectorAdd(const float *A, const float *B, float *C, int numElements) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numElements) { C[i] = A[i] + B[i]; } } void *PrintHello(void *threadid) { long tid; tid = (long)threadid; cout << "Hello World! Thread ID, " << tid << endl; int threadsPerBlock = 32; int blocksPerGrid = (1 + threadsPerBlock - 1) / threadsPerBlock; hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, x + numElements, y + numElements, z+numElements, 1); checkCuda(hipStreamSynchronize(0)); pthread_exit(NULL); } /** * Host main routine */ int main(void) { // Print the vector length to be used, and compute its size size_t size = (numElements + NUM_THREADS) * sizeof(float); printf("[Vector addition of %d elements]\n", numElements); int N = (numElements + NUM_THREADS); checkCuda(hipMallocManaged(&x, N * sizeof(float))); checkCuda(hipMallocManaged(&y, N * sizeof(float))); checkCuda(hipMallocManaged(&z, N * sizeof(float))); // Verify that allocations succeeded if (x == NULL || y == NULL || z == NULL) { fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } // Initialize the host input vectors for (int i = 0; i < numElements + NUM_THREADS; ++i) { x[i] = rand() / (float)RAND_MAX; printf("x[%d] addr %llu val %.2f\t", i, &x[i], x[i]); y[i] = rand() / (float)RAND_MAX; printf("y[%d] addr %llu val %.2f\n", i, &y[i], y[i]); } // Launch the Vector Add CUDA Kernel int threadsPerBlock = 256; int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock; printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, x, y, z, numElements); pid_t pid = getpid(); printf("vectorAdd PID: %d\n", pid); // while(1){ // cout<<"cpu working"<<std::flush; // sleep(2); // } printf("Test PASSED\n"); checkCuda(hipStreamSynchronize(0)); for (int i = 0; i < numElements + NUM_THREADS; ++i) { x[i] = rand() / (float)RAND_MAX; printf("second time: x[%d] addr %llu val %.2f\t", i, &x[i], x[i]); y[i] = rand() / (float)RAND_MAX; 
printf("second time: y[%d] addr %llu val %.2f\n", i, &y[i], y[i]); } // multi threads int rc; int i; pthread_t threads[NUM_THREADS]; for (i = 0; i < NUM_THREADS; i++) { cout << "main() : creating thread, " << i << endl; rc = pthread_create(&threads[i], NULL, PrintHello, (void *)i); if (rc) { cout << "Error:unable to create thread," << rc << endl; exit(-1); } } for (i = 0; i < NUM_THREADS; i++) { void *ret; if (pthread_join(threads[i], &ret) != 0) { printf("thread exited with '%s'\n", ret); exit(3); } } // for (int i = 0; i < numElements + NUM_THREADS; ++i) // { // printf("z[%d] addr %llu val %.2f\t", i, &z[i], z[i]); // } cout << endl; // multi stream hipStream_t stream[nStreams]; // Free device global memory checkCuda(hipFree(x)); checkCuda(hipFree(y)); checkCuda(hipFree(z)); printf("Done\n"); return 0; }
584cef063aacda47f509d0cb7b230e1b57343d3e.cu
/** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** * Vector addition: C = A + B. * * This sample is a very basic sample that implements element by element * vector addition. It is the same as the sample illustrating Chapter 2 * of the programming guide with some additions like error checking. */ #include <stdio.h> #include <cassert> // For the CUDA runtime routines (prefixed with "cuda_") #include <cuda_runtime.h> #include <unistd.h> #include <helper_cuda.h> #include <pthread.h> #include <iostream> using namespace std; #define DEBUG 1 #define NUM_THREADS 1 inline cudaError_t checkCuda(cudaError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != cudaSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result)); assert(result == cudaSuccess); } #endif return result; } float *x, *y, *z; int numElements = 2; int nStreams = 4; /** * CUDA Kernel Device code * * Computes the vector addition of A and B into C. The 3 vectors have the same * number of elements numElements. */ __global__ void vectorAdd(const float *A, const float *B, float *C, int numElements) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numElements) { C[i] = A[i] + B[i]; } } void *PrintHello(void *threadid) { long tid; tid = (long)threadid; cout << "Hello World! Thread ID, " << tid << endl; int threadsPerBlock = 32; int blocksPerGrid = (1 + threadsPerBlock - 1) / threadsPerBlock; vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(x + numElements, y + numElements, z+numElements, 1); checkCuda(cudaStreamSynchronize(0)); pthread_exit(NULL); } /** * Host main routine */ int main(void) { // Print the vector length to be used, and compute its size size_t size = (numElements + NUM_THREADS) * sizeof(float); printf("[Vector addition of %d elements]\n", numElements); int N = (numElements + NUM_THREADS); checkCuda(cudaMallocManaged(&x, N * sizeof(float))); checkCuda(cudaMallocManaged(&y, N * sizeof(float))); checkCuda(cudaMallocManaged(&z, N * sizeof(float))); // Verify that allocations succeeded if (x == NULL || y == NULL || z == NULL) { fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } // Initialize the host input vectors for (int i = 0; i < numElements + NUM_THREADS; ++i) { x[i] = rand() / (float)RAND_MAX; printf("x[%d] addr %llu val %.2f\t", i, &x[i], x[i]); y[i] = rand() / (float)RAND_MAX; printf("y[%d] addr %llu val %.2f\n", i, &y[i], y[i]); } // Launch the Vector Add CUDA Kernel int threadsPerBlock = 256; int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock; printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(x, y, z, numElements); pid_t pid = getpid(); printf("vectorAdd PID: %d\n", pid); // while(1){ // cout<<"cpu working"<<std::flush; // sleep(2); // } printf("Test PASSED\n"); checkCuda(cudaStreamSynchronize(0)); for (int i = 0; i < numElements + NUM_THREADS; ++i) { x[i] = rand() / (float)RAND_MAX; printf("second time: x[%d] addr %llu val %.2f\t", i, &x[i], x[i]); y[i] = rand() / (float)RAND_MAX; printf("second time: y[%d] addr %llu val %.2f\n", i, &y[i], y[i]); } // multi threads int rc; int i; pthread_t 
threads[NUM_THREADS]; for (i = 0; i < NUM_THREADS; i++) { cout << "main() : creating thread, " << i << endl; rc = pthread_create(&threads[i], NULL, PrintHello, (void *)i); if (rc) { cout << "Error:unable to create thread," << rc << endl; exit(-1); } } for (i = 0; i < NUM_THREADS; i++) { void *ret; if (pthread_join(threads[i], &ret) != 0) { printf("thread exited with '%s'\n", ret); exit(3); } } // for (int i = 0; i < numElements + NUM_THREADS; ++i) // { // printf("z[%d] addr %llu val %.2f\t", i, &z[i], z[i]); // } cout << endl; // multi stream cudaStream_t stream[nStreams]; // Free device global memory checkCuda(cudaFree(x)); checkCuda(cudaFree(y)); checkCuda(cudaFree(z)); printf("Done\n"); return 0; }
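The pthread sample above declares cudaStream_t stream[nStreams] but never uses it, so every launch (from main and from the worker thread) ends up on the default stream. Below is a minimal sketch, with illustrative sizes, of what per-stream launches over slices of the same managed buffers would look like; it is a separate toy program, not a drop-in change to the sample.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void vectorAdd(const float* A, const float* B, float* C, int n) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i < n) C[i] = A[i] + B[i];
}

int main() {
  const int nStreams = 4;
  const int chunk = 1 << 20;            // elements handled per stream
  const int N = nStreams * chunk;

  float *x, *y, *z;
  cudaMallocManaged(&x, N * sizeof(float));
  cudaMallocManaged(&y, N * sizeof(float));
  cudaMallocManaged(&z, N * sizeof(float));
  for (int i = 0; i < N; ++i) { x[i] = 1.0f; y[i] = 2.0f; }

  cudaStream_t stream[nStreams];
  for (int s = 0; s < nStreams; ++s) cudaStreamCreate(&stream[s]);

  const int block = 256;
  const int grid = (chunk + block - 1) / block;
  for (int s = 0; s < nStreams; ++s) {
    int offset = s * chunk;             // each stream adds its own slice
    vectorAdd<<<grid, block, 0, stream[s]>>>(x + offset, y + offset, z + offset, chunk);
  }

  for (int s = 0; s < nStreams; ++s) {
    cudaStreamSynchronize(stream[s]);   // wait before touching z on the host
    cudaStreamDestroy(stream[s]);
  }
  printf("z[0] = %.1f, z[N-1] = %.1f\n", z[0], z[N - 1]);

  cudaFree(x);
  cudaFree(y);
  cudaFree(z);
  return 0;
}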
9fa9b13840974e2b85ea1100135a31ccad5fc327.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <cstdio> #include <cmath> #include "error_checks.h" // Macros CUDA_CHECK and CHECK_ERROR_MSG __global__ void vector_add(double* C, const double* A, const double* B, int N) { // Add the kernel code int idx = blockIdx.x * blockDim.x + threadIdx.x; // Do not try to access past the allocated memory if (idx < N) { C[idx] = A[idx] + B[idx]; } } int main(void) { const int N = 20; const int ThreadsInBlock = 12800; double* dA, * dB, * dC; double hA[N], hB[N], hC[N]; for (int i = 0; i < N; ++i) { hA[i] = (double)i; hB[i] = (double)i * i; } /* Add memory allocations and copies. Wrap your runtime function calls with CUDA_CHECK( ) macro */ CUDA_CHECK(hipMalloc((void**)& dA, sizeof(double) * N)); CUDA_CHECK(hipMalloc((void**)& dB, sizeof(double) * N)); CUDA_CHECK(hipMalloc((void**)& dC, sizeof(double) * N)); CUDA_CHECK(hipMemcpy(dA, hA, sizeof(double) * N, hipMemcpyHostToDevice)); CUDA_CHECK(hipMemcpy(dB, hB, sizeof(double) * N, hipMemcpyHostToDevice)); CUDA_CHECK(hipMemcpy(dC, hC, sizeof(double) * N, hipMemcpyHostToDevice)); //#error Add the remaining memory allocations and copies // Note the maximum size of threads in a block dim3 grid, threads; // Add the kernel call here //#error Add the CUDA kernel call vector_add << <1, ThreadsInBlock >> > (dC, dA, dB, N); // Here we add an explicit synchronization so that we catch errors // as early as possible. Don't do this in production code! //hipDeviceSynchronize(); CHECK_ERROR_MSG("vector_add kernel"); // Copy back the results and free the device memory //#error Copy back the results and free the allocated memory CUDA_CHECK(hipMemcpy(hA, dA, sizeof(double) * N, hipMemcpyDeviceToHost)); CUDA_CHECK(hipMemcpy(hB, dB, sizeof(double) * N, hipMemcpyDeviceToHost)); CUDA_CHECK(hipMemcpy(hC, dC, sizeof(double) * N, hipMemcpyDeviceToHost)); CUDA_CHECK(hipFree(dA)); CUDA_CHECK(hipFree(dB)); CUDA_CHECK(hipFree(dC)); for (int i = 0; i < N; i++) printf("%5.1f\n", hC[i]); return 0; }
9fa9b13840974e2b85ea1100135a31ccad5fc327.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <cstdio> #include <cmath> #include "error_checks.h" // Macros CUDA_CHECK and CHECK_ERROR_MSG __global__ void vector_add(double* C, const double* A, const double* B, int N) { // Add the kernel code int idx = blockIdx.x * blockDim.x + threadIdx.x; // Do not try to access past the allocated memory if (idx < N) { C[idx] = A[idx] + B[idx]; } } int main(void) { const int N = 20; const int ThreadsInBlock = 12800; double* dA, * dB, * dC; double hA[N], hB[N], hC[N]; for (int i = 0; i < N; ++i) { hA[i] = (double)i; hB[i] = (double)i * i; } /* Add memory allocations and copies. Wrap your runtime function calls with CUDA_CHECK( ) macro */ CUDA_CHECK(cudaMalloc((void**)& dA, sizeof(double) * N)); CUDA_CHECK(cudaMalloc((void**)& dB, sizeof(double) * N)); CUDA_CHECK(cudaMalloc((void**)& dC, sizeof(double) * N)); CUDA_CHECK(cudaMemcpy(dA, hA, sizeof(double) * N, cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMemcpy(dB, hB, sizeof(double) * N, cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMemcpy(dC, hC, sizeof(double) * N, cudaMemcpyHostToDevice)); //#error Add the remaining memory allocations and copies // Note the maximum size of threads in a block dim3 grid, threads; // Add the kernel call here //#error Add the CUDA kernel call vector_add << <1, ThreadsInBlock >> > (dC, dA, dB, N); // Here we add an explicit synchronization so that we catch errors // as early as possible. Don't do this in production code! //cudaDeviceSynchronize(); CHECK_ERROR_MSG("vector_add kernel"); // Copy back the results and free the device memory //#error Copy back the results and free the allocated memory CUDA_CHECK(cudaMemcpy(hA, dA, sizeof(double) * N, cudaMemcpyDeviceToHost)); CUDA_CHECK(cudaMemcpy(hB, dB, sizeof(double) * N, cudaMemcpyDeviceToHost)); CUDA_CHECK(cudaMemcpy(hC, dC, sizeof(double) * N, cudaMemcpyDeviceToHost)); CUDA_CHECK(cudaFree(dA)); CUDA_CHECK(cudaFree(dB)); CUDA_CHECK(cudaFree(dC)); for (int i = 0; i < N; i++) printf("%5.1f\n", hC[i]); return 0; }
f345640040121b648eca2d7338f4322b2360364d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // modified from // https://github.com/NVIDIA/apex/blob/master/csrc/multi_tensor_l2norm_kernel.cu #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/Exceptions.h> #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h> // Another possibility: // #include <torch/all.h> #include <assert.h> #include "multi_tensor_apply.cuh" #include "type_shim.h" #define BLOCK_SIZE 512 #define ILP 4 template <typename T> __device__ __forceinline__ bool is_aligned(T *p) { return ((uint64_t)p) % (ILP * sizeof(T)) == 0; } template <typename T> __device__ __forceinline__ void load_store(T *dst, T *src, int dst_offset, int src_offset) { typedef typename std::aligned_storage<ILP * sizeof(T), ILP * alignof(T)>::type LT; ((LT *)dst)[dst_offset] = ((LT *)src)[src_offset]; } template <typename x_t> struct L2NormFunctor { __device__ __forceinline__ void operator()( int chunk_size, volatile int *noop_gmem, TensorListMetadata<1> &tl, float *output, float *output_per_tensor, bool per_tensor, int max_chunks_per_tensor) { // I'd like this kernel to propagate infs/nans. // if(*noop_gmem == 1) // return; int tensor_loc = tl.block_to_tensor[blockIdx.x]; int chunk_idx = tl.block_to_chunk[blockIdx.x]; int n = tl.sizes[tensor_loc]; x_t *x = (x_t *)tl.addresses[0][tensor_loc]; x += chunk_idx * chunk_size; n -= chunk_idx * chunk_size; __shared__ float s_vals[512]; float vals[ILP]; // = {0}; // this probably works too but I want to be // sure... x_t r_x[ILP]; for (int i = 0; i < ILP; i++) { vals[i] = 0.f; r_x[i] = 0; } // to make things simple, we put aligned case in a different code path if (n % ILP == 0 && chunk_size % ILP == 0 && is_aligned(x)) { for (int i_start = threadIdx.x; i_start * ILP < n && i_start * ILP < chunk_size; i_start += blockDim.x) { // load load_store(r_x, x, 0, i_start); #pragma unroll for (int ii = 0; ii < ILP; ii++) { float next = static_cast<float>(r_x[ii]); vals[ii] += next * next; } } } else { for (int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x * ILP) { #pragma unroll for (int ii = 0; ii < ILP; ii++) { int i = i_start + threadIdx.x + ii * blockDim.x; if (i < n && i < chunk_size) { float next = static_cast<float>(x[i]); vals[ii] += next * next; } } } } float val = 0.f; for (int i = 0; i < ILP; i++) val += vals[i]; float final = reduce_block_into_lanes(s_vals, val); if (threadIdx.x == 0) { if (!isfinite(final)) *noop_gmem = 1; // Blindly fire off a write. These will race but that's ok. output[blockIdx.x] += final; if (per_tensor) output_per_tensor[(tl.start_tensor_this_launch + tensor_loc) * max_chunks_per_tensor + chunk_idx] = final; } } }; // Probably better to template, but since we are not likely to support other // norm template <typename x_t> struct MaxNormFunctor { __device__ __forceinline__ void operator()( int chunk_size, volatile int *noop_gmem, TensorListMetadata<1> &tl, float *output, float *output_per_tensor, bool per_tensor, int max_chunks_per_tensor) { // I'd like this kernel to propagate infs/nans. // if(*noop_gmem == 1) // return; int tensor_loc = tl.block_to_tensor[blockIdx.x]; int chunk_idx = tl.block_to_chunk[blockIdx.x]; int n = tl.sizes[tensor_loc]; x_t *x = (x_t *)tl.addresses[0][tensor_loc]; x += chunk_idx * chunk_size; n -= chunk_idx * chunk_size; __shared__ float s_vals[512]; float vals[ILP]; // = {0}; // this probably works too but I want to be // sure... 
x_t r_x[ILP]; for (int i = 0; i < ILP; i++) { vals[i] = 0.f; r_x[i] = 0; } // to make things simple, we put aligned case in a different code path if (n % ILP == 0 && chunk_size % ILP == 0 && is_aligned(x)) { for (int i_start = threadIdx.x; i_start * ILP < n && i_start * ILP < chunk_size; i_start += blockDim.x) { // load load_store(r_x, x, 0, i_start); #pragma unroll for (int ii = 0; ii < ILP; ii++) { float next = static_cast<float>(r_x[ii]); vals[ii] = fmaxf(fabsf(vals[ii]), fabsf(next)); } } } else { for (int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x * ILP) { #pragma unroll for (int ii = 0; ii < ILP; ii++) { int i = i_start + threadIdx.x + ii * blockDim.x; if (i < n && i < chunk_size) { float next = static_cast<float>(x[i]); vals[ii] = fmaxf(fabsf(vals[ii]), fabsf(next)); } } } } float val = 0.f; for (int i = 0; i < ILP; i++) val = fmaxf(fabsf(val), fabsf(vals[i])); float final = reduce_block_into_lanes_max_op(s_vals, val); if (threadIdx.x == 0) { if (!isfinite(final)) *noop_gmem = 1; // Blindly fire off a write. These will race but that's ok. output[blockIdx.x] = fmaxf(fabsf(output[blockIdx.x]), fabsf(final)); if (per_tensor) output_per_tensor[(tl.start_tensor_this_launch + tensor_loc) * max_chunks_per_tensor + chunk_idx] = final; } } }; __global__ void cleanup(float *output, float *output_per_tensor, float *ret, float *ret_per_tensor, bool per_tensor, int max_chunks_per_tensor) { __shared__ float vals[512]; if (blockIdx.x == 0) { float val = 0; if (threadIdx.x < 320) val = output[threadIdx.x]; float final = reduce_block_into_lanes(vals, val); if (threadIdx.x == 0) *ret = sqrt(final); } if (per_tensor) { float *output_this_tensor = output_per_tensor + blockIdx.x * max_chunks_per_tensor; float val = 0; for (int i = threadIdx.x; i < max_chunks_per_tensor; i += blockDim.x) val += output_this_tensor[i]; float final = reduce_block_into_lanes(vals, val); if (threadIdx.x == 0) ret_per_tensor[blockIdx.x] = sqrt(final); } } __global__ void cleanup_v2(float *output, float *output_per_tensor, float *ret, float *ret_per_tensor, bool per_tensor, int max_chunks_per_tensor, int norm_type, float alpha, float beta) { __shared__ float vals[512]; if (blockIdx.x == 0) { float val = 0; if (threadIdx.x < 320) val = output[threadIdx.x]; if (norm_type == 0) { float final = reduce_block_into_lanes_max_op(vals, val); if (threadIdx.x == 0) *ret = alpha * (*ret) + beta * final; } else { float final = reduce_block_into_lanes(vals, val); if (threadIdx.x == 0) *ret = sqrt(alpha * (*ret) * (*ret) + beta * final); } } if (per_tensor) { float *output_this_tensor = output_per_tensor + blockIdx.x * max_chunks_per_tensor; if (norm_type == 0) { float val = 0; for (int i = threadIdx.x; i < max_chunks_per_tensor; i += blockDim.x) val = fmaxf(fabsf(val), fabsf(output_this_tensor[i])); float final = reduce_block_into_lanes_max_op(vals, val); if (threadIdx.x == 0) ret_per_tensor[blockIdx.x] = alpha * ret_per_tensor[blockIdx.x] + beta * final; } else { float val = 0; for (int i = threadIdx.x; i < max_chunks_per_tensor; i += blockDim.x) val += output_this_tensor[i]; float final = reduce_block_into_lanes(vals, val); if (threadIdx.x == 0) ret_per_tensor[blockIdx.x] = sqrt(alpha * ret_per_tensor[blockIdx.x] * ret_per_tensor[blockIdx.x] + beta * final); } } } std::tuple<at::Tensor, at::Tensor> multi_tensor_l2norm_cuda( int chunk_size, at::Tensor noop_flag, std::vector<std::vector<at::Tensor>> tensor_lists, at::optional<bool> per_tensor_python) { bool per_tensor = per_tensor_python.has_value() ? 
per_tensor_python.value() : false; auto float_options = tensor_lists[0][0].options().dtype(at::kFloat); auto output = at::zeros({320}, float_options); at::Tensor output_per_tensor; at::Tensor ret_per_tensor; int ntensors = tensor_lists[0].size(); int max_chunks_per_tensor = -1; if (per_tensor) { for (int t = 0; t < ntensors; t++) { int max_chunks_this_tensor = (tensor_lists[0][t].numel() + chunk_size - 1) / chunk_size; if (max_chunks_this_tensor > max_chunks_per_tensor) max_chunks_per_tensor = max_chunks_this_tensor; } output_per_tensor = at::zeros({ntensors * max_chunks_per_tensor}, float_options); ret_per_tensor = at::empty({ntensors}, float_options); } else { ret_per_tensor = at::empty({0}, float_options); } DISPATCH_FLOAT_AND_HALF( tensor_lists[0][0].scalar_type(), 0, "multi_tensor_l2norm_cuda", multi_tensor_apply<1>( BLOCK_SIZE, chunk_size, noop_flag, tensor_lists, L2NormFunctor<scalar_t_0>(), output.DATA_PTR<float>(), per_tensor ? output_per_tensor.DATA_PTR<float>() : nullptr, per_tensor, max_chunks_per_tensor);) AT_CUDA_CHECK(hipGetLastError()); // AT_CUDA_CHECK(hipDeviceSynchronize()); // This involves one more small kernel launches, but will be negligible end to // end. I could get rid of these by hacking the functor + multi tensor harness // with persistence logic, but keeping it simple for now auto ret = at::empty({1}, output.options()); const at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard(device_of(output)); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( cleanup), dim3(per_tensor ? ntensors : 1), dim3(512), 0, stream, output.DATA_PTR<float>(), per_tensor ? output_per_tensor.DATA_PTR<float>() : nullptr, ret.DATA_PTR<float>(), per_tensor ? ret_per_tensor.DATA_PTR<float>() : nullptr, per_tensor, max_chunks_per_tensor); return std::tuple<at::Tensor, at::Tensor>(ret, ret_per_tensor); } // Compute and update grad norm // Here use a per tensor norm, and blend new norm(n) and old norm(gn) by // L-2: gn = sqrt(a * gn^2 + b * n^2) // L-inf: gn = a * gn + b * n void multi_tensor_norm_out_cuda( int chunk_size, at::Tensor noop_flag, std::vector<std::vector<at::Tensor>> tensor_lists, at::Tensor out, const float alpha, const float beta, const int norm_type) { auto float_options = tensor_lists[0][0].options().dtype(at::kFloat); TORCH_CHECK(tensor_lists[0][0].device() == noop_flag.device(), "noop flag should be on the same device as tensors"); // we don't need global thus uses empty here auto output = at::empty({320}, float_options); at::Tensor output_per_tensor; at::Tensor ret_per_tensor; int ntensors = tensor_lists[0].size(); int max_chunks_per_tensor = -1; for (int t = 0; t < ntensors; t++) { int max_chunks_this_tensor = (tensor_lists[0][t].numel() + chunk_size - 1) / chunk_size; if (max_chunks_this_tensor > max_chunks_per_tensor) max_chunks_per_tensor = max_chunks_this_tensor; } // Although it is single write then read, still need to be zero // Since tailing element also participate cleanup output_per_tensor = at::zeros({ntensors * max_chunks_per_tensor}, float_options); if (norm_type == 0) { DISPATCH_FLOAT_AND_HALF( tensor_lists[0][0].scalar_type(), 0, "multi_tensor_maxnorm_cuda", multi_tensor_apply<1>( BLOCK_SIZE, chunk_size, noop_flag, tensor_lists, MaxNormFunctor<scalar_t_0>(), output.DATA_PTR<float>(), output_per_tensor.DATA_PTR<float>(), true, max_chunks_per_tensor);) } else { DISPATCH_FLOAT_AND_HALF( tensor_lists[0][0].scalar_type(), 0, "multi_tensor_l2norm_cuda", multi_tensor_apply<1>( BLOCK_SIZE, chunk_size, noop_flag, tensor_lists, 
L2NormFunctor<scalar_t_0>(), output.DATA_PTR<float>(), output_per_tensor.DATA_PTR<float>(), true, max_chunks_per_tensor);) } AT_CUDA_CHECK(hipGetLastError()); // AT_CUDA_CHECK(hipDeviceSynchronize()); // This involves one more small kernel launches, but will be negligible end to // end. I could get rid of these by hacking the functor + multi tensor harness // with persistence logic, but keeping it simple for now auto ret = at::empty({1}, output.options()); // Adding the following device guard since it happens sometimes that the // tensors are on one device and the cuda stream is on another device which // results in ILLEGAL MEM ACCESS error. const at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard(device_of(output)); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( cleanup_v2), dim3(ntensors), dim3(512), 0, stream, output.DATA_PTR<float>(), output_per_tensor.DATA_PTR<float>(), ret.DATA_PTR<float>(), out.DATA_PTR<float>(), true, max_chunks_per_tensor, norm_type, alpha, beta); return; }
f345640040121b648eca2d7338f4322b2360364d.cu
// modified from // https://github.com/NVIDIA/apex/blob/master/csrc/multi_tensor_l2norm_kernel.cu #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/Exceptions.h> #include <c10/cuda/CUDAGuard.h> // Another possibility: // #include <torch/all.h> #include <assert.h> #include "multi_tensor_apply.cuh" #include "type_shim.h" #define BLOCK_SIZE 512 #define ILP 4 template <typename T> __device__ __forceinline__ bool is_aligned(T *p) { return ((uint64_t)p) % (ILP * sizeof(T)) == 0; } template <typename T> __device__ __forceinline__ void load_store(T *dst, T *src, int dst_offset, int src_offset) { typedef typename std::aligned_storage<ILP * sizeof(T), ILP * alignof(T)>::type LT; ((LT *)dst)[dst_offset] = ((LT *)src)[src_offset]; } template <typename x_t> struct L2NormFunctor { __device__ __forceinline__ void operator()( int chunk_size, volatile int *noop_gmem, TensorListMetadata<1> &tl, float *output, float *output_per_tensor, bool per_tensor, int max_chunks_per_tensor) { // I'd like this kernel to propagate infs/nans. // if(*noop_gmem == 1) // return; int tensor_loc = tl.block_to_tensor[blockIdx.x]; int chunk_idx = tl.block_to_chunk[blockIdx.x]; int n = tl.sizes[tensor_loc]; x_t *x = (x_t *)tl.addresses[0][tensor_loc]; x += chunk_idx * chunk_size; n -= chunk_idx * chunk_size; __shared__ float s_vals[512]; float vals[ILP]; // = {0}; // this probably works too but I want to be // sure... x_t r_x[ILP]; for (int i = 0; i < ILP; i++) { vals[i] = 0.f; r_x[i] = 0; } // to make things simple, we put aligned case in a different code path if (n % ILP == 0 && chunk_size % ILP == 0 && is_aligned(x)) { for (int i_start = threadIdx.x; i_start * ILP < n && i_start * ILP < chunk_size; i_start += blockDim.x) { // load load_store(r_x, x, 0, i_start); #pragma unroll for (int ii = 0; ii < ILP; ii++) { float next = static_cast<float>(r_x[ii]); vals[ii] += next * next; } } } else { for (int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x * ILP) { #pragma unroll for (int ii = 0; ii < ILP; ii++) { int i = i_start + threadIdx.x + ii * blockDim.x; if (i < n && i < chunk_size) { float next = static_cast<float>(x[i]); vals[ii] += next * next; } } } } float val = 0.f; for (int i = 0; i < ILP; i++) val += vals[i]; float final = reduce_block_into_lanes(s_vals, val); if (threadIdx.x == 0) { if (!isfinite(final)) *noop_gmem = 1; // Blindly fire off a write. These will race but that's ok. output[blockIdx.x] += final; if (per_tensor) output_per_tensor[(tl.start_tensor_this_launch + tensor_loc) * max_chunks_per_tensor + chunk_idx] = final; } } }; // Probably better to template, but since we are not likely to support other // norm template <typename x_t> struct MaxNormFunctor { __device__ __forceinline__ void operator()( int chunk_size, volatile int *noop_gmem, TensorListMetadata<1> &tl, float *output, float *output_per_tensor, bool per_tensor, int max_chunks_per_tensor) { // I'd like this kernel to propagate infs/nans. // if(*noop_gmem == 1) // return; int tensor_loc = tl.block_to_tensor[blockIdx.x]; int chunk_idx = tl.block_to_chunk[blockIdx.x]; int n = tl.sizes[tensor_loc]; x_t *x = (x_t *)tl.addresses[0][tensor_loc]; x += chunk_idx * chunk_size; n -= chunk_idx * chunk_size; __shared__ float s_vals[512]; float vals[ILP]; // = {0}; // this probably works too but I want to be // sure... 
x_t r_x[ILP]; for (int i = 0; i < ILP; i++) { vals[i] = 0.f; r_x[i] = 0; } // to make things simple, we put aligned case in a different code path if (n % ILP == 0 && chunk_size % ILP == 0 && is_aligned(x)) { for (int i_start = threadIdx.x; i_start * ILP < n && i_start * ILP < chunk_size; i_start += blockDim.x) { // load load_store(r_x, x, 0, i_start); #pragma unroll for (int ii = 0; ii < ILP; ii++) { float next = static_cast<float>(r_x[ii]); vals[ii] = fmaxf(fabsf(vals[ii]), fabsf(next)); } } } else { for (int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x * ILP) { #pragma unroll for (int ii = 0; ii < ILP; ii++) { int i = i_start + threadIdx.x + ii * blockDim.x; if (i < n && i < chunk_size) { float next = static_cast<float>(x[i]); vals[ii] = fmaxf(fabsf(vals[ii]), fabsf(next)); } } } } float val = 0.f; for (int i = 0; i < ILP; i++) val = fmaxf(fabsf(val), fabsf(vals[i])); float final = reduce_block_into_lanes_max_op(s_vals, val); if (threadIdx.x == 0) { if (!isfinite(final)) *noop_gmem = 1; // Blindly fire off a write. These will race but that's ok. output[blockIdx.x] = fmaxf(fabsf(output[blockIdx.x]), fabsf(final)); if (per_tensor) output_per_tensor[(tl.start_tensor_this_launch + tensor_loc) * max_chunks_per_tensor + chunk_idx] = final; } } }; __global__ void cleanup(float *output, float *output_per_tensor, float *ret, float *ret_per_tensor, bool per_tensor, int max_chunks_per_tensor) { __shared__ float vals[512]; if (blockIdx.x == 0) { float val = 0; if (threadIdx.x < 320) val = output[threadIdx.x]; float final = reduce_block_into_lanes(vals, val); if (threadIdx.x == 0) *ret = sqrt(final); } if (per_tensor) { float *output_this_tensor = output_per_tensor + blockIdx.x * max_chunks_per_tensor; float val = 0; for (int i = threadIdx.x; i < max_chunks_per_tensor; i += blockDim.x) val += output_this_tensor[i]; float final = reduce_block_into_lanes(vals, val); if (threadIdx.x == 0) ret_per_tensor[blockIdx.x] = sqrt(final); } } __global__ void cleanup_v2(float *output, float *output_per_tensor, float *ret, float *ret_per_tensor, bool per_tensor, int max_chunks_per_tensor, int norm_type, float alpha, float beta) { __shared__ float vals[512]; if (blockIdx.x == 0) { float val = 0; if (threadIdx.x < 320) val = output[threadIdx.x]; if (norm_type == 0) { float final = reduce_block_into_lanes_max_op(vals, val); if (threadIdx.x == 0) *ret = alpha * (*ret) + beta * final; } else { float final = reduce_block_into_lanes(vals, val); if (threadIdx.x == 0) *ret = sqrt(alpha * (*ret) * (*ret) + beta * final); } } if (per_tensor) { float *output_this_tensor = output_per_tensor + blockIdx.x * max_chunks_per_tensor; if (norm_type == 0) { float val = 0; for (int i = threadIdx.x; i < max_chunks_per_tensor; i += blockDim.x) val = fmaxf(fabsf(val), fabsf(output_this_tensor[i])); float final = reduce_block_into_lanes_max_op(vals, val); if (threadIdx.x == 0) ret_per_tensor[blockIdx.x] = alpha * ret_per_tensor[blockIdx.x] + beta * final; } else { float val = 0; for (int i = threadIdx.x; i < max_chunks_per_tensor; i += blockDim.x) val += output_this_tensor[i]; float final = reduce_block_into_lanes(vals, val); if (threadIdx.x == 0) ret_per_tensor[blockIdx.x] = sqrt(alpha * ret_per_tensor[blockIdx.x] * ret_per_tensor[blockIdx.x] + beta * final); } } } std::tuple<at::Tensor, at::Tensor> multi_tensor_l2norm_cuda( int chunk_size, at::Tensor noop_flag, std::vector<std::vector<at::Tensor>> tensor_lists, at::optional<bool> per_tensor_python) { bool per_tensor = per_tensor_python.has_value() ? 
per_tensor_python.value() : false; auto float_options = tensor_lists[0][0].options().dtype(at::kFloat); auto output = at::zeros({320}, float_options); at::Tensor output_per_tensor; at::Tensor ret_per_tensor; int ntensors = tensor_lists[0].size(); int max_chunks_per_tensor = -1; if (per_tensor) { for (int t = 0; t < ntensors; t++) { int max_chunks_this_tensor = (tensor_lists[0][t].numel() + chunk_size - 1) / chunk_size; if (max_chunks_this_tensor > max_chunks_per_tensor) max_chunks_per_tensor = max_chunks_this_tensor; } output_per_tensor = at::zeros({ntensors * max_chunks_per_tensor}, float_options); ret_per_tensor = at::empty({ntensors}, float_options); } else { ret_per_tensor = at::empty({0}, float_options); } DISPATCH_FLOAT_AND_HALF( tensor_lists[0][0].scalar_type(), 0, "multi_tensor_l2norm_cuda", multi_tensor_apply<1>( BLOCK_SIZE, chunk_size, noop_flag, tensor_lists, L2NormFunctor<scalar_t_0>(), output.DATA_PTR<float>(), per_tensor ? output_per_tensor.DATA_PTR<float>() : nullptr, per_tensor, max_chunks_per_tensor);) AT_CUDA_CHECK(cudaGetLastError()); // AT_CUDA_CHECK(cudaDeviceSynchronize()); // This involves one more small kernel launches, but will be negligible end to // end. I could get rid of these by hacking the functor + multi tensor harness // with persistence logic, but keeping it simple for now auto ret = at::empty({1}, output.options()); const at::cuda::OptionalCUDAGuard device_guard(device_of(output)); auto stream = at::cuda::getCurrentCUDAStream(); cleanup<<<per_tensor ? ntensors : 1, 512, 0, stream>>>( output.DATA_PTR<float>(), per_tensor ? output_per_tensor.DATA_PTR<float>() : nullptr, ret.DATA_PTR<float>(), per_tensor ? ret_per_tensor.DATA_PTR<float>() : nullptr, per_tensor, max_chunks_per_tensor); return std::tuple<at::Tensor, at::Tensor>(ret, ret_per_tensor); } // Compute and update grad norm // Here use a per tensor norm, and blend new norm(n) and old norm(gn) by // L-2: gn = sqrt(a * gn^2 + b * n^2) // L-inf: gn = a * gn + b * n void multi_tensor_norm_out_cuda( int chunk_size, at::Tensor noop_flag, std::vector<std::vector<at::Tensor>> tensor_lists, at::Tensor out, const float alpha, const float beta, const int norm_type) { auto float_options = tensor_lists[0][0].options().dtype(at::kFloat); TORCH_CHECK(tensor_lists[0][0].device() == noop_flag.device(), "noop flag should be on the same device as tensors"); // we don't need global thus uses empty here auto output = at::empty({320}, float_options); at::Tensor output_per_tensor; at::Tensor ret_per_tensor; int ntensors = tensor_lists[0].size(); int max_chunks_per_tensor = -1; for (int t = 0; t < ntensors; t++) { int max_chunks_this_tensor = (tensor_lists[0][t].numel() + chunk_size - 1) / chunk_size; if (max_chunks_this_tensor > max_chunks_per_tensor) max_chunks_per_tensor = max_chunks_this_tensor; } // Although it is single write then read, still need to be zero // Since tailing element also participate cleanup output_per_tensor = at::zeros({ntensors * max_chunks_per_tensor}, float_options); if (norm_type == 0) { DISPATCH_FLOAT_AND_HALF( tensor_lists[0][0].scalar_type(), 0, "multi_tensor_maxnorm_cuda", multi_tensor_apply<1>( BLOCK_SIZE, chunk_size, noop_flag, tensor_lists, MaxNormFunctor<scalar_t_0>(), output.DATA_PTR<float>(), output_per_tensor.DATA_PTR<float>(), true, max_chunks_per_tensor);) } else { DISPATCH_FLOAT_AND_HALF( tensor_lists[0][0].scalar_type(), 0, "multi_tensor_l2norm_cuda", multi_tensor_apply<1>( BLOCK_SIZE, chunk_size, noop_flag, tensor_lists, L2NormFunctor<scalar_t_0>(), output.DATA_PTR<float>(), 
output_per_tensor.DATA_PTR<float>(), true, max_chunks_per_tensor);) } AT_CUDA_CHECK(cudaGetLastError()); // AT_CUDA_CHECK(cudaDeviceSynchronize()); // This involves one more small kernel launches, but will be negligible end to // end. I could get rid of these by hacking the functor + multi tensor harness // with persistence logic, but keeping it simple for now auto ret = at::empty({1}, output.options()); // Adding the following device guard since it happens sometimes that the // tensors are on one device and the cuda stream is on another device which // results in ILLEGAL MEM ACCESS error. const at::cuda::OptionalCUDAGuard device_guard(device_of(output)); auto stream = at::cuda::getCurrentCUDAStream(); cleanup_v2<<<ntensors, 512, 0, stream>>>( output.DATA_PTR<float>(), output_per_tensor.DATA_PTR<float>(), ret.DATA_PTR<float>(), out.DATA_PTR<float>(), true, max_chunks_per_tensor, norm_type, alpha, beta); return; }
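Both functors above accumulate per-thread partial sums (or maxima) and then call reduce_block_into_lanes, a helper defined elsewhere in this extension, to combine them across the block. The sketch below is not that helper; it is the same idea in its plainest form, a shared-memory tree reduction that turns per-thread squared partials into the L2 norm of one buffer.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void l2_norm_block(const float* in, float* out, int n) {
  __shared__ float s_vals[512];
  const int tid = threadIdx.x;

  float val = 0.f;
  for (int i = tid; i < n; i += blockDim.x) val += in[i] * in[i];  // squared partials
  s_vals[tid] = val;
  __syncthreads();

  // Tree reduction; assumes blockDim.x is a power of two (512 here).
  for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
    if (tid < stride) s_vals[tid] += s_vals[tid + stride];
    __syncthreads();
  }
  if (tid == 0) *out = sqrtf(s_vals[0]);
}

int main() {
  const int n = 1 << 16;
  float* h_in = new float[n];
  for (int i = 0; i < n; ++i) h_in[i] = 1.0f;  // expected L2 norm: sqrt(n) = 256

  float *d_in, *d_out;
  float h_out = 0.f;
  cudaMalloc(&d_in, n * sizeof(float));
  cudaMalloc(&d_out, sizeof(float));
  cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);

  l2_norm_block<<<1, 512>>>(d_in, d_out, n);
  cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
  printf("L2 norm = %f (expected 256)\n", h_out);

  delete[] h_in;
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}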
43953eeb21ad5c844273b7d1a72841fa4954a122.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include "file1.h" #include "file2.h" result_type __device__ file1_func(int x); result_type_dynamic __device__ file2_func(int x); static __global__ void file5_kernel(result_type& r, int x) { // call static_func which is a method that is defined in the // static library that is always out of date r = file1_func(x); result_type_dynamic rd = file2_func(x); } int file5_launch_kernel(int x) { result_type r; hipLaunchKernelGGL(( file5_kernel), dim3(1), dim3(1), 0, 0, r, x); return r.sum; }
43953eeb21ad5c844273b7d1a72841fa4954a122.cu
#include <iostream> #include "file1.h" #include "file2.h" result_type __device__ file1_func(int x); result_type_dynamic __device__ file2_func(int x); static __global__ void file5_kernel(result_type& r, int x) { // call static_func which is a method that is defined in the // static library that is always out of date r = file1_func(x); result_type_dynamic rd = file2_func(x); } int file5_launch_kernel(int x) { result_type r; file5_kernel<<<1, 1>>>(r, x); return r.sum; }
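The file5 pair above passes result_type& r, a host stack object, directly into the kernel; that is adequate for the compile/link test this file appears to be, but it is not how a value is normally returned from device code, and dereferencing a host address in a kernel has no defined result. A minimal sketch of the usual pattern follows, with a hypothetical result_type_sketch struct standing in for the real result_type (assumed to carry an int sum member, as the code above implies).

#include <cuda_runtime.h>

struct result_type_sketch { int sum; };

// Placeholder work in place of file1_func / file2_func.
__global__ void compute_kernel(result_type_sketch* r, int x) {
  r->sum = x * 2;
}

// Return a kernel result through device memory and an explicit copy back.
int launch_and_fetch(int x) {
  result_type_sketch host_r{};
  result_type_sketch* dev_r = nullptr;
  cudaMalloc(&dev_r, sizeof(result_type_sketch));
  compute_kernel<<<1, 1>>>(dev_r, x);
  cudaMemcpy(&host_r, dev_r, sizeof(result_type_sketch), cudaMemcpyDeviceToHost);
  cudaFree(dev_r);
  return host_r.sum;
}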
56cf4efaded32b6db2993ee714e814aab0986db7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Ref: // http://www.cc.u-tokyo.ac.jp/support/press/news/VOL12/No3/201005_gpgpu2.pdf #include <stdio.h> #include "common.h" #include "matrix.h" #define KERNEL2 #define BLOCKSIZE 64 #ifdef KERNEL0 // original __global__ void kernel (Matrix a, Matrix b, Matrix c, float alpha, float beta) { int my_y = blockIdx.x * blockDim.x + threadIdx.x; int a_offset = my_y * a.x; int c_offset = my_y * c.x; for (int i = 0; i < c.x; i++) { int c_index = c_offset + i; float acc = 0; for (int j = 0; j < a.x; j++) acc += a.body[a_offset + j] * b.body[i + j * b.x]; c.body[c_index] = alpha * acc + beta * c.body[c_index]; } } #endif #ifdef KERNEL1 // simd __global__ void kernel (Matrix a, Matrix b, Matrix c, float alpha, float beta) { int my_x = blockIdx.x * blockDim.x + threadIdx.x; int my_y = blockIdx.y; int a_offset = my_y * a.x; float acc = 0; for (int i = 0; i < a.x; i++) acc += a.body[a_offset + i] * b.body[my_x + i * b.x]; int c_index = my_y * c.x + my_x; c.body[c_index] = alpha * acc + beta * c.body[c_index]; } #endif #ifdef KERNEL2 // shared memory __global__ void kernel (Matrix a, Matrix b, Matrix c, float alpha, float beta) { int my_x = blockIdx.x * blockDim.x + threadIdx.x; int my_y = blockIdx.y; __shared__ float sb[BLOCKSIZE]; int a_offset = my_y * a.x; float acc = 0; for (int is = 0; is < a.x; is += BLOCKSIZE) { int b_offset = my_x + is * b.x; sb[threadIdx.x] = a.body[a_offset + is]; __syncthreads(); for (int i = 0; i < BLOCKSIZE; i++) acc += sb[i] * b.body[b_offset + i * b.x]; __syncthreads(); } int c_index = my_y * c.x + my_x; c.body[c_index] = alpha * acc + beta * c.body[c_index]; } #endif float run_sgemm (int type, Matrix a, Matrix b, Matrix c, float alpha, float beta) { #ifdef KERNEL0 int block = BLOCKSIZE; int grid = c.y / thread_num; #endif #if defined(KERNEL1) || defined(KERNEL2) int block = BLOCKSIZE; dim3 grid(c.x / block, c.y); #endif Matrix dev_a = matrix_into_device(a, type == TN || type == TT); Matrix dev_b = matrix_into_device(b, type == NT || type == TT); Matrix dev_c = matrix_into_device(c, false); hipEvent_t start, stop; float elapsed_time; HANDLE_ERROR(hipEventCreate(&start)); HANDLE_ERROR(hipEventCreate(&stop)); HANDLE_ERROR(hipEventRecord(start, 0)); hipLaunchKernelGGL(( kernel), dim3(grid), dim3(block), 0, 0, dev_a, dev_b, dev_c, alpha, beta); HANDLE_ERROR(hipEventRecord(stop, 0)); HANDLE_ERROR(hipEventSynchronize(stop)); HANDLE_ERROR(hipEventElapsedTime(&elapsed_time, start, stop)); matrix_free_device(dev_a); matrix_free_device(dev_b); matrix_free_device(dev_c); HANDLE_ERROR(hipEventDestroy(start)); HANDLE_ERROR(hipEventDestroy(stop)); return elapsed_time; } void test_sgemm (int size) { Matrix a = random_matrix(size, size); Matrix b = random_matrix(size, size); Matrix c = random_matrix(size, size); float elapsed_time; long double flops; elapsed_time = run_sgemm(TN, a, b, c, 1.F, 1.F); flops = 2L * size * size * size * 1000 / elapsed_time; printf("(%d x %d)\t" "%.1Lf flops\n", size, size, flops); matrix_free(a); matrix_free(b); matrix_free(c); } int main (int argc, char *argv[]) { srand((unsigned)time(NULL)); test_sgemm(512); test_sgemm(1024); test_sgemm(2048); test_sgemm(4096); return 0; }
56cf4efaded32b6db2993ee714e814aab0986db7.cu
// Ref: // http://www.cc.u-tokyo.ac.jp/support/press/news/VOL12/No3/201005_gpgpu2.pdf #include <stdio.h> #include "common.h" #include "matrix.h" #define KERNEL2 #define BLOCKSIZE 64 #ifdef KERNEL0 // original __global__ void kernel (Matrix a, Matrix b, Matrix c, float alpha, float beta) { int my_y = blockIdx.x * blockDim.x + threadIdx.x; int a_offset = my_y * a.x; int c_offset = my_y * c.x; for (int i = 0; i < c.x; i++) { int c_index = c_offset + i; float acc = 0; for (int j = 0; j < a.x; j++) acc += a.body[a_offset + j] * b.body[i + j * b.x]; c.body[c_index] = alpha * acc + beta * c.body[c_index]; } } #endif #ifdef KERNEL1 // simd __global__ void kernel (Matrix a, Matrix b, Matrix c, float alpha, float beta) { int my_x = blockIdx.x * blockDim.x + threadIdx.x; int my_y = blockIdx.y; int a_offset = my_y * a.x; float acc = 0; for (int i = 0; i < a.x; i++) acc += a.body[a_offset + i] * b.body[my_x + i * b.x]; int c_index = my_y * c.x + my_x; c.body[c_index] = alpha * acc + beta * c.body[c_index]; } #endif #ifdef KERNEL2 // shared memory __global__ void kernel (Matrix a, Matrix b, Matrix c, float alpha, float beta) { int my_x = blockIdx.x * blockDim.x + threadIdx.x; int my_y = blockIdx.y; __shared__ float sb[BLOCKSIZE]; int a_offset = my_y * a.x; float acc = 0; for (int is = 0; is < a.x; is += BLOCKSIZE) { int b_offset = my_x + is * b.x; sb[threadIdx.x] = a.body[a_offset + is]; __syncthreads(); for (int i = 0; i < BLOCKSIZE; i++) acc += sb[i] * b.body[b_offset + i * b.x]; __syncthreads(); } int c_index = my_y * c.x + my_x; c.body[c_index] = alpha * acc + beta * c.body[c_index]; } #endif float run_sgemm (int type, Matrix a, Matrix b, Matrix c, float alpha, float beta) { #ifdef KERNEL0 int block = BLOCKSIZE; int grid = c.y / thread_num; #endif #if defined(KERNEL1) || defined(KERNEL2) int block = BLOCKSIZE; dim3 grid(c.x / block, c.y); #endif Matrix dev_a = matrix_into_device(a, type == TN || type == TT); Matrix dev_b = matrix_into_device(b, type == NT || type == TT); Matrix dev_c = matrix_into_device(c, false); cudaEvent_t start, stop; float elapsed_time; HANDLE_ERROR(cudaEventCreate(&start)); HANDLE_ERROR(cudaEventCreate(&stop)); HANDLE_ERROR(cudaEventRecord(start, 0)); kernel<<<grid, block>>>(dev_a, dev_b, dev_c, alpha, beta); HANDLE_ERROR(cudaEventRecord(stop, 0)); HANDLE_ERROR(cudaEventSynchronize(stop)); HANDLE_ERROR(cudaEventElapsedTime(&elapsed_time, start, stop)); matrix_free_device(dev_a); matrix_free_device(dev_b); matrix_free_device(dev_c); HANDLE_ERROR(cudaEventDestroy(start)); HANDLE_ERROR(cudaEventDestroy(stop)); return elapsed_time; } void test_sgemm (int size) { Matrix a = random_matrix(size, size); Matrix b = random_matrix(size, size); Matrix c = random_matrix(size, size); float elapsed_time; long double flops; elapsed_time = run_sgemm(TN, a, b, c, 1.F, 1.F); flops = 2L * size * size * size * 1000 / elapsed_time; printf("(%d x %d)\t" "%.1Lf flops\n", size, size, flops); matrix_free(a); matrix_free(b); matrix_free(c); } int main (int argc, char *argv[]) { srand((unsigned)time(NULL)); test_sgemm(512); test_sgemm(1024); test_sgemm(2048); test_sgemm(4096); return 0; }
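In the KERNEL2 variant above, every thread in the block stores the same element a.body[a_offset + is] into its own slot of sb, so the shared tile does not actually hold the row segment a[my_y][is .. is+BLOCKSIZE). The standalone kernel below is a hedged sketch of the tile load as it is presumably intended (each thread fetching its own element of the current slice of the A row); it assumes row-major A (rows x k), B (k x n), C (rows x n), with k and n multiples of the block size, the same assumptions the original test sizes already satisfy.

#define BLOCKSIZE_SKETCH 64

// Illustrative shared-memory SGEMM row kernel: sb[0..BLOCKSIZE) really holds
// A[row][is .. is+BLOCKSIZE) because each thread caches its own element.
__global__ void sgemm_tile_sketch(const float* A, const float* B, float* C,
                                  int k, int n, float alpha, float beta) {
  int col = blockIdx.x * blockDim.x + threadIdx.x;  // my_x in the code above
  int row = blockIdx.y;                             // my_y in the code above
  __shared__ float sb[BLOCKSIZE_SKETCH];
  float acc = 0.0f;
  for (int is = 0; is < k; is += BLOCKSIZE_SKETCH) {
    sb[threadIdx.x] = A[row * k + is + threadIdx.x];  // one element per thread
    __syncthreads();
    for (int i = 0; i < BLOCKSIZE_SKETCH; i++)
      acc += sb[i] * B[(is + i) * n + col];
    __syncthreads();
  }
  C[row * n + col] = alpha * acc + beta * C[row * n + col];
}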
7661fc385cfb4c153941f3c355276de7fc1d33af.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include "helper_cuda.h" void initArray(double* ip,int size){ time_t t; srand((unsigned )time(&t)); for(int i=0;i<size;i++){ ip[i]=(float)(rand()&0xffff)/1000.0f; } } void checkResult(double *a, double *b, const int size){ double eps = 1e-8; int flag = 1; for(int i = 0; i<size; ++i){ if(abs(a[i] - b[i]) > eps){ flag = 0; break; } } if(flag){ printf("Check result success!\n"); } else{ printf("Check result fail!\n"); } } void addArraysOnCPU(double *a, double *b, double *res, const int size){ for(int i=0; i<size; i+=4){ res[i] = a[i] + b[i]; res[i+1] = a[i+1] + b[i+1]; res[i+2] = a[i+2] + b[i+2]; res[i+3] = a[i+3] + b[i+3]; } } __global__ void addArraysOnGPU(double *a, double *b, double *res){ int id = blockIdx.x * blockDim.x + threadIdx.x; res[id] = a[id] + b[id]; } int main(int argc, char **argv){ int dev = 0; hipSetDevice(dev); int n = (1<<24); printf("Vector size: %d\n", n); int nByte = sizeof(double) * n; double *a_h = (double*)malloc(nByte); double *b_h = (double*)malloc(nByte); double *res_h = (double*)malloc(nByte); double *res_fromGPU_h = (double*)malloc(nByte); memset(res_h, 0, nByte); memset(res_fromGPU_h, 0, nByte); double *a_d, *b_d, *res_d; CHECK(hipMalloc((double**)&a_d, nByte)); CHECK(hipMalloc((double**)&b_d, nByte)); CHECK(hipMalloc((double**)&res_d, nByte)); initArray(a_h, n); initArray(b_h, n); CHECK(hipMemcpy(a_d, a_h, nByte, hipMemcpyHostToDevice)); CHECK(hipMemcpy(b_d, b_h, nByte, hipMemcpyHostToDevice)); dim3 block(1<<10); dim3 grid((n-1)/block.x+1); double gpuStart = cpuSecond(); hipLaunchKernelGGL(( addArraysOnGPU), dim3(grid), dim3(block), 0, 0, a_d, b_d, res_d); CHECK(hipDeviceSynchronize()); double gpuEnd = cpuSecond(); printf("Execution configuration <<<%d, %d>>>\n", grid.x, block.x); CHECK(hipMemcpy(res_fromGPU_h, res_d, nByte, hipMemcpyDeviceToHost)); double cpuStart = cpuSecond(); addArraysOnCPU(a_h, b_h, res_h, n); double cpuEnd = cpuSecond(); checkResult(res_h, res_fromGPU_h, n); printf("GPU time elapsed: %f\n", gpuEnd - gpuStart); printf("CPU time elapsed: %f\n", cpuEnd - cpuStart); hipFree(a_d); hipFree(b_d); hipFree(res_d); free(a_h); free(b_h); free(res_h); free(res_fromGPU_h); return 0; }
7661fc385cfb4c153941f3c355276de7fc1d33af.cu
#include <stdio.h> #include <cuda_runtime.h> #include "helper_cuda.h" void initArray(double* ip,int size){ time_t t; srand((unsigned )time(&t)); for(int i=0;i<size;i++){ ip[i]=(float)(rand()&0xffff)/1000.0f; } } void checkResult(double *a, double *b, const int size){ double eps = 1e-8; int flag = 1; for(int i = 0; i<size; ++i){ if(abs(a[i] - b[i]) > eps){ flag = 0; break; } } if(flag){ printf("Check result success!\n"); } else{ printf("Check result fail!\n"); } } void addArraysOnCPU(double *a, double *b, double *res, const int size){ for(int i=0; i<size; i+=4){ res[i] = a[i] + b[i]; res[i+1] = a[i+1] + b[i+1]; res[i+2] = a[i+2] + b[i+2]; res[i+3] = a[i+3] + b[i+3]; } } __global__ void addArraysOnGPU(double *a, double *b, double *res){ int id = blockIdx.x * blockDim.x + threadIdx.x; res[id] = a[id] + b[id]; } int main(int argc, char **argv){ int dev = 0; cudaSetDevice(dev); int n = (1<<24); printf("Vector size: %d\n", n); int nByte = sizeof(double) * n; double *a_h = (double*)malloc(nByte); double *b_h = (double*)malloc(nByte); double *res_h = (double*)malloc(nByte); double *res_fromGPU_h = (double*)malloc(nByte); memset(res_h, 0, nByte); memset(res_fromGPU_h, 0, nByte); double *a_d, *b_d, *res_d; CHECK(cudaMalloc((double**)&a_d, nByte)); CHECK(cudaMalloc((double**)&b_d, nByte)); CHECK(cudaMalloc((double**)&res_d, nByte)); initArray(a_h, n); initArray(b_h, n); CHECK(cudaMemcpy(a_d, a_h, nByte, cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(b_d, b_h, nByte, cudaMemcpyHostToDevice)); dim3 block(1<<10); dim3 grid((n-1)/block.x+1); double gpuStart = cpuSecond(); addArraysOnGPU<<<grid, block>>>(a_d, b_d, res_d); CHECK(cudaDeviceSynchronize()); double gpuEnd = cpuSecond(); printf("Execution configuration <<<%d, %d>>>\n", grid.x, block.x); CHECK(cudaMemcpy(res_fromGPU_h, res_d, nByte, cudaMemcpyDeviceToHost)); double cpuStart = cpuSecond(); addArraysOnCPU(a_h, b_h, res_h, n); double cpuEnd = cpuSecond(); checkResult(res_h, res_fromGPU_h, n); printf("GPU time elapsed: %f\n", gpuEnd - gpuStart); printf("CPU time elapsed: %f\n", cpuEnd - cpuStart); cudaFree(a_d); cudaFree(b_d); cudaFree(res_d); free(a_h); free(b_h); free(res_h); free(res_fromGPU_h); return 0; }
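The host timing above relies on a cpuSecond() helper that is not defined in this file and is assumed to come from the included helper header; a typical gettimeofday-based implementation looks like the following sketch.

#include <sys/time.h>

// Wall-clock time in seconds, as commonly provided by CUDA teaching helpers.
static double cpuSecond(void) {
  struct timeval tp;
  gettimeofday(&tp, NULL);
  return (double)tp.tv_sec + (double)tp.tv_usec * 1.0e-6;
}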
85c7ba64e7c0fdbd81b391443c76109facf1ec4d.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* A CUDA program that demonstrates how to compute a stereo disparity map using * SIMD SAD (Sum of Absolute Difference) intrinsics */ /* * The program's performance is dominated by * the computation on the execution engine (EE) while memory copies * between Host and Device using the copy engine (CE) are significantly * less time consuming. * * This version uses a user allocated stream and asynchronous memory * copy operations (hipMemcpyAsync()). Cuda kernel invocations on the * stream are also asynchronous. hipStreamSynchronize() is used to * synchronize with both the copy and kernel executions. Host pinned * memory is not used because the copy operations are not a significant * element of performance. * * The program depends on two input files containing the image * representations for the left and right stereo images * (stereo.im0.640x533.ppm and stereo.im1.640x533.ppm) * which must be in the directory with the executable. * */ #include <errno.h> #include <math.h> #include <sched.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/mman.h> #include <sys/types.h> #include <unistd.h> #include <hip/hip_runtime.h> #include <helper_cuda.h> #include <helper_functions.h> extern "C" { #include "gpusync.h" } #include "sd_kernel.cuh" // Relative path to images static const char fname0[] = "../Samples/Copy/StereoDisparity/data/stereo.im0.640x533.ppm"; static const char fname1[] = "../Samples/Copy/StereoDisparity/data/stereo.im1.640x533.ppm"; // Holds per-thread state for this algorithm. typedef struct { hipStream_t stream; // Host Memory unsigned int *h_odata; unsigned char *h_img0; unsigned char *h_img1; // Device memory unsigned int *d_odata; unsigned int *d_img0; unsigned int *d_img1; // Kernel execution parameters unsigned int w, h; dim3 numThreads; dim3 numBlocks; unsigned int numData; unsigned int memSize; hipTextureObject_t texture_right; hipTextureObject_t texture_left; // Search parameters int minDisp; int maxDisp; } ThreadContext; // Used for work-in-progress migration of this task to one that doesn't rely on // global state. ThreadContext *g; int iDivUp(int a, int b) { return ((a % b) != 0) ? 
(a / b + 1) : (a / b); } // Override helper_image.h inline bool loadPPM4ub(const char *file, unsigned char **data, unsigned int *w, unsigned int *h) { unsigned char *idata = 0; unsigned int channels; if (!__loadPPM(file, &idata, w, h, &channels)) { free(idata); return false; } // pad 4th component int size = *w * *h; // keep the original pointer unsigned char *idata_orig = idata; checkCudaErrors(hipHostMalloc(data, sizeof(unsigned char) * size * 4)); unsigned char *ptr = *data; for (int i = 0; i < size; i++) { *ptr++ = *idata++; *ptr++ = *idata++; *ptr++ = *idata++; *ptr++ = 0; } free(idata_orig); return true; } extern "C" void init(int sync_level) { switch (sync_level) { case 0: hipSetDeviceFlags(hipDeviceScheduleSpin); break; case 1: hipSetDeviceFlags(hipDeviceScheduleYield); break; case 2: hipSetDeviceFlags(hipDeviceScheduleBlockingSync); break; default: fprintf(stderr, "Unknown sync level: %d\n", sync_level); break; } g = (ThreadContext*) malloc(sizeof(ThreadContext)); if (!g) { printf("Failed to allocate Thread Context.\n"); exit(1); } g->minDisp = -16; g->maxDisp = 0; // Follow convention and initialize CUDA/GPU // used here to invoke initialization of GPU locking hipFree(0); // Pin code if(!mlockall(MCL_CURRENT | MCL_FUTURE)) { fprintf(stderr, "Failed to lock code pages.\n"); exit(EXIT_FAILURE); } hipSetDevice(0); hipStreamCreate(&(g->stream)); } extern "C" void mallocCPU(int numElements) { // Load image data // functions allocate memory for the images on host side // initialize pointers to NULL to request lib call to allocate as needed // PPM images are loaded into 4 byte/pixel memory (RGBX) g->h_img0 = NULL; g->h_img1 = NULL; if (!loadPPM4ub(fname0, &(g->h_img0), &(g->w), &(g->h))) { fprintf(stderr, "Failed to load <%s>\n", fname0); exit(-1); } if (!loadPPM4ub(fname1, &(g->h_img1), &(g->w), &(g->h))) { fprintf(stderr, "Failed to load <%s>\n", fname1); exit(-1); } // set up parameters used in the rest of program g->numThreads = dim3(blockSize_x, blockSize_y, 1); g->numBlocks = dim3(iDivUp(g->w, g->numThreads.x), iDivUp(g->h, g->numThreads.y)); g->numData = g->w * g->h; g->memSize = sizeof(int) * g->numData; // allocate memory for the result on host side checkCudaErrors(hipHostMalloc(&(g->h_odata), g->memSize)); } extern "C" void mallocGPU(int unused) { hipResourceDesc left_resource, right_resource; hipTextureDesc texture_desc; hipChannelFormatDesc desc = hipCreateChannelDesc<unsigned int>(); // allocate device memory for inputs and result checkCudaErrors(hipMalloc(&(g->d_odata), g->memSize)); checkCudaErrors(hipMalloc(&(g->d_img0), g->memSize)); checkCudaErrors(hipMalloc(&(g->d_img1), g->memSize)); // Initialize texture objects. 
memset(&left_resource, 0, sizeof(left_resource)); left_resource.resType = hipResourceTypePitch2D; left_resource.res.pitch2D.width = g->w; left_resource.res.pitch2D.height = g->h; left_resource.res.pitch2D.desc = desc; left_resource.res.pitch2D.pitchInBytes = g->w * 4; // The only difference between the left and right textures is the image memcpy(&right_resource, &left_resource, sizeof(left_resource)); left_resource.res.pitch2D.devPtr = g->d_img0; right_resource.res.pitch2D.devPtr = g->d_img1; texture_desc.addressMode[0] = hipAddressModeClamp; texture_desc.addressMode[1] = hipAddressModeClamp; texture_desc.filterMode = hipFilterModePoint; texture_desc.readMode = hipReadModeElementType; checkCudaErrors(hipCreateTextureObject(&(g->texture_left), &left_resource, &texture_desc, NULL)); checkCudaErrors(hipCreateTextureObject(&(g->texture_right), &right_resource, &texture_desc, NULL)); } extern "C" void copyin(int unused) { // copy host memory with images to device checkCudaErrors(hipMemcpyAsync(g->d_img0, g->h_img0, g->memSize, hipMemcpyHostToDevice, g->stream)); checkCudaErrors(hipMemcpyAsync(g->d_img1, g->h_img1, g->memSize, hipMemcpyHostToDevice, g->stream)); // copy host memory that was set to zero to initialize device output checkCudaErrors(hipMemcpyAsync(g->d_odata, g->h_odata, g->memSize, hipMemcpyHostToDevice, g->stream)); hipStreamSynchronize(g->stream); } extern "C" void exec(int unused) { hipLaunchKernelGGL(( stereoDisparityKernel), dim3(g->numBlocks), dim3(g->numThreads), 0, g->stream, g->d_img0, g->d_img1, g->d_odata, g->w, g->h, g->minDisp, g->maxDisp, g->texture_left, g->texture_right); hipStreamSynchronize(g->stream); getLastCudaError("Kernel execution failed"); } extern "C" void copyout() { checkCudaErrors(hipMemcpyAsync(g->h_odata, g->d_odata, g->memSize, hipMemcpyDeviceToHost, g->stream)); hipStreamSynchronize(g->stream); } extern "C" void freeGPU() { checkCudaErrors(hipFree(g->d_odata)); checkCudaErrors(hipFree(g->d_img0)); checkCudaErrors(hipFree(g->d_img1)); } extern "C" void freeCPU() { hipHostFree(g->h_odata); hipHostFree(g->h_img0); hipHostFree(g->h_img1); } extern "C" void finish() { hipStreamSynchronize(g->stream); hipDestroyTextureObject(g->texture_right); hipDestroyTextureObject(g->texture_left); hipStreamDestroy(g->stream); checkCudaErrors(hipDeviceReset()); }
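The stereo-disparity file exposes its work through the C entry points it defines (init, mallocCPU, mallocGPU, copyin, exec, copyout, freeGPU, freeCPU, finish). A minimal driver exercising them in the intended order might look like the sketch below; it assumes gpusync.h declares exactly these functions with the signatures used above, and the sync level 2 (blocking sync) is an arbitrary choice.

// Hypothetical driver for the gpusync.h interface defined above.
extern "C" {
#include "gpusync.h"
}

int main(void) {
  init(2);        // set scheduling flags, create the user stream
  mallocCPU(0);   // load the two PPM images, allocate pinned host output
  mallocGPU(0);   // device buffers and texture objects
  copyin(0);      // async H2D copies, then stream sync
  exec(0);        // stereoDisparityKernel on the user stream
  copyout();      // async D2H copy of the disparity map
  freeGPU();
  freeCPU();
  finish();       // destroy textures and stream, reset the device
  return 0;
}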
85c7ba64e7c0fdbd81b391443c76109facf1ec4d.cu
/* * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* A CUDA program that demonstrates how to compute a stereo disparity map using * SIMD SAD (Sum of Absolute Difference) intrinsics */ /* * The program's performance is dominated by * the computation on the execution engine (EE) while memory copies * between Host and Device using the copy engine (CE) are significantly * less time consuming. * * This version uses a user allocated stream and asynchronous memory * copy operations (cudaMemcpyAsync()). Cuda kernel invocations on the * stream are also asynchronous. cudaStreamSynchronize() is used to * synchronize with both the copy and kernel executions. Host pinned * memory is not used because the copy operations are not a significant * element of performance. * * The program depends on two input files containing the image * representations for the left and right stereo images * (stereo.im0.640x533.ppm and stereo.im1.640x533.ppm) * which must be in the directory with the executable. * */ #include <errno.h> #include <math.h> #include <sched.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/mman.h> #include <sys/types.h> #include <unistd.h> #include <cuda_runtime.h> #include <helper_cuda.h> #include <helper_functions.h> extern "C" { #include "gpusync.h" } #include "sd_kernel.cuh" // Relative path to images static const char fname0[] = "../Samples/Copy/StereoDisparity/data/stereo.im0.640x533.ppm"; static const char fname1[] = "../Samples/Copy/StereoDisparity/data/stereo.im1.640x533.ppm"; // Holds per-thread state for this algorithm. typedef struct { cudaStream_t stream; // Host Memory unsigned int *h_odata; unsigned char *h_img0; unsigned char *h_img1; // Device memory unsigned int *d_odata; unsigned int *d_img0; unsigned int *d_img1; // Kernel execution parameters unsigned int w, h; dim3 numThreads; dim3 numBlocks; unsigned int numData; unsigned int memSize; cudaTextureObject_t texture_right; cudaTextureObject_t texture_left; // Search parameters int minDisp; int maxDisp; } ThreadContext; // Used for work-in-progress migration of this task to one that doesn't rely on // global state. ThreadContext *g; int iDivUp(int a, int b) { return ((a % b) != 0) ? 
(a / b + 1) : (a / b); } // Override helper_image.h inline bool loadPPM4ub(const char *file, unsigned char **data, unsigned int *w, unsigned int *h) { unsigned char *idata = 0; unsigned int channels; if (!__loadPPM(file, &idata, w, h, &channels)) { free(idata); return false; } // pad 4th component int size = *w * *h; // keep the original pointer unsigned char *idata_orig = idata; checkCudaErrors(cudaMallocHost(data, sizeof(unsigned char) * size * 4)); unsigned char *ptr = *data; for (int i = 0; i < size; i++) { *ptr++ = *idata++; *ptr++ = *idata++; *ptr++ = *idata++; *ptr++ = 0; } free(idata_orig); return true; } extern "C" void init(int sync_level) { switch (sync_level) { case 0: cudaSetDeviceFlags(cudaDeviceScheduleSpin); break; case 1: cudaSetDeviceFlags(cudaDeviceScheduleYield); break; case 2: cudaSetDeviceFlags(cudaDeviceBlockingSync); break; default: fprintf(stderr, "Unknown sync level: %d\n", sync_level); break; } g = (ThreadContext*) malloc(sizeof(ThreadContext)); if (!g) { printf("Failed to allocate Thread Context.\n"); exit(1); } g->minDisp = -16; g->maxDisp = 0; // Follow convention and initialize CUDA/GPU // used here to invoke initialization of GPU locking cudaFree(0); // Pin code if(!mlockall(MCL_CURRENT | MCL_FUTURE)) { fprintf(stderr, "Failed to lock code pages.\n"); exit(EXIT_FAILURE); } cudaSetDevice(0); cudaStreamCreate(&(g->stream)); } extern "C" void mallocCPU(int numElements) { // Load image data // functions allocate memory for the images on host side // initialize pointers to NULL to request lib call to allocate as needed // PPM images are loaded into 4 byte/pixel memory (RGBX) g->h_img0 = NULL; g->h_img1 = NULL; if (!loadPPM4ub(fname0, &(g->h_img0), &(g->w), &(g->h))) { fprintf(stderr, "Failed to load <%s>\n", fname0); exit(-1); } if (!loadPPM4ub(fname1, &(g->h_img1), &(g->w), &(g->h))) { fprintf(stderr, "Failed to load <%s>\n", fname1); exit(-1); } // set up parameters used in the rest of program g->numThreads = dim3(blockSize_x, blockSize_y, 1); g->numBlocks = dim3(iDivUp(g->w, g->numThreads.x), iDivUp(g->h, g->numThreads.y)); g->numData = g->w * g->h; g->memSize = sizeof(int) * g->numData; // allocate memory for the result on host side checkCudaErrors(cudaMallocHost(&(g->h_odata), g->memSize)); } extern "C" void mallocGPU(int unused) { cudaResourceDesc left_resource, right_resource; cudaTextureDesc texture_desc; cudaChannelFormatDesc desc = cudaCreateChannelDesc<unsigned int>(); // allocate device memory for inputs and result checkCudaErrors(cudaMalloc(&(g->d_odata), g->memSize)); checkCudaErrors(cudaMalloc(&(g->d_img0), g->memSize)); checkCudaErrors(cudaMalloc(&(g->d_img1), g->memSize)); // Initialize texture objects. 
memset(&left_resource, 0, sizeof(left_resource)); left_resource.resType = cudaResourceTypePitch2D; left_resource.res.pitch2D.width = g->w; left_resource.res.pitch2D.height = g->h; left_resource.res.pitch2D.desc = desc; left_resource.res.pitch2D.pitchInBytes = g->w * 4; // The only difference between the left and right textures is the image memcpy(&right_resource, &left_resource, sizeof(left_resource)); left_resource.res.pitch2D.devPtr = g->d_img0; right_resource.res.pitch2D.devPtr = g->d_img1; texture_desc.addressMode[0] = cudaAddressModeClamp; texture_desc.addressMode[1] = cudaAddressModeClamp; texture_desc.filterMode = cudaFilterModePoint; texture_desc.readMode = cudaReadModeElementType; checkCudaErrors(cudaCreateTextureObject(&(g->texture_left), &left_resource, &texture_desc, NULL)); checkCudaErrors(cudaCreateTextureObject(&(g->texture_right), &right_resource, &texture_desc, NULL)); } extern "C" void copyin(int unused) { // copy host memory with images to device checkCudaErrors(cudaMemcpyAsync(g->d_img0, g->h_img0, g->memSize, cudaMemcpyHostToDevice, g->stream)); checkCudaErrors(cudaMemcpyAsync(g->d_img1, g->h_img1, g->memSize, cudaMemcpyHostToDevice, g->stream)); // copy host memory that was set to zero to initialize device output checkCudaErrors(cudaMemcpyAsync(g->d_odata, g->h_odata, g->memSize, cudaMemcpyHostToDevice, g->stream)); cudaStreamSynchronize(g->stream); } extern "C" void exec(int unused) { stereoDisparityKernel<<<g->numBlocks, g->numThreads, 0, g->stream>>>( g->d_img0, g->d_img1, g->d_odata, g->w, g->h, g->minDisp, g->maxDisp, g->texture_left, g->texture_right); cudaStreamSynchronize(g->stream); getLastCudaError("Kernel execution failed"); } extern "C" void copyout() { checkCudaErrors(cudaMemcpyAsync(g->h_odata, g->d_odata, g->memSize, cudaMemcpyDeviceToHost, g->stream)); cudaStreamSynchronize(g->stream); } extern "C" void freeGPU() { checkCudaErrors(cudaFree(g->d_odata)); checkCudaErrors(cudaFree(g->d_img0)); checkCudaErrors(cudaFree(g->d_img1)); } extern "C" void freeCPU() { cudaFreeHost(g->h_odata); cudaFreeHost(g->h_img0); cudaFreeHost(g->h_img1); } extern "C" void finish() { cudaStreamSynchronize(g->stream); cudaDestroyTextureObject(g->texture_right); cudaDestroyTextureObject(g->texture_left); cudaStreamDestroy(g->stream); checkCudaErrors(cudaDeviceReset()); }
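One detail in mallocGPU above: the two resource descriptors are memset to zero, but texture_desc is not, so its remaining members (normalizedCoords and the rest) hold whatever is on the stack when cudaCreateTextureObject reads them. A hedged sketch of the safer pattern, using the same CUDA calls as the file:

#include <cstring>
#include <cuda_runtime.h>

// Build a point-sampled, clamped texture descriptor with every other field
// zeroed, instead of relying on uninitialized stack contents.
static cudaTextureDesc make_clamped_point_desc(void) {
  cudaTextureDesc td;
  std::memset(&td, 0, sizeof(td));
  td.addressMode[0] = cudaAddressModeClamp;
  td.addressMode[1] = cudaAddressModeClamp;
  td.filterMode     = cudaFilterModePoint;
  td.readMode       = cudaReadModeElementType;
  return td;
}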
b1ccccbbeec99e8adb3f14265446e34fe5d9ed2b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_zvel_plus_2_bot; int xdim0_update_halo_kernel2_zvel_plus_2_bot_h = -1; __constant__ int ydim0_update_halo_kernel2_zvel_plus_2_bot; int ydim0_update_halo_kernel2_zvel_plus_2_bot_h = -1; __constant__ int xdim1_update_halo_kernel2_zvel_plus_2_bot; int xdim1_update_halo_kernel2_zvel_plus_2_bot_h = -1; __constant__ int ydim1_update_halo_kernel2_zvel_plus_2_bot; int ydim1_update_halo_kernel2_zvel_plus_2_bot_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel2_zvel_plus_2_bot*(y)+xdim0_update_halo_kernel2_zvel_plus_2_bot*ydim0_update_halo_kernel2_zvel_plus_2_bot*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel2_zvel_plus_2_bot*(y)+xdim1_update_halo_kernel2_zvel_plus_2_bot*ydim1_update_halo_kernel2_zvel_plus_2_bot*(z)) //user function __device__ inline void update_halo_kernel2_zvel_plus_2_bot_gpu(double *zvel0, double *zvel1, const int* fields) { if(fields[FIELD_ZVEL0] == 1) zvel0[OPS_ACC0(0,0,0)] = zvel0[OPS_ACC0(0,2,0)]; if(fields[FIELD_ZVEL1] == 1) zvel1[OPS_ACC1(0,0,0)] = zvel1[OPS_ACC1(0,2,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_zvel_plus_2_bot( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel2_zvel_plus_2_bot + idx_z * 1*1 * xdim0_update_halo_kernel2_zvel_plus_2_bot * ydim0_update_halo_kernel2_zvel_plus_2_bot; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel2_zvel_plus_2_bot + idx_z * 1*1 * xdim1_update_halo_kernel2_zvel_plus_2_bot * ydim1_update_halo_kernel2_zvel_plus_2_bot; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel2_zvel_plus_2_bot_gpu(arg0, arg1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel2_zvel_plus_2_bot(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel2_zvel_plus_2_bot_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif //Timing double t1,t2,c1,c2; ops_arg args[3] = { arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,3,range,49)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(49,"update_halo_kernel2_zvel_plus_2_bot"); OPS_kernels[49].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = 
range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel2_zvel_plus_2_bot_h || ydim0 != ydim0_update_halo_kernel2_zvel_plus_2_bot_h || xdim1 != xdim1_update_halo_kernel2_zvel_plus_2_bot_h || ydim1 != ydim1_update_halo_kernel2_zvel_plus_2_bot_h) { hipMemcpyToSymbol( xdim0_update_halo_kernel2_zvel_plus_2_bot, &xdim0, sizeof(int) ); xdim0_update_halo_kernel2_zvel_plus_2_bot_h = xdim0; hipMemcpyToSymbol( ydim0_update_halo_kernel2_zvel_plus_2_bot, &ydim0, sizeof(int) ); ydim0_update_halo_kernel2_zvel_plus_2_bot_h = ydim0; hipMemcpyToSymbol( xdim1_update_halo_kernel2_zvel_plus_2_bot, &xdim1, sizeof(int) ); xdim1_update_halo_kernel2_zvel_plus_2_bot_h = xdim1; hipMemcpyToSymbol( ydim1_update_halo_kernel2_zvel_plus_2_bot, &ydim1, sizeof(int) ); ydim1_update_halo_kernel2_zvel_plus_2_bot_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[49].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_update_halo_kernel2_zvel_plus_2_bot), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[49].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[49].mpi_time += t2-t1; OPS_kernels[49].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[49].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel2_zvel_plus_2_bot(char 
const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 49; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 49; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg*)malloc(3*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel2_zvel_plus_2_bot_execute; if (OPS_diags > 1) { ops_timing_realloc(49,"update_halo_kernel2_zvel_plus_2_bot"); } ops_enqueue_kernel(desc); } #endif
b1ccccbbeec99e8adb3f14265446e34fe5d9ed2b.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_zvel_plus_2_bot; int xdim0_update_halo_kernel2_zvel_plus_2_bot_h = -1; __constant__ int ydim0_update_halo_kernel2_zvel_plus_2_bot; int ydim0_update_halo_kernel2_zvel_plus_2_bot_h = -1; __constant__ int xdim1_update_halo_kernel2_zvel_plus_2_bot; int xdim1_update_halo_kernel2_zvel_plus_2_bot_h = -1; __constant__ int ydim1_update_halo_kernel2_zvel_plus_2_bot; int ydim1_update_halo_kernel2_zvel_plus_2_bot_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel2_zvel_plus_2_bot*(y)+xdim0_update_halo_kernel2_zvel_plus_2_bot*ydim0_update_halo_kernel2_zvel_plus_2_bot*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel2_zvel_plus_2_bot*(y)+xdim1_update_halo_kernel2_zvel_plus_2_bot*ydim1_update_halo_kernel2_zvel_plus_2_bot*(z)) //user function __device__ inline void update_halo_kernel2_zvel_plus_2_bot_gpu(double *zvel0, double *zvel1, const int* fields) { if(fields[FIELD_ZVEL0] == 1) zvel0[OPS_ACC0(0,0,0)] = zvel0[OPS_ACC0(0,2,0)]; if(fields[FIELD_ZVEL1] == 1) zvel1[OPS_ACC1(0,0,0)] = zvel1[OPS_ACC1(0,2,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_zvel_plus_2_bot( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel2_zvel_plus_2_bot + idx_z * 1*1 * xdim0_update_halo_kernel2_zvel_plus_2_bot * ydim0_update_halo_kernel2_zvel_plus_2_bot; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel2_zvel_plus_2_bot + idx_z * 1*1 * xdim1_update_halo_kernel2_zvel_plus_2_bot * ydim1_update_halo_kernel2_zvel_plus_2_bot; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel2_zvel_plus_2_bot_gpu(arg0, arg1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel2_zvel_plus_2_bot(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel2_zvel_plus_2_bot_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif //Timing double t1,t2,c1,c2; ops_arg args[3] = { arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,3,range,49)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(49,"update_halo_kernel2_zvel_plus_2_bot"); OPS_kernels[49].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int 
y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel2_zvel_plus_2_bot_h || ydim0 != ydim0_update_halo_kernel2_zvel_plus_2_bot_h || xdim1 != xdim1_update_halo_kernel2_zvel_plus_2_bot_h || ydim1 != ydim1_update_halo_kernel2_zvel_plus_2_bot_h) { cudaMemcpyToSymbol( xdim0_update_halo_kernel2_zvel_plus_2_bot, &xdim0, sizeof(int) ); xdim0_update_halo_kernel2_zvel_plus_2_bot_h = xdim0; cudaMemcpyToSymbol( ydim0_update_halo_kernel2_zvel_plus_2_bot, &ydim0, sizeof(int) ); ydim0_update_halo_kernel2_zvel_plus_2_bot_h = ydim0; cudaMemcpyToSymbol( xdim1_update_halo_kernel2_zvel_plus_2_bot, &xdim1, sizeof(int) ); xdim1_update_halo_kernel2_zvel_plus_2_bot_h = xdim1; cudaMemcpyToSymbol( ydim1_update_halo_kernel2_zvel_plus_2_bot, &ydim1, sizeof(int) ); ydim1_update_halo_kernel2_zvel_plus_2_bot_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[49].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_update_halo_kernel2_zvel_plus_2_bot<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[49].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[49].mpi_time += t2-t1; OPS_kernels[49].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[49].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel2_zvel_plus_2_bot(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { 
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 49; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 49; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg*)malloc(3*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel2_zvel_plus_2_bot_execute; if (OPS_diags > 1) { ops_timing_realloc(49,"update_halo_kernel2_zvel_plus_2_bot"); } ops_enqueue_kernel(desc); } #endif
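The generated OPS_ACC0/OPS_ACC1 macros above flatten a relative (x, y, z) stencil offset into a 1-D index of a row-major padded block; stripped of the generated names, the computation is just the sketch below, where xdim and ydim are the padded extents taken from args[n].dat->size[0] and size[1] in the host stub.

// Row-major flattening used by the generated OPS_ACCn macros:
// index = x + xdim * y + xdim * ydim * z.
static inline int ops_acc_sketch(int x, int y, int z, int xdim, int ydim) {
  return x + xdim * y + xdim * ydim * z;
}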
77dc6a5f5af18bb3dd718fc65f1488c4759b7295.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*------------------------------------------------------------------------- * * CUDA functions for Steepest descend in POCS-type algorithms. * * This file will iteratively minimize by stepest descend the total variation * of the input image, with the parameters given, using GPUs. * * CODE by Ander Biguri * --------------------------------------------------------------------------- --------------------------------------------------------------------------- Copyright (c) 2015, University of Bath and CERN- European Organization for Nuclear Research All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
--------------------------------------------------------------------------- Contact: [email protected] Codes : https://github.com/CERN/TIGRE --------------------------------------------------------------------------- */ #define MAXTHREADS 1024 #include "POCS_TV.hpp" #define cudaCheckErrors(msg) \ do { \ hipError_t __err = hipGetLastError(); \ if (__err != hipSuccess) { \ mexPrintf("ERROR in: %s \n",msg);\ mexErrMsgIdAndTxt("err",hipGetErrorString(__err));\ } \ } while (0) // CUDA kernels //https://stackoverflow.com/questions/21332040/simple-cuda-kernel-optimization/21340927#21340927 __global__ void divideArrayScalar(float* vec,float scalar,const size_t n) { unsigned long long i = (blockIdx.x * blockDim.x) + threadIdx.x; for(; i<n; i+=gridDim.x*blockDim.x) { vec[i]/=scalar; } } __global__ void multiplyArrayScalar(float* vec,float scalar,const size_t n) { unsigned long long i = (blockIdx.x * blockDim.x) + threadIdx.x; for(; i<n; i+=gridDim.x*blockDim.x) { vec[i]*=scalar; } } __global__ void substractArrays(float* vec,float* vec2,const size_t n) { unsigned long long i = (blockIdx.x * blockDim.x) + threadIdx.x; for(; i<n; i+=gridDim.x*blockDim.x) { vec[i]-=vec2[i]; } } __device__ __inline__ void gradient(const float* u, float* grad, long z, long y, long x, long depth, long rows, long cols) { unsigned long size2d = rows*cols; unsigned long long idx = z * size2d + y * cols + x; float uidx = u[idx]; if ( z - 1 >= 0 && z<depth) { grad[0] = (uidx-u[(z-1)*size2d + y*cols + x]) ; } if ( y - 1 >= 0 && y<rows){ grad[1] = (uidx-u[z*size2d + (y-1)*cols + x]) ; } if ( x - 1 >= 0 && x<cols) { grad[2] = (uidx-u[z*size2d + y*cols + (x-1)]); } } __global__ void gradientTV(const float* f, float* dftv, long depth, long rows, long cols){ unsigned long x = threadIdx.x + blockIdx.x * blockDim.x; unsigned long y = threadIdx.y + blockIdx.y * blockDim.y; unsigned long z = threadIdx.z + blockIdx.z * blockDim.z; unsigned long long idx = z * rows * cols + y * cols + x; if ( x >= cols || y >= rows || z >= depth ) return; float df[3] ={0,0,0}; float dfi[3]={0,0,0}; // dfi== \partial f_{i+1,j,k} float dfj[3]={0,0,0}; float dfk[3]={0,0,0}; gradient(f,df ,z ,y ,x , depth,rows,cols); gradient(f,dfi ,z ,y ,x+1, depth,rows,cols); gradient(f,dfj ,z ,y+1,x , depth,rows,cols); gradient(f,dfk ,z+1,y ,x , depth,rows,cols); float eps=0.00000001; //% avoid division by zero dftv[idx]=(df[0]+df[1]+df[2])/(sqrt(df[0] *df[0] +df[1] *df[1] +df[2] *df[2])+eps) -dfi[2]/(sqrt(dfi[0]*dfi[0]+dfi[1]*dfi[1]+dfi[2]*dfi[2]) +eps) // I wish I coudl precompute this, but if I do then Id need to recompute the gradient. 
-dfj[1]/(sqrt(dfj[0]*dfj[0]+dfj[1]*dfj[1]+dfj[2]*dfj[2]) +eps) -dfk[0]/(sqrt(dfk[0]*dfk[0]+dfk[1]*dfk[1]+dfk[2]*dfk[2]) +eps); } __device__ void warpReduce(volatile float *sdata, size_t tid) { sdata[tid] += sdata[tid + 32]; sdata[tid] += sdata[tid + 16]; sdata[tid] += sdata[tid + 8]; sdata[tid] += sdata[tid + 4]; sdata[tid] += sdata[tid + 2]; sdata[tid] += sdata[tid + 1]; } __global__ void reduceNorm2(float *g_idata, float *g_odata, size_t n){ extern __shared__ volatile float sdata[]; //http://stackoverflow.com/a/35133396/1485872 size_t tid = threadIdx.x; size_t i = blockIdx.x*blockDim.x + tid; size_t gridSize = blockDim.x*gridDim.x; float mySum = 0; float value=0; while (i < n) { value=g_idata[i]; //avoid reading twice mySum += value*value; i += gridSize; } sdata[tid] = mySum; __syncthreads(); if (tid < 512) sdata[tid] += sdata[tid + 512]; __syncthreads(); if (tid < 256) sdata[tid] += sdata[tid + 256]; __syncthreads(); if (tid < 128) sdata[tid] += sdata[tid + 128]; __syncthreads(); if (tid < 64) sdata[tid] += sdata[tid + 64]; __syncthreads(); #if (__CUDA_ARCH__ >= 300) if ( tid < 32 ) { mySum = sdata[tid] + sdata[tid + 32]; for (int offset = warpSize/2; offset > 0; offset /= 2) { mySum += __shfl_down(mySum, offset); } } #else if (tid < 32) { warpReduce(sdata, tid); mySum = sdata[0]; } #endif if (tid == 0) g_odata[blockIdx.x] = mySum; } __global__ void reduceSum(float *g_idata, float *g_odata, size_t n){ extern __shared__ volatile float sdata[]; //http://stackoverflow.com/a/35133396/1485872 size_t tid = threadIdx.x; size_t i = blockIdx.x*blockDim.x + tid; size_t gridSize = blockDim.x*gridDim.x; float mySum = 0; // float value=0; while (i < n) { mySum += g_idata[i]; i += gridSize; } sdata[tid] = mySum; __syncthreads(); if (tid < 512) sdata[tid] += sdata[tid + 512]; __syncthreads(); if (tid < 256) sdata[tid] += sdata[tid + 256]; __syncthreads(); if (tid < 128) sdata[tid] += sdata[tid + 128]; __syncthreads(); if (tid < 64) sdata[tid] += sdata[tid + 64]; __syncthreads(); #if (__CUDA_ARCH__ >= 300) if ( tid < 32 ) { mySum = sdata[tid] + sdata[tid + 32]; for (int offset = warpSize/2; offset > 0; offset /= 2) { mySum += __shfl_down(mySum, offset); } } #else if (tid < 32) { warpReduce(sdata, tid); mySum = sdata[0]; } #endif if (tid == 0) g_odata[blockIdx.x] = mySum; } // main function void pocs_tv(const float* img,float* dst,float alpha,const long* image_size, int maxIter){ size_t total_pixels = image_size[0] * image_size[1] * image_size[2] ; size_t mem_size = sizeof(float) * total_pixels; float *d_image, *d_dimgTV,*d_norm2aux,*d_norm2; // memory for image hipMalloc(&d_image, mem_size); hipMemcpy(d_image, img, mem_size, hipMemcpyHostToDevice); cudaCheckErrors("Memory Malloc and Memset: SRC"); // memory for df hipMalloc(&d_dimgTV, mem_size); cudaCheckErrors("Memory Malloc and Memset: TV"); hipMalloc(&d_norm2, mem_size); cudaCheckErrors("Memory Malloc and Memset: TV"); // memory for L2norm auxiliar hipMalloc(&d_norm2aux, sizeof(float)*(total_pixels + MAXTHREADS - 1) / MAXTHREADS); cudaCheckErrors("Memory Malloc and Memset: NORMAux"); // For the gradient dim3 blockGrad(10, 10, 10); dim3 gridGrad((image_size[0]+blockGrad.x-1)/blockGrad.x, (image_size[1]+blockGrad.y-1)/blockGrad.y, (image_size[2]+blockGrad.z-1)/blockGrad.z); // For the reduction float sumnorm2; for(unsigned int i=0;i<maxIter;i++){ // Compute the gradient of the TV norm hipLaunchKernelGGL(( gradientTV), dim3(gridGrad), dim3(blockGrad), 0, 0, d_image,d_dimgTV,image_size[2], image_size[1],image_size[0]); cudaCheckErrors("Gradient"); // 
hipMemcpy(dst, d_dimgTV, mem_size, hipMemcpyDeviceToHost); hipMemcpy(d_norm2, d_dimgTV, mem_size, hipMemcpyDeviceToDevice); // Compute the L2 norm of the gradint. For that, reduction is used. //REDUCE size_t dimblockRed = MAXTHREADS; size_t dimgridRed = (total_pixels + MAXTHREADS - 1) / MAXTHREADS; reduceNorm2 << <dimgridRed, dimblockRed, MAXTHREADS*sizeof(float) >> >(d_norm2, d_norm2aux, total_pixels); cudaCheckErrors("reduce1"); if (dimgridRed > 1) { reduceSum << <1, dimblockRed, MAXTHREADS*sizeof(float) >> >(d_norm2aux, d_norm2, dimgridRed); cudaCheckErrors("reduce2"); hipMemcpy(&sumnorm2, d_norm2, sizeof(float), hipMemcpyDeviceToHost); cudaCheckErrors("hipMemcpy"); } else { hipMemcpy(&sumnorm2, d_norm2aux, sizeof(float), hipMemcpyDeviceToHost); cudaCheckErrors("hipMemcpy"); } //mexPrintf("%f ",sqrt(sumnorm2)); //NOMRALIZE //in a Tesla, maximum blocks =15 SM * 4 blocks/SM hipLaunchKernelGGL(( divideArrayScalar) , dim3(60),dim3(MAXTHREADS), 0, 0, d_dimgTV,sqrt(sumnorm2),total_pixels); //MULTIPLY HYPERPARAMETER hipLaunchKernelGGL(( multiplyArrayScalar), dim3(60),dim3(MAXTHREADS), 0, 0, d_dimgTV,alpha, total_pixels); //SUBSTRACT GRADIENT hipLaunchKernelGGL(( substractArrays) , dim3(60),dim3(MAXTHREADS), 0, 0, d_image,d_dimgTV, total_pixels); sumnorm2=0; } cudaCheckErrors("TV minimization"); hipMemcpy(dst, d_image, mem_size, hipMemcpyDeviceToHost); cudaCheckErrors("Copy result back"); hipFree(d_image); hipFree(d_norm2aux); hipFree(d_dimgTV); hipFree(d_norm2); cudaCheckErrors("Memory free"); }
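The elementwise kernels above (divideArrayScalar, multiplyArrayScalar, substractArrays) all use a grid-stride loop, which is why the host can launch them with a fixed grid of 60 blocks regardless of total_pixels. A minimal standalone example of the pattern, written with the CUDA spellings:

// Grid-stride loop: a fixed number of blocks walks the whole array, each
// thread advancing by the total number of launched threads per step.
__global__ void scale_sketch(float* v, float s, size_t n) {
  for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += (size_t)gridDim.x * blockDim.x) {
    v[i] *= s;
  }
}
// Launch example, mirroring the <<<60, MAXTHREADS>>> configuration above:
// scale_sketch<<<60, 1024>>>(d_v, 0.5f, n);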
77dc6a5f5af18bb3dd718fc65f1488c4759b7295.cu
/*------------------------------------------------------------------------- * * CUDA functions for Steepest descend in POCS-type algorithms. * * This file will iteratively minimize by stepest descend the total variation * of the input image, with the parameters given, using GPUs. * * CODE by Ander Biguri * --------------------------------------------------------------------------- --------------------------------------------------------------------------- Copyright (c) 2015, University of Bath and CERN- European Organization for Nuclear Research All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
--------------------------------------------------------------------------- Contact: [email protected] Codes : https://github.com/CERN/TIGRE --------------------------------------------------------------------------- */ #define MAXTHREADS 1024 #include "POCS_TV.hpp" #define cudaCheckErrors(msg) \ do { \ cudaError_t __err = cudaGetLastError(); \ if (__err != cudaSuccess) { \ mexPrintf("ERROR in: %s \n",msg);\ mexErrMsgIdAndTxt("err",cudaGetErrorString(__err));\ } \ } while (0) // CUDA kernels //https://stackoverflow.com/questions/21332040/simple-cuda-kernel-optimization/21340927#21340927 __global__ void divideArrayScalar(float* vec,float scalar,const size_t n) { unsigned long long i = (blockIdx.x * blockDim.x) + threadIdx.x; for(; i<n; i+=gridDim.x*blockDim.x) { vec[i]/=scalar; } } __global__ void multiplyArrayScalar(float* vec,float scalar,const size_t n) { unsigned long long i = (blockIdx.x * blockDim.x) + threadIdx.x; for(; i<n; i+=gridDim.x*blockDim.x) { vec[i]*=scalar; } } __global__ void substractArrays(float* vec,float* vec2,const size_t n) { unsigned long long i = (blockIdx.x * blockDim.x) + threadIdx.x; for(; i<n; i+=gridDim.x*blockDim.x) { vec[i]-=vec2[i]; } } __device__ __inline__ void gradient(const float* u, float* grad, long z, long y, long x, long depth, long rows, long cols) { unsigned long size2d = rows*cols; unsigned long long idx = z * size2d + y * cols + x; float uidx = u[idx]; if ( z - 1 >= 0 && z<depth) { grad[0] = (uidx-u[(z-1)*size2d + y*cols + x]) ; } if ( y - 1 >= 0 && y<rows){ grad[1] = (uidx-u[z*size2d + (y-1)*cols + x]) ; } if ( x - 1 >= 0 && x<cols) { grad[2] = (uidx-u[z*size2d + y*cols + (x-1)]); } } __global__ void gradientTV(const float* f, float* dftv, long depth, long rows, long cols){ unsigned long x = threadIdx.x + blockIdx.x * blockDim.x; unsigned long y = threadIdx.y + blockIdx.y * blockDim.y; unsigned long z = threadIdx.z + blockIdx.z * blockDim.z; unsigned long long idx = z * rows * cols + y * cols + x; if ( x >= cols || y >= rows || z >= depth ) return; float df[3] ={0,0,0}; float dfi[3]={0,0,0}; // dfi== \partial f_{i+1,j,k} float dfj[3]={0,0,0}; float dfk[3]={0,0,0}; gradient(f,df ,z ,y ,x , depth,rows,cols); gradient(f,dfi ,z ,y ,x+1, depth,rows,cols); gradient(f,dfj ,z ,y+1,x , depth,rows,cols); gradient(f,dfk ,z+1,y ,x , depth,rows,cols); float eps=0.00000001; //% avoid division by zero dftv[idx]=(df[0]+df[1]+df[2])/(sqrt(df[0] *df[0] +df[1] *df[1] +df[2] *df[2])+eps) -dfi[2]/(sqrt(dfi[0]*dfi[0]+dfi[1]*dfi[1]+dfi[2]*dfi[2]) +eps) // I wish I coudl precompute this, but if I do then Id need to recompute the gradient. 
-dfj[1]/(sqrt(dfj[0]*dfj[0]+dfj[1]*dfj[1]+dfj[2]*dfj[2]) +eps) -dfk[0]/(sqrt(dfk[0]*dfk[0]+dfk[1]*dfk[1]+dfk[2]*dfk[2]) +eps); } __device__ void warpReduce(volatile float *sdata, size_t tid) { sdata[tid] += sdata[tid + 32]; sdata[tid] += sdata[tid + 16]; sdata[tid] += sdata[tid + 8]; sdata[tid] += sdata[tid + 4]; sdata[tid] += sdata[tid + 2]; sdata[tid] += sdata[tid + 1]; } __global__ void reduceNorm2(float *g_idata, float *g_odata, size_t n){ extern __shared__ volatile float sdata[]; //http://stackoverflow.com/a/35133396/1485872 size_t tid = threadIdx.x; size_t i = blockIdx.x*blockDim.x + tid; size_t gridSize = blockDim.x*gridDim.x; float mySum = 0; float value=0; while (i < n) { value=g_idata[i]; //avoid reading twice mySum += value*value; i += gridSize; } sdata[tid] = mySum; __syncthreads(); if (tid < 512) sdata[tid] += sdata[tid + 512]; __syncthreads(); if (tid < 256) sdata[tid] += sdata[tid + 256]; __syncthreads(); if (tid < 128) sdata[tid] += sdata[tid + 128]; __syncthreads(); if (tid < 64) sdata[tid] += sdata[tid + 64]; __syncthreads(); #if (__CUDA_ARCH__ >= 300) if ( tid < 32 ) { mySum = sdata[tid] + sdata[tid + 32]; for (int offset = warpSize/2; offset > 0; offset /= 2) { mySum += __shfl_down(mySum, offset); } } #else if (tid < 32) { warpReduce(sdata, tid); mySum = sdata[0]; } #endif if (tid == 0) g_odata[blockIdx.x] = mySum; } __global__ void reduceSum(float *g_idata, float *g_odata, size_t n){ extern __shared__ volatile float sdata[]; //http://stackoverflow.com/a/35133396/1485872 size_t tid = threadIdx.x; size_t i = blockIdx.x*blockDim.x + tid; size_t gridSize = blockDim.x*gridDim.x; float mySum = 0; // float value=0; while (i < n) { mySum += g_idata[i]; i += gridSize; } sdata[tid] = mySum; __syncthreads(); if (tid < 512) sdata[tid] += sdata[tid + 512]; __syncthreads(); if (tid < 256) sdata[tid] += sdata[tid + 256]; __syncthreads(); if (tid < 128) sdata[tid] += sdata[tid + 128]; __syncthreads(); if (tid < 64) sdata[tid] += sdata[tid + 64]; __syncthreads(); #if (__CUDA_ARCH__ >= 300) if ( tid < 32 ) { mySum = sdata[tid] + sdata[tid + 32]; for (int offset = warpSize/2; offset > 0; offset /= 2) { mySum += __shfl_down(mySum, offset); } } #else if (tid < 32) { warpReduce(sdata, tid); mySum = sdata[0]; } #endif if (tid == 0) g_odata[blockIdx.x] = mySum; } // main function void pocs_tv(const float* img,float* dst,float alpha,const long* image_size, int maxIter){ size_t total_pixels = image_size[0] * image_size[1] * image_size[2] ; size_t mem_size = sizeof(float) * total_pixels; float *d_image, *d_dimgTV,*d_norm2aux,*d_norm2; // memory for image cudaMalloc(&d_image, mem_size); cudaMemcpy(d_image, img, mem_size, cudaMemcpyHostToDevice); cudaCheckErrors("Memory Malloc and Memset: SRC"); // memory for df cudaMalloc(&d_dimgTV, mem_size); cudaCheckErrors("Memory Malloc and Memset: TV"); cudaMalloc(&d_norm2, mem_size); cudaCheckErrors("Memory Malloc and Memset: TV"); // memory for L2norm auxiliar cudaMalloc(&d_norm2aux, sizeof(float)*(total_pixels + MAXTHREADS - 1) / MAXTHREADS); cudaCheckErrors("Memory Malloc and Memset: NORMAux"); // For the gradient dim3 blockGrad(10, 10, 10); dim3 gridGrad((image_size[0]+blockGrad.x-1)/blockGrad.x, (image_size[1]+blockGrad.y-1)/blockGrad.y, (image_size[2]+blockGrad.z-1)/blockGrad.z); // For the reduction float sumnorm2; for(unsigned int i=0;i<maxIter;i++){ // Compute the gradient of the TV norm gradientTV<<<gridGrad, blockGrad>>>(d_image,d_dimgTV,image_size[2], image_size[1],image_size[0]); cudaCheckErrors("Gradient"); // cudaMemcpy(dst, d_dimgTV, 
mem_size, cudaMemcpyDeviceToHost); cudaMemcpy(d_norm2, d_dimgTV, mem_size, cudaMemcpyDeviceToDevice); // Compute the L2 norm of the gradint. For that, reduction is used. //REDUCE size_t dimblockRed = MAXTHREADS; size_t dimgridRed = (total_pixels + MAXTHREADS - 1) / MAXTHREADS; reduceNorm2 << <dimgridRed, dimblockRed, MAXTHREADS*sizeof(float) >> >(d_norm2, d_norm2aux, total_pixels); cudaCheckErrors("reduce1"); if (dimgridRed > 1) { reduceSum << <1, dimblockRed, MAXTHREADS*sizeof(float) >> >(d_norm2aux, d_norm2, dimgridRed); cudaCheckErrors("reduce2"); cudaMemcpy(&sumnorm2, d_norm2, sizeof(float), cudaMemcpyDeviceToHost); cudaCheckErrors("cudaMemcpy"); } else { cudaMemcpy(&sumnorm2, d_norm2aux, sizeof(float), cudaMemcpyDeviceToHost); cudaCheckErrors("cudaMemcpy"); } //mexPrintf("%f ",sqrt(sumnorm2)); //NOMRALIZE //in a Tesla, maximum blocks =15 SM * 4 blocks/SM divideArrayScalar <<<60,MAXTHREADS>>>(d_dimgTV,sqrt(sumnorm2),total_pixels); //MULTIPLY HYPERPARAMETER multiplyArrayScalar<<<60,MAXTHREADS>>>(d_dimgTV,alpha, total_pixels); //SUBSTRACT GRADIENT substractArrays <<<60,MAXTHREADS>>>(d_image,d_dimgTV, total_pixels); sumnorm2=0; } cudaCheckErrors("TV minimization"); cudaMemcpy(dst, d_image, mem_size, cudaMemcpyDeviceToHost); cudaCheckErrors("Copy result back"); cudaFree(d_image); cudaFree(d_norm2aux); cudaFree(d_dimgTV); cudaFree(d_norm2); cudaCheckErrors("Memory free"); }
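The pocs_tv entry point above is written to be driven from MATLAB (its error macro reports through mexPrintf/mexErrMsgIdAndTxt). A minimal standalone host driver might look like the sketch below; it is hypothetical, assumes POCS_TV.hpp declares pocs_tv, and assumes the MEX calls in the error macro are swapped for printf/exit when building outside MATLAB. The volume dimensions, alpha and iteration count are made-up example values, not values taken from the original code.

// Hypothetical standalone driver for pocs_tv (sketch only; not part of the file above).
#include <cstdlib>
#include "POCS_TV.hpp"   // assumed to declare pocs_tv(const float*, float*, float, const long*, int)

int main() {
    const long image_size[3] = {128, 128, 64};       // x, y, z voxels (example values)
    const size_t n = (size_t)image_size[0] * image_size[1] * image_size[2];

    float* img = (float*)calloc(n, sizeof(float));   // input volume, filled elsewhere
    float* dst = (float*)malloc(n * sizeof(float));  // TV-minimized output

    // 20 steepest-descent steps with hyperparameter alpha = 0.002
    pocs_tv(img, dst, 0.002f, image_size, 20);

    free(img);
    free(dst);
    return 0;
}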
2298b0ed15762440f46d7c64c1c96081beca308d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #define BLOCK_SIZE 16 #define HEADER_SIZE 138 typedef unsigned char BYTE; /** * Structure that represents a BMP image. */ typedef struct { int width; int height; float *data; } BMPImage; typedef struct timeval tval; BYTE g_info[HEADER_SIZE]; // Reference header /** * Reads a BMP 24bpp file and returns a BMPImage structure. * Thanks to https://stackoverflow.com/a/9296467 */ BMPImage readBMP(char *filename) { BMPImage bitmap = { 0 }; int size = 0; BYTE *data = NULL; FILE *file = fopen(filename, "rb"); // Read the header (expected BGR - 24bpp) fread(g_info, sizeof(BYTE), HEADER_SIZE, file); // Get the image width / height from the header bitmap.width = *((int *)&g_info[18]); bitmap.height = *((int *)&g_info[22]); size = *((int *)&g_info[34]); // Read the image data data = (BYTE *)malloc(sizeof(BYTE) * size); fread(data, sizeof(BYTE), size, file); // Convert the pixel values to float bitmap.data = (float *)malloc(sizeof(float) * size); for (int i = 0; i < size; i++) { bitmap.data[i] = (float)data[i]; } fclose(file); free(data); return bitmap; } /** * Writes a BMP file in grayscale given its image data and a filename. */ void writeBMPGrayscale(int width, int height, float *image, char *filename) { FILE *file = NULL; file = fopen(filename, "wb"); // Write the reference header fwrite(g_info, sizeof(BYTE), HEADER_SIZE, file); // Unwrap the 8-bit grayscale into a 24bpp (for simplicity) for (int h = 0; h < height; h++) { int offset = h * width; for (int w = 0; w < width; w++) { BYTE pixel = (BYTE)((image[offset + w] > 255.0f) ? 255.0f : (image[offset + w] < 0.0f) ? 0.0f : image[offset + w]); // Repeat the same pixel value for BGR fputc(pixel, file); fputc(pixel, file); fputc(pixel, file); } } fclose(file); } /** * Releases a given BMPImage. */ void freeBMP(BMPImage bitmap) { free(bitmap.data); } /** * Checks if there has been any CUDA error. The method will automatically print * some information and exit the program when an error is found. */ void checkCUDAError() { hipError_t hipError_t = hipGetLastError(); if(hipError_t != hipSuccess) { printf("CUDA Error: Returned %d: %s\n", hipError_t, hipGetErrorString(hipError_t)); exit(-1); } } /** * Calculates the elapsed time between two time intervals (in milliseconds). */ double get_elapsed(tval t0, tval t1) { return (double)(t1.tv_sec - t0.tv_sec) * 1000.0L + (double)(t1.tv_usec - t0.tv_usec) / 1000.0L; } /** * Stores the result image and prints a message. */ void store_result(int index, double elapsed_cpu, double elapsed_gpu, int width, int height, float *image) { char path[255]; sprintf(path, "images/hw3_result_%d.bmp", index); writeBMPGrayscale(width, height, image, path); printf("Step #%d Completed - Result stored in \"%s\".\n", index, path); printf("Elapsed CPU: %fms / ", elapsed_cpu); if (elapsed_gpu == 0) { printf("[GPU version not available]\n"); } else { printf("Elapsed GPU: %fms\n", elapsed_gpu); } } /** * Converts a given 24bpp image into 8bpp grayscale using the CPU. 
*/ void cpu_grayscale(int width, int height, float *image, float *image_out) { for (int h = 0; h < height; h++) { int offset_out = h * width; // 1 color per pixel int offset = offset_out * 3; // 3 colors per pixel for (int w = 0; w < width; w++) { float *pixel = &image[offset + w * 3]; // Convert to grayscale following the "luminance" model image_out[offset_out + w] = pixel[0] * 0.0722f + // B pixel[1] * 0.7152f + // G pixel[2] * 0.2126f; // R } } } /** * Converts a given 24bpp image into 8bpp grayscale using the GPU. */ __global__ void gpu_grayscale(int width, int height, float *image, float *image_out) { //////////////// // TO-DO #4.2 ///////////////////////////////////////////// // Implement the GPU version of the grayscale conversion // /////////////////////////////////////////////////////////// } /** * Applies a 3x3 convolution matrix to a pixel using the CPU. */ float cpu_applyFilter(float *image, int stride, float *matrix, int filter_dim) { float pixel = 0.0f; for (int h = 0; h < filter_dim; h++) { int offset = h * stride; int offset_kernel = h * filter_dim; for (int w = 0; w < filter_dim; w++) { pixel += image[offset + w] * matrix[offset_kernel + w]; } } return pixel; } /** * Applies a 3x3 convolution matrix to a pixel using the GPU. */ __device__ float gpu_applyFilter(float *image, int stride, float *matrix, int filter_dim) { //////////////// // TO-DO #5.2 //////////////////////////////////////////////// // Implement the GPU version of cpu_applyFilter() // // // // Does it make sense to have a separate gpu_applyFilter()? // ////////////////////////////////////////////////////////////// return 0.0f; } /** * Applies a Gaussian 3x3 filter to a given image using the CPU. */ void cpu_gaussian(int width, int height, float *image, float *image_out) { float gaussian[9] = { 1.0f / 16.0f, 2.0f / 16.0f, 1.0f / 16.0f, 2.0f / 16.0f, 4.0f / 16.0f, 2.0f / 16.0f, 1.0f / 16.0f, 2.0f / 16.0f, 1.0f / 16.0f }; for (int h = 0; h < (height - 2); h++) { int offset_t = h * width; int offset = (h + 1) * width; for (int w = 0; w < (width - 2); w++) { image_out[offset + (w + 1)] = cpu_applyFilter(&image[offset_t + w], width, gaussian, 3); } } } /** * Applies a Gaussian 3x3 filter to a given image using the GPU. */ __global__ void gpu_gaussian(int width, int height, float *image, float *image_out) { float gaussian[9] = { 1.0f / 16.0f, 2.0f / 16.0f, 1.0f / 16.0f, 2.0f / 16.0f, 4.0f / 16.0f, 2.0f / 16.0f, 1.0f / 16.0f, 2.0f / 16.0f, 1.0f / 16.0f }; int index_x = blockIdx.x * blockDim.x + threadIdx.x; int index_y = blockIdx.y * blockDim.y + threadIdx.y; if (index_x < (width - 2) && index_y < (height - 2)) { int offset_t = index_y * width + index_x; int offset = (index_y + 1) * width + (index_x + 1); image_out[offset] = gpu_applyFilter(&image[offset_t], width, gaussian, 3); } } /** * Calculates the gradient of an image using a Sobel filter on the CPU. */ void cpu_sobel(int width, int height, float *image, float *image_out) { float sobel_x[9] = { 1.0f, 0.0f, -1.0f, 2.0f, 0.0f, -2.0f, 1.0f, 0.0f, -1.0f }; float sobel_y[9] = { 1.0f, 2.0f, 1.0f, 0.0f, 0.0f, 0.0f, -1.0f, -2.0f, -1.0f }; for (int h = 0; h < (height - 2); h++) { int offset_t = h * width; int offset = (h + 1) * width; for (int w = 0; w < (width - 2); w++) { float gx = cpu_applyFilter(&image[offset_t + w], width, sobel_x, 3); float gy = cpu_applyFilter(&image[offset_t + w], width, sobel_y, 3); // Note: The output can be negative or exceed the max. color value // of 255. We compensate this afterwards while storing the file. 
image_out[offset + (w + 1)] = sqrtf(gx * gx + gy * gy); } } } /** * Calculates the gradient of an image using a Sobel filter on the GPU. */ __global__ void gpu_sobel(int width, int height, float *image, float *image_out) { //////////////// // TO-DO #6.1 ///////////////////////////////////// // Implement the GPU version of the Sobel filter // /////////////////////////////////////////////////// } int main(int argc, char **argv) { BMPImage bitmap = { 0 }; float *d_bitmap = { 0 }; float *image_out[2] = { 0 }; float *d_image_out[2] = { 0 }; int image_size = 0; tval t[2] = { 0 }; double elapsed[2] = { 0 }; dim3 grid(1); // The grid will be defined later dim3 block(BLOCK_SIZE, BLOCK_SIZE); // The block size will not change // Make sure the filename is provided if (argc != 2) { fprintf(stderr, "Error: The filename is missing!\n"); return -1; } // Read the input image and update the grid dimension bitmap = readBMP(argv[1]); image_size = bitmap.width * bitmap.height; grid = dim3(((bitmap.width + (BLOCK_SIZE - 1)) / BLOCK_SIZE), ((bitmap.height + (BLOCK_SIZE - 1)) / BLOCK_SIZE)); printf("Image opened (width=%d height=%d).\n", bitmap.width, bitmap.height); // Allocate the intermediate image buffers for each step for (int i = 0; i < 2; i++) { image_out[i] = (float *)calloc(image_size, sizeof(float)); hipMalloc(&d_image_out[i], image_size * sizeof(float)); hipMemset(d_image_out[i], 0, image_size * sizeof(float)); } hipMalloc(&d_bitmap, image_size * sizeof(float) * 3); hipMemcpy(d_bitmap, bitmap.data, image_size * sizeof(float) * 3, hipMemcpyHostToDevice); // Step 1: Convert to grayscale { // Launch the CPU version gettimeofday(&t[0], NULL); cpu_grayscale(bitmap.width, bitmap.height, bitmap.data, image_out[0]); gettimeofday(&t[1], NULL); elapsed[0] = get_elapsed(t[0], t[1]); // Launch the GPU version gettimeofday(&t[0], NULL); // gpu_grayscale<<<grid, block>>>(bitmap.width, bitmap.height, // d_bitmap, d_image_out[0]); // hipMemcpy(image_out[0], d_image_out[0], // image_size * sizeof(float), hipMemcpyDeviceToHost); gettimeofday(&t[1], NULL); elapsed[1] = get_elapsed(t[0], t[1]); // Store the result image in grayscale store_result(1, elapsed[0], elapsed[1], bitmap.width, bitmap.height, image_out[0]); } // Step 2: Apply a 3x3 Gaussian filter { // Launch the CPU version gettimeofday(&t[0], NULL); cpu_gaussian(bitmap.width, bitmap.height, image_out[0], image_out[1]); gettimeofday(&t[1], NULL); elapsed[0] = get_elapsed(t[0], t[1]); // Launch the GPU version gettimeofday(&t[0], NULL); // gpu_gaussian<<<grid, block>>>(bitmap.width, bitmap.height, // d_image_out[0], d_image_out[1]); // hipMemcpy(image_out[1], d_image_out[1], // image_size * sizeof(float), hipMemcpyDeviceToHost); gettimeofday(&t[1], NULL); elapsed[1] = get_elapsed(t[0], t[1]); // Store the result image with the Gaussian filter applied store_result(2, elapsed[0], elapsed[1], bitmap.width, bitmap.height, image_out[1]); } // Step 3: Apply a Sobel filter { // Launch the CPU version gettimeofday(&t[0], NULL); cpu_sobel(bitmap.width, bitmap.height, image_out[1], image_out[0]); gettimeofday(&t[1], NULL); elapsed[0] = get_elapsed(t[0], t[1]); // Launch the GPU version gettimeofday(&t[0], NULL); // gpu_sobel<<<grid, block>>>(bitmap.width, bitmap.height, // d_image_out[1], d_image_out[0]); // hipMemcpy(image_out[0], d_image_out[0], // image_size * sizeof(float), hipMemcpyDeviceToHost); gettimeofday(&t[1], NULL); elapsed[1] = get_elapsed(t[0], t[1]); // Store the final result image with the Sobel filter applied store_result(3, elapsed[0], elapsed[1], 
bitmap.width, bitmap.height, image_out[0]); } // Release the allocated memory for (int i = 0; i < 2; i++) { free(image_out[i]); hipFree(d_image_out[i]); } freeBMP(bitmap); hipFree(d_bitmap); return 0; }
2298b0ed15762440f46d7c64c1c96081beca308d.cu
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #define BLOCK_SIZE 16 #define HEADER_SIZE 138 typedef unsigned char BYTE; /** * Structure that represents a BMP image. */ typedef struct { int width; int height; float *data; } BMPImage; typedef struct timeval tval; BYTE g_info[HEADER_SIZE]; // Reference header /** * Reads a BMP 24bpp file and returns a BMPImage structure. * Thanks to https://stackoverflow.com/a/9296467 */ BMPImage readBMP(char *filename) { BMPImage bitmap = { 0 }; int size = 0; BYTE *data = NULL; FILE *file = fopen(filename, "rb"); // Read the header (expected BGR - 24bpp) fread(g_info, sizeof(BYTE), HEADER_SIZE, file); // Get the image width / height from the header bitmap.width = *((int *)&g_info[18]); bitmap.height = *((int *)&g_info[22]); size = *((int *)&g_info[34]); // Read the image data data = (BYTE *)malloc(sizeof(BYTE) * size); fread(data, sizeof(BYTE), size, file); // Convert the pixel values to float bitmap.data = (float *)malloc(sizeof(float) * size); for (int i = 0; i < size; i++) { bitmap.data[i] = (float)data[i]; } fclose(file); free(data); return bitmap; } /** * Writes a BMP file in grayscale given its image data and a filename. */ void writeBMPGrayscale(int width, int height, float *image, char *filename) { FILE *file = NULL; file = fopen(filename, "wb"); // Write the reference header fwrite(g_info, sizeof(BYTE), HEADER_SIZE, file); // Unwrap the 8-bit grayscale into a 24bpp (for simplicity) for (int h = 0; h < height; h++) { int offset = h * width; for (int w = 0; w < width; w++) { BYTE pixel = (BYTE)((image[offset + w] > 255.0f) ? 255.0f : (image[offset + w] < 0.0f) ? 0.0f : image[offset + w]); // Repeat the same pixel value for BGR fputc(pixel, file); fputc(pixel, file); fputc(pixel, file); } } fclose(file); } /** * Releases a given BMPImage. */ void freeBMP(BMPImage bitmap) { free(bitmap.data); } /** * Checks if there has been any CUDA error. The method will automatically print * some information and exit the program when an error is found. */ void checkCUDAError() { cudaError_t cudaError = cudaGetLastError(); if(cudaError != cudaSuccess) { printf("CUDA Error: Returned %d: %s\n", cudaError, cudaGetErrorString(cudaError)); exit(-1); } } /** * Calculates the elapsed time between two time intervals (in milliseconds). */ double get_elapsed(tval t0, tval t1) { return (double)(t1.tv_sec - t0.tv_sec) * 1000.0L + (double)(t1.tv_usec - t0.tv_usec) / 1000.0L; } /** * Stores the result image and prints a message. */ void store_result(int index, double elapsed_cpu, double elapsed_gpu, int width, int height, float *image) { char path[255]; sprintf(path, "images/hw3_result_%d.bmp", index); writeBMPGrayscale(width, height, image, path); printf("Step #%d Completed - Result stored in \"%s\".\n", index, path); printf("Elapsed CPU: %fms / ", elapsed_cpu); if (elapsed_gpu == 0) { printf("[GPU version not available]\n"); } else { printf("Elapsed GPU: %fms\n", elapsed_gpu); } } /** * Converts a given 24bpp image into 8bpp grayscale using the CPU. 
*/ void cpu_grayscale(int width, int height, float *image, float *image_out) { for (int h = 0; h < height; h++) { int offset_out = h * width; // 1 color per pixel int offset = offset_out * 3; // 3 colors per pixel for (int w = 0; w < width; w++) { float *pixel = &image[offset + w * 3]; // Convert to grayscale following the "luminance" model image_out[offset_out + w] = pixel[0] * 0.0722f + // B pixel[1] * 0.7152f + // G pixel[2] * 0.2126f; // R } } } /** * Converts a given 24bpp image into 8bpp grayscale using the GPU. */ __global__ void gpu_grayscale(int width, int height, float *image, float *image_out) { //////////////// // TO-DO #4.2 ///////////////////////////////////////////// // Implement the GPU version of the grayscale conversion // /////////////////////////////////////////////////////////// } /** * Applies a 3x3 convolution matrix to a pixel using the CPU. */ float cpu_applyFilter(float *image, int stride, float *matrix, int filter_dim) { float pixel = 0.0f; for (int h = 0; h < filter_dim; h++) { int offset = h * stride; int offset_kernel = h * filter_dim; for (int w = 0; w < filter_dim; w++) { pixel += image[offset + w] * matrix[offset_kernel + w]; } } return pixel; } /** * Applies a 3x3 convolution matrix to a pixel using the GPU. */ __device__ float gpu_applyFilter(float *image, int stride, float *matrix, int filter_dim) { //////////////// // TO-DO #5.2 //////////////////////////////////////////////// // Implement the GPU version of cpu_applyFilter() // // // // Does it make sense to have a separate gpu_applyFilter()? // ////////////////////////////////////////////////////////////// return 0.0f; } /** * Applies a Gaussian 3x3 filter to a given image using the CPU. */ void cpu_gaussian(int width, int height, float *image, float *image_out) { float gaussian[9] = { 1.0f / 16.0f, 2.0f / 16.0f, 1.0f / 16.0f, 2.0f / 16.0f, 4.0f / 16.0f, 2.0f / 16.0f, 1.0f / 16.0f, 2.0f / 16.0f, 1.0f / 16.0f }; for (int h = 0; h < (height - 2); h++) { int offset_t = h * width; int offset = (h + 1) * width; for (int w = 0; w < (width - 2); w++) { image_out[offset + (w + 1)] = cpu_applyFilter(&image[offset_t + w], width, gaussian, 3); } } } /** * Applies a Gaussian 3x3 filter to a given image using the GPU. */ __global__ void gpu_gaussian(int width, int height, float *image, float *image_out) { float gaussian[9] = { 1.0f / 16.0f, 2.0f / 16.0f, 1.0f / 16.0f, 2.0f / 16.0f, 4.0f / 16.0f, 2.0f / 16.0f, 1.0f / 16.0f, 2.0f / 16.0f, 1.0f / 16.0f }; int index_x = blockIdx.x * blockDim.x + threadIdx.x; int index_y = blockIdx.y * blockDim.y + threadIdx.y; if (index_x < (width - 2) && index_y < (height - 2)) { int offset_t = index_y * width + index_x; int offset = (index_y + 1) * width + (index_x + 1); image_out[offset] = gpu_applyFilter(&image[offset_t], width, gaussian, 3); } } /** * Calculates the gradient of an image using a Sobel filter on the CPU. */ void cpu_sobel(int width, int height, float *image, float *image_out) { float sobel_x[9] = { 1.0f, 0.0f, -1.0f, 2.0f, 0.0f, -2.0f, 1.0f, 0.0f, -1.0f }; float sobel_y[9] = { 1.0f, 2.0f, 1.0f, 0.0f, 0.0f, 0.0f, -1.0f, -2.0f, -1.0f }; for (int h = 0; h < (height - 2); h++) { int offset_t = h * width; int offset = (h + 1) * width; for (int w = 0; w < (width - 2); w++) { float gx = cpu_applyFilter(&image[offset_t + w], width, sobel_x, 3); float gy = cpu_applyFilter(&image[offset_t + w], width, sobel_y, 3); // Note: The output can be negative or exceed the max. color value // of 255. We compensate this afterwards while storing the file. 
image_out[offset + (w + 1)] = sqrtf(gx * gx + gy * gy); } } } /** * Calculates the gradient of an image using a Sobel filter on the GPU. */ __global__ void gpu_sobel(int width, int height, float *image, float *image_out) { //////////////// // TO-DO #6.1 ///////////////////////////////////// // Implement the GPU version of the Sobel filter // /////////////////////////////////////////////////// } int main(int argc, char **argv) { BMPImage bitmap = { 0 }; float *d_bitmap = { 0 }; float *image_out[2] = { 0 }; float *d_image_out[2] = { 0 }; int image_size = 0; tval t[2] = { 0 }; double elapsed[2] = { 0 }; dim3 grid(1); // The grid will be defined later dim3 block(BLOCK_SIZE, BLOCK_SIZE); // The block size will not change // Make sure the filename is provided if (argc != 2) { fprintf(stderr, "Error: The filename is missing!\n"); return -1; } // Read the input image and update the grid dimension bitmap = readBMP(argv[1]); image_size = bitmap.width * bitmap.height; grid = dim3(((bitmap.width + (BLOCK_SIZE - 1)) / BLOCK_SIZE), ((bitmap.height + (BLOCK_SIZE - 1)) / BLOCK_SIZE)); printf("Image opened (width=%d height=%d).\n", bitmap.width, bitmap.height); // Allocate the intermediate image buffers for each step for (int i = 0; i < 2; i++) { image_out[i] = (float *)calloc(image_size, sizeof(float)); cudaMalloc(&d_image_out[i], image_size * sizeof(float)); cudaMemset(d_image_out[i], 0, image_size * sizeof(float)); } cudaMalloc(&d_bitmap, image_size * sizeof(float) * 3); cudaMemcpy(d_bitmap, bitmap.data, image_size * sizeof(float) * 3, cudaMemcpyHostToDevice); // Step 1: Convert to grayscale { // Launch the CPU version gettimeofday(&t[0], NULL); cpu_grayscale(bitmap.width, bitmap.height, bitmap.data, image_out[0]); gettimeofday(&t[1], NULL); elapsed[0] = get_elapsed(t[0], t[1]); // Launch the GPU version gettimeofday(&t[0], NULL); // gpu_grayscale<<<grid, block>>>(bitmap.width, bitmap.height, // d_bitmap, d_image_out[0]); // cudaMemcpy(image_out[0], d_image_out[0], // image_size * sizeof(float), cudaMemcpyDeviceToHost); gettimeofday(&t[1], NULL); elapsed[1] = get_elapsed(t[0], t[1]); // Store the result image in grayscale store_result(1, elapsed[0], elapsed[1], bitmap.width, bitmap.height, image_out[0]); } // Step 2: Apply a 3x3 Gaussian filter { // Launch the CPU version gettimeofday(&t[0], NULL); cpu_gaussian(bitmap.width, bitmap.height, image_out[0], image_out[1]); gettimeofday(&t[1], NULL); elapsed[0] = get_elapsed(t[0], t[1]); // Launch the GPU version gettimeofday(&t[0], NULL); // gpu_gaussian<<<grid, block>>>(bitmap.width, bitmap.height, // d_image_out[0], d_image_out[1]); // cudaMemcpy(image_out[1], d_image_out[1], // image_size * sizeof(float), cudaMemcpyDeviceToHost); gettimeofday(&t[1], NULL); elapsed[1] = get_elapsed(t[0], t[1]); // Store the result image with the Gaussian filter applied store_result(2, elapsed[0], elapsed[1], bitmap.width, bitmap.height, image_out[1]); } // Step 3: Apply a Sobel filter { // Launch the CPU version gettimeofday(&t[0], NULL); cpu_sobel(bitmap.width, bitmap.height, image_out[1], image_out[0]); gettimeofday(&t[1], NULL); elapsed[0] = get_elapsed(t[0], t[1]); // Launch the GPU version gettimeofday(&t[0], NULL); // gpu_sobel<<<grid, block>>>(bitmap.width, bitmap.height, // d_image_out[1], d_image_out[0]); // cudaMemcpy(image_out[0], d_image_out[0], // image_size * sizeof(float), cudaMemcpyDeviceToHost); gettimeofday(&t[1], NULL); elapsed[1] = get_elapsed(t[0], t[1]); // Store the final result image with the Sobel filter applied store_result(3, elapsed[0], 
elapsed[1], bitmap.width, bitmap.height, image_out[0]); } // Release the allocated memory for (int i = 0; i < 2; i++) { free(image_out[i]); cudaFree(d_image_out[i]); } freeBMP(bitmap); cudaFree(d_bitmap); return 0; }
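The pair above leaves gpu_grayscale, gpu_applyFilter and gpu_sobel as TO-DO stubs. As an illustration only (not the assignment's reference solution), one possible body for the grayscale kernel simply mirrors cpu_grayscale with one thread per output pixel, matching the 2-D grid and block that main() already configures. The name gpu_grayscale_sketch is hypothetical; in the exercise the body would go inside the existing gpu_grayscale stub.

// Sketch of one possible grayscale kernel, assuming the interleaved BGR float layout produced by readBMP().
__global__ void gpu_grayscale_sketch(int width, int height,
                                     float *image, float *image_out)
{
    int w = blockIdx.x * blockDim.x + threadIdx.x;   // column
    int h = blockIdx.y * blockDim.y + threadIdx.y;   // row

    if (w < width && h < height)
    {
        float *pixel = &image[(h * width + w) * 3];  // 3 channels per pixel
        image_out[h * width + w] = pixel[0] * 0.0722f +  // B
                                   pixel[1] * 0.7152f +  // G
                                   pixel[2] * 0.2126f;   // R
    }
}

gpu_applyFilter could follow the same idea: keep cpu_applyFilter's two loops but mark the function __device__, so that gpu_gaussian and a gpu_sobel kernel can call it once per output pixel.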
558b945d7044a846e970692fd4c5a66fc3a24ff4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" #define INTERVALS 1000000 // Max number of threads per block #define THREADS 512 #define BLOCKS 64 double calculatePiCPU(); // Synchronous error checking call. Enable with nvcc -DDEBUG __global__ void integrateOptimised(int *n, float *g_sum) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int tx = threadIdx.x; // Shared memory to hold the sum for each block __shared__ float s_sum[THREADS]; float sum = 0.0f; float step = 1.0f / (float)*n; for (int i = idx + 1; i <= *n; i += blockDim.x * BLOCKS) { float x = step * ((float)i - 0.5f); sum += 4.0f / (1.0f+ x*x); } s_sum[tx] = sum * step; // Wait for all threads to catch up __syncthreads(); // For each block, do sum using shared memory for (int i = blockDim.x / 2; i > 0; i >>= 1) { if (tx < i) { s_sum[tx] += s_sum[tx + i]; } __syncthreads(); } // Write results to global memory g_sum[idx] = s_sum[tx]; }
558b945d7044a846e970692fd4c5a66fc3a24ff4.cu
#include "includes.h" #define INTERVALS 1000000 // Max number of threads per block #define THREADS 512 #define BLOCKS 64 double calculatePiCPU(); // Synchronous error checking call. Enable with nvcc -DDEBUG __global__ void integrateOptimised(int *n, float *g_sum) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int tx = threadIdx.x; // Shared memory to hold the sum for each block __shared__ float s_sum[THREADS]; float sum = 0.0f; float step = 1.0f / (float)*n; for (int i = idx + 1; i <= *n; i += blockDim.x * BLOCKS) { float x = step * ((float)i - 0.5f); sum += 4.0f / (1.0f+ x*x); } s_sum[tx] = sum * step; // Wait for all threads to catch up __syncthreads(); // For each block, do sum using shared memory for (int i = blockDim.x / 2; i > 0; i >>= 1) { if (tx < i) { s_sum[tx] += s_sum[tx + i]; } __syncthreads(); } // Write results to global memory g_sum[idx] = s_sum[tx]; }
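This pair only contains the integrateOptimised kernel; the host side (including calculatePiCPU) is not part of it. The sketch below is a hypothetical driver, written as if appended to the .cu version above, showing one way the g_sum buffer could be finished on the host. It assumes the per-block totals are read from g_sum[block * THREADS], since after the tree reduction only s_sum[0] of each block holds that block's complete sum.

// Hypothetical host driver (sketch only; the original main() is not included in this pair).
#include <cstdio>
#include <cuda_runtime.h>

int main() {
    int h_n = INTERVALS, *d_n;
    float *d_sum;
    static float h_sum[THREADS * BLOCKS];

    cudaMalloc(&d_n, sizeof(int));
    cudaMalloc(&d_sum, sizeof(float) * THREADS * BLOCKS);
    cudaMemcpy(d_n, &h_n, sizeof(int), cudaMemcpyHostToDevice);

    integrateOptimised<<<BLOCKS, THREADS>>>(d_n, d_sum);
    cudaMemcpy(h_sum, d_sum, sizeof(float) * THREADS * BLOCKS, cudaMemcpyDeviceToHost);

    double pi = 0.0;
    for (int b = 0; b < BLOCKS; b++)
        pi += h_sum[b * THREADS];   // element 0 of each block's slice holds that block's sum

    printf("pi approx = %f\n", pi);
    cudaFree(d_n);
    cudaFree(d_sum);
    return 0;
}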
e712a3af223d7ed1f270eb1edbfeadd2ef72e11a.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <time.h> struct Startup{ int seed = time(nullptr); int random_range = 100; int threads_per_block = 1024; int data_size = 10000; int sample_size = 16; char* output_directory = "."; bool print = false; bool print_result_only = false; bool save = false; bool benchmark = false; bool single = false; } startup; /* Found on the stack overflow: https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api Throws errors if cuda command doesn't return Success */ #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } struct DataSet{ float* values; int size; }; inline int sizeOfDataSet(DataSet data){ return sizeof(float)*data.size; } DataSet generateRandomDataSet(int size){ DataSet data; data.size = size; data.values = (float*)malloc(sizeof(float)*data.size); for (int i = 0; i < data.size; i++) data.values[i] = (float)(rand()%startup.random_range); return data; } bool CompareDataSet(DataSet d1, DataSet d2){ if (d1.size != d2.size) {printf("Datasets are not equal size\n"); return false;}; for (int i = 0; i < d1.size; i++) if (d1.values[i] != d2.values[i]){ printf("Dataset is different at %dth element. D1: %f, D2: %f", i, d1.values[i], d2.values[i] ); return false; } printf("D1 and D2 are equal!"); return true; } /*A cache in-efficent algorithm for computing SMA. Loads everything form global memory*/ __global__ void DeviceCalculateSMA_Global(float* input, int input_size, float* result, int result_size, int sample_size){ int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < result_size){ float sum = 0; for (int i = 0; i < sample_size; i++) sum += input[idx+i]; sum /= sample_size; result[idx] = sum; } } /*A cache efficent algorithm for SMA. Each block loads range of data used by each of its threads into shared memory. Then computes the moving average sum. This algorithm should becomes more efficent as the threads per block increases or as the sample size increases */ __global__ void DeviceCalculateSMA_Shared(float* input, int input_size, float* result, int result_size, int sample_size){ int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < input_size){ /*Shared memory. Size passed in with kernel parameters*/ extern __shared__ float cache[]; int cachedDataSize = sample_size + blockDim.x; /*Copy the data that will be used by the block into shared memory using all threads in the block.*/ for (int i = 0; i < cachedDataSize/blockDim.x+1; i++){ int cacheId = threadIdx.x+ i*blockDim.x; if (cacheId < cachedDataSize && cacheId+blockDim.x *blockIdx.x < input_size) cache[cacheId] = input[cacheId+blockDim.x *blockIdx.x]; } __syncthreads(); /*compute the sum using shared memory*/ float sum = 0; for (int i = 0; i < sample_size; i++){ if(i + threadIdx.x < cachedDataSize && i + idx < input_size) sum += cache[i+threadIdx.x]; } sum /= sample_size; /*store in global memory*/ if (idx < result_size) result[idx] = sum; } } DataSet CalculateSMA(DataSet input, int sample_size, bool usesharedmemory){ if(sample_size == 1) { printf("Warning! Samplesize is 1. 
Result will equal input dataset.\n"); } if(input.size < 1) { printf("Cannot compute a moving average with an empty dataset.\n"); exit(-1); } if(sample_size < 1) { printf("Cannot compute a moving average with a samplesize of 0.\n"); exit(-1); } if(sample_size > input.size) { printf("Error! Sample Size is larger than dataset. Please make samplesize a value less than or equal to dataset size.\n"); exit(-1); } int result_size = input.size-sample_size+1; DataSet host_result = {(float*)malloc(sizeof(float)*(result_size)), result_size}; float* device_input, *device_result; gpuErrchk(hipMalloc((void **)&device_input, sizeOfDataSet(input) )); gpuErrchk(hipMalloc((void **)&device_result, sizeOfDataSet(host_result) )); gpuErrchk(hipMemcpy(device_input, input.values, sizeOfDataSet(input) , hipMemcpyHostToDevice)); int threads_needed = host_result.size; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); if (usesharedmemory){ int shared_memory_allocation_size = sizeof(float)*(startup.threads_per_block+sample_size); /*If shared memory too small, then optimized algorithm cannot be run. Exit*/ hipDeviceProp_t prop; hipGetDeviceProperties(&prop, 0); if (shared_memory_allocation_size > prop.sharedMemPerBlock) {printf("Cannot use shared Memory Algorithm. Not enough shared memory for dataset!"); exit(-1);} hipEventRecord(start); hipLaunchKernelGGL(( DeviceCalculateSMA_Shared), dim3(threads_needed/ startup.threads_per_block + 1), dim3(startup.threads_per_block), shared_memory_allocation_size, 0, device_input, input.size, device_result, host_result.size, sample_size); hipEventRecord(stop); }else{ hipEventRecord(start); hipLaunchKernelGGL(( DeviceCalculateSMA_Global), dim3(threads_needed/ startup.threads_per_block + 1), dim3(startup.threads_per_block), 0, 0, device_input, input.size, device_result, host_result.size, sample_size); hipEventRecord(stop); } hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); if (startup.single){ if (usesharedmemory) printf("Shared Memory: "); else printf("Global Memory: "); printf("Kernel executed in %f milliseconds\n", milliseconds); } if (startup.benchmark) { if (usesharedmemory) printf("%.6g,", milliseconds); else printf("%.6g\n", milliseconds); } gpuErrchk(hipGetLastError()); gpuErrchk(hipMemcpy(host_result.values, device_result, sizeOfDataSet(host_result), hipMemcpyDeviceToHost)); gpuErrchk(hipFree(device_result)); gpuErrchk(hipFree(device_input)); return host_result; } void printDataSet(DataSet data){ for (int i = 0; i < data.size; i++) printf("%.6g, ", data.values[i]); printf("\n"); } void saveDataSetCSV(DataSet data, char* fileName){ char fileNameBuffer[256]; snprintf(fileNameBuffer, sizeof fileNameBuffer, "%s/%s%s", startup.output_directory, fileName, ".csv"); FILE* fp = fopen( fileNameBuffer, "w"); if (fp == nullptr) printf("Could not log to file\n"); else { for (int i = 0; i < data.size; i++){ fprintf(fp, "%.6g,", data.values[i]); } fprintf(fp, "\n"); } fclose(fp); } void AlgorithmsPerformanceBenchmark(){ for (int i = 4; i <= 268435456; i*=2) { int j = i/2; hipDeviceProp_t prop; hipGetDeviceProperties(&prop, 0); if (j > prop.sharedMemPerBlock/sizeof(float)) j = prop.sharedMemPerBlock/sizeof(float) - startup.threads_per_block; printf("%d,%d,", i, j); DataSet data = generateRandomDataSet(i); DataSet shared = CalculateSMA(data, j, true); DataSet global = CalculateSMA(data, j, false); free(data.values); free(shared.values); free(global.values); } } int main(int argc, char** argv){ for (int i = 0; i < argc; 
i++){ //if (strcmp(argv[i], "--help")==0) {printf("%s", help); exit(-1); } if (strcmp(argv[i], "--random_range")==0 && i+1 < argc) startup.random_range = atoi(argv[i+1]); if (strcmp(argv[i], "--seed")==0 && i+1 < argc) startup.seed = atoi(argv[i+1]); if (strcmp(argv[i], "--block_threads")==0 && i+1 < argc) startup.threads_per_block = atoi(argv[i+1]); if (strcmp(argv[i], "--sample_size")==0 && i+1 < argc) startup.sample_size = atoi(argv[i+1]); if (strcmp(argv[i], "--data_size")==0 && i+1 < argc) startup.data_size = atoi(argv[i+1]); if (strcmp(argv[i], "--save")==0) startup.save = true; if (strcmp(argv[i], "--print")==0) startup.print = true; if (strcmp(argv[i], "--print_result")==0) startup.print_result_only = true; if (strcmp(argv[i], "--benchmark")==0) startup.benchmark = true; if (strcmp(argv[i], "--single")==0) startup.single = true; } if (( startup.single || startup.benchmark ) == false) printf("Please select a runtime mode. There are two options --single or --benchmark\n\n\t--benchmark mode will continually increase the set size and sample size and compare the two algorithms.\n\n\t--single mode will apply SMA on a single randomly generated set. By default the dataset will be 10,000 elements with a sample size of 16. These parameters can be changes.\n\n"); if (startup.single && startup.benchmark) { printf("You cannot run both modes at the same time. Please use only --single or only --benchmark"); exit(0); } srand(startup.seed); if (startup.single) { DataSet data = generateRandomDataSet(startup.data_size); if(startup.print) printDataSet(data); if(startup.save) saveDataSetCSV(data, "Input"); int shared_memory_allocation_size = sizeof(float)*(startup.threads_per_block+startup.sample_size); hipDeviceProp_t prop; hipGetDeviceProperties(&prop, 0); bool useSharedMemory = (shared_memory_allocation_size <= prop.sharedMemPerBlock); DataSet shared = CalculateSMA(data, startup.sample_size, useSharedMemory); if(startup.print || startup.print_result_only) printDataSet(shared); if(startup.save) saveDataSetCSV(shared, "Result"); free(shared.values); free(data.values); } if (startup.benchmark) AlgorithmsPerformanceBenchmark(); }
e712a3af223d7ed1f270eb1edbfeadd2ef72e11a.cu
#include <stdio.h> #include <cuda.h> #include <time.h> struct Startup{ int seed = time(nullptr); int random_range = 100; int threads_per_block = 1024; int data_size = 10000; int sample_size = 16; char* output_directory = "."; bool print = false; bool print_result_only = false; bool save = false; bool benchmark = false; bool single = false; } startup; /* Found on the stack overflow: https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api Throws errors if cuda command doesn't return Success */ #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } struct DataSet{ float* values; int size; }; inline int sizeOfDataSet(DataSet data){ return sizeof(float)*data.size; } DataSet generateRandomDataSet(int size){ DataSet data; data.size = size; data.values = (float*)malloc(sizeof(float)*data.size); for (int i = 0; i < data.size; i++) data.values[i] = (float)(rand()%startup.random_range); return data; } bool CompareDataSet(DataSet d1, DataSet d2){ if (d1.size != d2.size) {printf("Datasets are not equal size\n"); return false;}; for (int i = 0; i < d1.size; i++) if (d1.values[i] != d2.values[i]){ printf("Dataset is different at %dth element. D1: %f, D2: %f", i, d1.values[i], d2.values[i] ); return false; } printf("D1 and D2 are equal!"); return true; } /*A cache in-efficent algorithm for computing SMA. Loads everything form global memory*/ __global__ void DeviceCalculateSMA_Global(float* input, int input_size, float* result, int result_size, int sample_size){ int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < result_size){ float sum = 0; for (int i = 0; i < sample_size; i++) sum += input[idx+i]; sum /= sample_size; result[idx] = sum; } } /*A cache efficent algorithm for SMA. Each block loads range of data used by each of its threads into shared memory. Then computes the moving average sum. This algorithm should becomes more efficent as the threads per block increases or as the sample size increases */ __global__ void DeviceCalculateSMA_Shared(float* input, int input_size, float* result, int result_size, int sample_size){ int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < input_size){ /*Shared memory. Size passed in with kernel parameters*/ extern __shared__ float cache[]; int cachedDataSize = sample_size + blockDim.x; /*Copy the data that will be used by the block into shared memory using all threads in the block.*/ for (int i = 0; i < cachedDataSize/blockDim.x+1; i++){ int cacheId = threadIdx.x+ i*blockDim.x; if (cacheId < cachedDataSize && cacheId+blockDim.x *blockIdx.x < input_size) cache[cacheId] = input[cacheId+blockDim.x *blockIdx.x]; } __syncthreads(); /*compute the sum using shared memory*/ float sum = 0; for (int i = 0; i < sample_size; i++){ if(i + threadIdx.x < cachedDataSize && i + idx < input_size) sum += cache[i+threadIdx.x]; } sum /= sample_size; /*store in global memory*/ if (idx < result_size) result[idx] = sum; } } DataSet CalculateSMA(DataSet input, int sample_size, bool usesharedmemory){ if(sample_size == 1) { printf("Warning! Samplesize is 1. 
Result will equal input dataset.\n"); } if(input.size < 1) { printf("Cannot compute a moving average with an empty dataset.\n"); exit(-1); } if(sample_size < 1) { printf("Cannot compute a moving average with a samplesize of 0.\n"); exit(-1); } if(sample_size > input.size) { printf("Error! Sample Size is larger than dataset. Please make samplesize a value less than or equal to dataset size.\n"); exit(-1); } int result_size = input.size-sample_size+1; DataSet host_result = {(float*)malloc(sizeof(float)*(result_size)), result_size}; float* device_input, *device_result; gpuErrchk(cudaMalloc((void **)&device_input, sizeOfDataSet(input) )); gpuErrchk(cudaMalloc((void **)&device_result, sizeOfDataSet(host_result) )); gpuErrchk(cudaMemcpy(device_input, input.values, sizeOfDataSet(input) , cudaMemcpyHostToDevice)); int threads_needed = host_result.size; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); if (usesharedmemory){ int shared_memory_allocation_size = sizeof(float)*(startup.threads_per_block+sample_size); /*If shared memory too small, then optimized algorithm cannot be run. Exit*/ cudaDeviceProp prop; cudaGetDeviceProperties(&prop, 0); if (shared_memory_allocation_size > prop.sharedMemPerBlock) {printf("Cannot use shared Memory Algorithm. Not enough shared memory for dataset!"); exit(-1);} cudaEventRecord(start); DeviceCalculateSMA_Shared<<<threads_needed/ startup.threads_per_block + 1, startup.threads_per_block, shared_memory_allocation_size>>> (device_input, input.size, device_result, host_result.size, sample_size); cudaEventRecord(stop); }else{ cudaEventRecord(start); DeviceCalculateSMA_Global<<<threads_needed/ startup.threads_per_block + 1, startup.threads_per_block>>> (device_input, input.size, device_result, host_result.size, sample_size); cudaEventRecord(stop); } cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); if (startup.single){ if (usesharedmemory) printf("Shared Memory: "); else printf("Global Memory: "); printf("Kernel executed in %f milliseconds\n", milliseconds); } if (startup.benchmark) { if (usesharedmemory) printf("%.6g,", milliseconds); else printf("%.6g\n", milliseconds); } gpuErrchk(cudaGetLastError()); gpuErrchk(cudaMemcpy(host_result.values, device_result, sizeOfDataSet(host_result), cudaMemcpyDeviceToHost)); gpuErrchk(cudaFree(device_result)); gpuErrchk(cudaFree(device_input)); return host_result; } void printDataSet(DataSet data){ for (int i = 0; i < data.size; i++) printf("%.6g, ", data.values[i]); printf("\n"); } void saveDataSetCSV(DataSet data, char* fileName){ char fileNameBuffer[256]; snprintf(fileNameBuffer, sizeof fileNameBuffer, "%s/%s%s", startup.output_directory, fileName, ".csv"); FILE* fp = fopen( fileNameBuffer, "w"); if (fp == nullptr) printf("Could not log to file\n"); else { for (int i = 0; i < data.size; i++){ fprintf(fp, "%.6g,", data.values[i]); } fprintf(fp, "\n"); } fclose(fp); } void AlgorithmsPerformanceBenchmark(){ for (int i = 4; i <= 268435456; i*=2) { int j = i/2; cudaDeviceProp prop; cudaGetDeviceProperties(&prop, 0); if (j > prop.sharedMemPerBlock/sizeof(float)) j = prop.sharedMemPerBlock/sizeof(float) - startup.threads_per_block; printf("%d,%d,", i, j); DataSet data = generateRandomDataSet(i); DataSet shared = CalculateSMA(data, j, true); DataSet global = CalculateSMA(data, j, false); free(data.values); free(shared.values); free(global.values); } } int main(int argc, char** argv){ for (int i = 0; i < argc; i++){ //if (strcmp(argv[i], "--help")==0) 
{printf("%s", help); exit(-1); } if (strcmp(argv[i], "--random_range")==0 && i+1 < argc) startup.random_range = atoi(argv[i+1]); if (strcmp(argv[i], "--seed")==0 && i+1 < argc) startup.seed = atoi(argv[i+1]); if (strcmp(argv[i], "--block_threads")==0 && i+1 < argc) startup.threads_per_block = atoi(argv[i+1]); if (strcmp(argv[i], "--sample_size")==0 && i+1 < argc) startup.sample_size = atoi(argv[i+1]); if (strcmp(argv[i], "--data_size")==0 && i+1 < argc) startup.data_size = atoi(argv[i+1]); if (strcmp(argv[i], "--save")==0) startup.save = true; if (strcmp(argv[i], "--print")==0) startup.print = true; if (strcmp(argv[i], "--print_result")==0) startup.print_result_only = true; if (strcmp(argv[i], "--benchmark")==0) startup.benchmark = true; if (strcmp(argv[i], "--single")==0) startup.single = true; } if (( startup.single || startup.benchmark ) == false) printf("Please select a runtime mode. There are two options --single or --benchmark\n\n\t--benchmark mode will continually increase the set size and sample size and compare the two algorithms.\n\n\t--single mode will apply SMA on a single randomly generated set. By default the dataset will be 10,000 elements with a sample size of 16. These parameters can be changes.\n\n"); if (startup.single && startup.benchmark) { printf("You cannot run both modes at the same time. Please use only --single or only --benchmark"); exit(0); } srand(startup.seed); if (startup.single) { DataSet data = generateRandomDataSet(startup.data_size); if(startup.print) printDataSet(data); if(startup.save) saveDataSetCSV(data, "Input"); int shared_memory_allocation_size = sizeof(float)*(startup.threads_per_block+startup.sample_size); cudaDeviceProp prop; cudaGetDeviceProperties(&prop, 0); bool useSharedMemory = (shared_memory_allocation_size <= prop.sharedMemPerBlock); DataSet shared = CalculateSMA(data, startup.sample_size, useSharedMemory); if(startup.print || startup.print_result_only) printDataSet(shared); if(startup.save) saveDataSetCSV(shared, "Result"); free(shared.values); free(data.values); } if (startup.benchmark) AlgorithmsPerformanceBenchmark(); }
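The pair above ships a CompareDataSet helper but never checks the GPU output against a CPU reference. As an optional cross-check, a straightforward CPU simple moving average is sketched below; it assumes it sits next to CalculateSMA and reuses the DataSet struct from the file, and the name CalculateSMA_CPU is hypothetical.

// Hypothetical CPU reference for the GPU SMA kernels (sketch only).
// Produces the same result_size = input.size - sample_size + 1 layout as CalculateSMA.
DataSet CalculateSMA_CPU(DataSet input, int sample_size) {
    int result_size = input.size - sample_size + 1;
    DataSet result = {(float*)malloc(sizeof(float) * result_size), result_size};
    for (int i = 0; i < result_size; i++) {
        float sum = 0.0f;
        for (int j = 0; j < sample_size; j++)
            sum += input.values[i + j];      // window of sample_size consecutive values
        result.values[i] = sum / sample_size;
    }
    return result;
}

Because the GPU kernels accumulate in a different order, the floating-point results can differ in the last bits, so the exact-equality check in CompareDataSet may report mismatches; a tolerance-based comparison is more appropriate for validation.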
1377c9dd9b20f064af039663dc274e057d464c0e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 @generated from zgemm_fermi_batched.cu normal z -> d, Fri Jan 30 19:00:10 2015 @author Jakub Kurzak @author Stan Tomov @author Mark Gates @author Azzam Haidar [zcds]gemm_fermi.cu defines the CPU driver. [zcds]gemm_fermi_kernels.h defines the block sizes for each precision. gemm_stencil_defs.h defines types and functions for precision-independent code. These files are included multiple times, once for each transpose version. gemm_stencil.cuh defines the GPU kernel (device function). gemm_kernel_batched.cuh defines the GPU kernel (global function). The batched version uses gemm_kernel_batched.cuh instead of gemm_kernel.cuh. */ #include "common_magma.h" #include "commonblas_d.h" #define PRECISION_d /////////////////////////////////////////////////////////////////////////////////////////////////// #include "dgemm_fermi_kernels_batched.h" /////////////////////////////////////////////////////////////////////////////////////////////////// /** Purpose ------- DGEMM performs one of the matrix-matrix operations C = alpha*op( A )*op( B ) + beta*C, where op( X ) is one of op( X ) = X or op( X ) = X**T or op( X ) = X**H, alpha and beta are scalars, and A, B and C are matrices, with op( A ) an m by k matrix, op( B ) a k by n matrix and C an m by n matrix. Parameters ---------- @param[in] transA CHARACTER*1. On entry, transA specifies the form of op( A ) to be used in the matrix multiplication as follows: - = 'N': op( A ) = A. - = 'T': op( A ) = A**T. - = 'C': op( A ) = A**H. @param[in] transB CHARACTER*1. On entry, transB specifies the form of op( B ) to be used in the matrix multiplication as follows: - = 'N': op( B ) = B. - = 'T': op( B ) = B**T. - = 'C': op( B ) = B**H. @param[in] m INTEGER. On entry, M specifies the number of rows of the matrix op( dA ) and of the matrix dC. M must be at least zero. @param[in] n INTEGER. On entry, N specifies the number of columns of the matrix op( dB ) and the number of columns of the matrix dC. N must be at least zero. @param[in] k INTEGER. On entry, K specifies the number of columns of the matrix op( dA ) and the number of rows of the matrix op( dB ). K must be at least zero. @param[in] alpha DOUBLE_PRECISION On entry, ALPHA specifies the scalar alpha. @param[in] dA DOUBLE_PRECISION array of DIMENSION ( LDA, ka ), where ka is k when transA = MagmaNoTrans, and is m otherwise. Before entry with transA = MagmaNoTrans, the leading m by k part of the array dA must contain the matrix dA, otherwise the leading k by m part of the array dA must contain the matrix dA. @param[in] ldda INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. When transA = MagmaNoTrans then LDA must be at least max( 1, m ), otherwise LDA must be at least max( 1, k ). @param[in] dB DOUBLE_PRECISION array of DIMENSION ( LDB, kb ), where kb is n when transB = MagmaNoTrans, and is k otherwise. Before entry with transB = MagmaNoTrans, the leading k by n part of the array dB must contain the matrix dB, otherwise the leading n by k part of the array dB must contain the matrix dB. @param[in] lddb INTEGER. On entry, LDB specifies the first dimension of dB as declared in the calling (sub) program. When transB = MagmaNoTrans then LDB must be at least max( 1, k ), otherwise LDB must be at least max( 1, n ). @param[in] beta DOUBLE_PRECISION. 
On entry, BETA specifies the scalar beta. When BETA is supplied as zero then dC need not be set on input. @param[in,out] dC DOUBLE_PRECISION array of DIMENSION ( LDC, n ). Before entry, the leading m by n part of the array dC must contain the matrix dC, except when beta is zero, in which case dC need not be set on entry. On exit, the array dC is overwritten by the m by n matrix ( alpha*op( dA )*op( dB ) + beta*dC ). @param[in] lddc INTEGER. On entry, LDC specifies the first dimension of dC as declared in the calling (sub) program. LDC must be at least max( 1, m ). @ingroup magma_dblas3 ********************************************************************/ extern "C" void magmablas_dgemm_batched_lg( magma_trans_t transA, magma_trans_t transB, magma_int_t m, magma_int_t n, magma_int_t k, double alpha, double const * const * dA_array, magma_int_t ldda, double const * const * dB_array, magma_int_t lddb, double beta, double **dC_array, magma_int_t lddc, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t info = 0; if ( transA != MagmaNoTrans && transA != MagmaTrans && transA != MagmaConjTrans ) info = -1; else if ( transB != MagmaNoTrans && transB != MagmaTrans && transB != MagmaConjTrans ) info = -2; else if ( m < 0 ) info = -3; else if ( n < 0 ) info = -4; else if ( k < 0 ) info = -5; else if ( transA == MagmaNoTrans ? ldda < m : ldda < k ) info = -8; else if ( transB == MagmaNoTrans ? lddb < k : lddb < n ) info = -10; else if ( lddc < m ) info = -13; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 ) { printf("arch < 200 not supported \n"); // TODO call cublas return; /* // -------------------- // call CUDA ARCH 1.x version // magmablas for [sd] precisions, cublas for [zc] precisions. #if defined(PRECISION_z) || defined(PRECISION_c) magma_dgemm( transA, transB, m, n, k, alpha, dA_array[0], ldda, dB_array[0], lddb, beta, dC_array[0], lddc ); #else magmablas_dgemm_tesla( transA, transB, m, n, k, alpha, dA_array[0], ldda, dB_array[0], lddb, beta, dC_array[0], lddc ); #endif return; */ } // -------------------- // CUDA ARCH 2.x (Fermi) version if ( m <= 0 || n <= 0 || k <= 0 ) return; size_t offsetA = 0; size_t offsetB = 0; int TransA = 2, TransB = 2; if ( transA == MagmaTrans ) TransA = 1; else if ( transA == MagmaNoTrans ) TransA = 0; if ( transB == MagmaTrans ) TransB = 1; else if ( transB == MagmaNoTrans ) TransB = 0; #ifdef TEXTURE_1D // allocate CPU ptr to copy the GPU array to it in order to bind texture double *cpuAarray[2], *cpuBarray[2]; magma_getvector( min(2,batchCount), sizeof(double*), dA_array, 1, cpuAarray, 1); magma_getvector( min(2,batchCount), sizeof(double*), dB_array, 1, cpuBarray, 1); magma_int_t matrixA_size = batchCount > 1 ? cpuAarray[1] - cpuAarray[0] : 0; magma_int_t matrixB_size = batchCount > 1 ? cpuBarray[1] - cpuBarray[0] : 0; magma_int_t Am = ( ! TransA ? m : k); magma_int_t An = (!TransA ? k : m); magma_int_t Bm = ( ! TransB ? k : n); magma_int_t Bn = (!TransB ? n : k); size_t sizeA = batchCount > 1 ? (size_t) matrixA_size * (size_t) batchCount : (size_t) ldda * (An - 1) + Am; size_t sizeB = batchCount > 1 ? 
(size_t) matrixB_size * (size_t) batchCount : (size_t) lddb * (Bn - 1) + Bm; size_t CUBLAS_MAX_1DBUF_SIZE = ((1 << 27) - 512); if ( sizeA >= CUBLAS_MAX_1DBUF_SIZE || sizeB >= CUBLAS_MAX_1DBUF_SIZE ) { printf("sizeA %d > CUBLAS_MAX_1DBUF_SIZE %d not supported \n",sizeA,CUBLAS_MAX_1DBUF_SIZE); // TODO call cublas printf("sizeB %d > CUBLAS_MAX_1DBUF_SIZE %d not supported \n",sizeB,CUBLAS_MAX_1DBUF_SIZE); // TODO call cublas return; /* magma_dgemm( transA, transB, m, n, k, alpha, dA_array[0], ldda, dB_array[0], lddb, beta, dC_array[0], lddc ); return; */ } // Set textures parameters tex_ref_A.normalized = false; tex_ref_A.filterMode = hipFilterModePoint; tex_ref_A.addressMode[0] = hipAddressModeClamp; tex_ref_B.normalized = false; tex_ref_B.filterMode = hipFilterModePoint; tex_ref_B.addressMode[0] = hipAddressModeClamp; // Bind A and B to texture references hipError_t err; err = hipBindTexture(&offsetA, tex_ref_A, cpuAarray[0], sizeA*sizeof(double)); if ( err != hipSuccess ) { fprintf( stderr, "cannot bind A to texture: %s (%d)\n", hipGetErrorString(err), err ); return; } err = hipBindTexture(&offsetB, tex_ref_B, cpuBarray[0], sizeB*sizeof(double)); if ( err != hipSuccess ) { fprintf( stderr, "cannot bind B to texture: %s (%d)\n", hipGetErrorString(err), err ); hipUnbindTexture( tex_ref_A ); return; } #endif // Set up grids dim3 dimBlock(DIM_X, DIM_Y); offsetA = offsetA/sizeof(double); offsetB = offsetB/sizeof(double); if ( TransA == 0 && TransB == 0 ) { dim3 dimGrid( (m - 1)/BLK_M_nn + 1, (n - 1)/BLK_N_nn + 1 , batchCount ); hipLaunchKernelGGL(( dgemm_kernel_fermi_nn_batched), dim3(dimGrid), dim3(dimBlock), 0, queue , m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 0 && TransB == 1 ) { dim3 dimGrid( (m - 1)/BLK_M_nt + 1, (n - 1)/BLK_N_nt + 1 , batchCount ); hipLaunchKernelGGL(( dgemm_kernel_fermi_nt_batched), dim3(dimGrid), dim3(dimBlock), 0, queue , m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 0 && TransB == 2 ) { dim3 dimGrid( (m - 1)/BLK_M_nc + 1, (n - 1)/BLK_N_nc + 1 , batchCount ); hipLaunchKernelGGL(( dgemm_kernel_fermi_nc_batched), dim3(dimGrid), dim3(dimBlock), 0, queue , m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 1 && TransB == 0 ) { dim3 dimGrid( (m - 1)/BLK_M_tn + 1, (n - 1)/BLK_N_tn + 1 , batchCount ); hipLaunchKernelGGL(( dgemm_kernel_fermi_tn_batched), dim3(dimGrid), dim3(dimBlock), 0, queue , m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 1 && TransB == 1 ) { dim3 dimGrid( (m - 1)/BLK_M_tt + 1, (n - 1)/BLK_N_tt + 1 , batchCount ); hipLaunchKernelGGL(( dgemm_kernel_fermi_tt_batched), dim3(dimGrid), dim3(dimBlock), 0, queue , m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 1 && TransB == 2 ) { dim3 dimGrid( (m - 1)/BLK_M_tc + 1, (n - 1)/BLK_N_tc + 1 , batchCount ); hipLaunchKernelGGL(( dgemm_kernel_fermi_tc_batched), dim3(dimGrid), dim3(dimBlock), 0, queue , m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 2 && TransB == 0 ) { dim3 dimGrid( (m - 1)/BLK_M_cn + 1, (n - 1)/BLK_N_cn + 1 , batchCount ); hipLaunchKernelGGL(( dgemm_kernel_fermi_cn_batched), dim3(dimGrid), dim3(dimBlock), 0, queue , m, n, k, dA_array, ldda, dB_array, lddb, 
dC_array, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 2 && TransB == 1 ) { dim3 dimGrid( (m - 1)/BLK_M_ct + 1, (n - 1)/BLK_N_ct + 1 , batchCount ); hipLaunchKernelGGL(( dgemm_kernel_fermi_ct_batched), dim3(dimGrid), dim3(dimBlock), 0, queue , m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 2 && TransB == 2 ) { dim3 dimGrid( (m - 1)/BLK_M_cc + 1, (n - 1)/BLK_N_cc + 1 , batchCount ); hipLaunchKernelGGL(( dgemm_kernel_fermi_cc_batched), dim3(dimGrid), dim3(dimBlock), 0, queue , m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } #ifdef TEXTURE_1D hipUnbindTexture( tex_ref_A ); hipUnbindTexture( tex_ref_B ); #endif } ///////////////////////////////////////////////////////////////////////////////////////////////////
1377c9dd9b20f064af039663dc274e057d464c0e.cu
/* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 @generated from zgemm_fermi_batched.cu normal z -> d, Fri Jan 30 19:00:10 2015 @author Jakub Kurzak @author Stan Tomov @author Mark Gates @author Azzam Haidar [zcds]gemm_fermi.cu defines the CPU driver. [zcds]gemm_fermi_kernels.h defines the block sizes for each precision. gemm_stencil_defs.h defines types and functions for precision-independent code. These files are included multiple times, once for each transpose version. gemm_stencil.cuh defines the GPU kernel (device function). gemm_kernel_batched.cuh defines the GPU kernel (global function). The batched version uses gemm_kernel_batched.cuh instead of gemm_kernel.cuh. */ #include "common_magma.h" #include "commonblas_d.h" #define PRECISION_d /////////////////////////////////////////////////////////////////////////////////////////////////// #include "dgemm_fermi_kernels_batched.h" /////////////////////////////////////////////////////////////////////////////////////////////////// /** Purpose ------- DGEMM performs one of the matrix-matrix operations C = alpha*op( A )*op( B ) + beta*C, where op( X ) is one of op( X ) = X or op( X ) = X**T or op( X ) = X**H, alpha and beta are scalars, and A, B and C are matrices, with op( A ) an m by k matrix, op( B ) a k by n matrix and C an m by n matrix. Parameters ---------- @param[in] transA CHARACTER*1. On entry, transA specifies the form of op( A ) to be used in the matrix multiplication as follows: - = 'N': op( A ) = A. - = 'T': op( A ) = A**T. - = 'C': op( A ) = A**H. @param[in] transB CHARACTER*1. On entry, transB specifies the form of op( B ) to be used in the matrix multiplication as follows: - = 'N': op( B ) = B. - = 'T': op( B ) = B**T. - = 'C': op( B ) = B**H. @param[in] m INTEGER. On entry, M specifies the number of rows of the matrix op( dA ) and of the matrix dC. M must be at least zero. @param[in] n INTEGER. On entry, N specifies the number of columns of the matrix op( dB ) and the number of columns of the matrix dC. N must be at least zero. @param[in] k INTEGER. On entry, K specifies the number of columns of the matrix op( dA ) and the number of rows of the matrix op( dB ). K must be at least zero. @param[in] alpha DOUBLE_PRECISION On entry, ALPHA specifies the scalar alpha. @param[in] dA DOUBLE_PRECISION array of DIMENSION ( LDA, ka ), where ka is k when transA = MagmaNoTrans, and is m otherwise. Before entry with transA = MagmaNoTrans, the leading m by k part of the array dA must contain the matrix dA, otherwise the leading k by m part of the array dA must contain the matrix dA. @param[in] ldda INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. When transA = MagmaNoTrans then LDA must be at least max( 1, m ), otherwise LDA must be at least max( 1, k ). @param[in] dB DOUBLE_PRECISION array of DIMENSION ( LDB, kb ), where kb is n when transB = MagmaNoTrans, and is k otherwise. Before entry with transB = MagmaNoTrans, the leading k by n part of the array dB must contain the matrix dB, otherwise the leading n by k part of the array dB must contain the matrix dB. @param[in] lddb INTEGER. On entry, LDB specifies the first dimension of dB as declared in the calling (sub) program. When transB = MagmaNoTrans then LDB must be at least max( 1, k ), otherwise LDB must be at least max( 1, n ). @param[in] beta DOUBLE_PRECISION. On entry, BETA specifies the scalar beta. 
When BETA is supplied as zero then dC need not be set on input. @param[in,out] dC DOUBLE_PRECISION array of DIMENSION ( LDC, n ). Before entry, the leading m by n part of the array dC must contain the matrix dC, except when beta is zero, in which case dC need not be set on entry. On exit, the array dC is overwritten by the m by n matrix ( alpha*op( dA )*op( dB ) + beta*dC ). @param[in] lddc INTEGER. On entry, LDC specifies the first dimension of dC as declared in the calling (sub) program. LDC must be at least max( 1, m ). @ingroup magma_dblas3 ********************************************************************/ extern "C" void magmablas_dgemm_batched_lg( magma_trans_t transA, magma_trans_t transB, magma_int_t m, magma_int_t n, magma_int_t k, double alpha, double const * const * dA_array, magma_int_t ldda, double const * const * dB_array, magma_int_t lddb, double beta, double **dC_array, magma_int_t lddc, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t info = 0; if ( transA != MagmaNoTrans && transA != MagmaTrans && transA != MagmaConjTrans ) info = -1; else if ( transB != MagmaNoTrans && transB != MagmaTrans && transB != MagmaConjTrans ) info = -2; else if ( m < 0 ) info = -3; else if ( n < 0 ) info = -4; else if ( k < 0 ) info = -5; else if ( transA == MagmaNoTrans ? ldda < m : ldda < k ) info = -8; else if ( transB == MagmaNoTrans ? lddb < k : lddb < n ) info = -10; else if ( lddc < m ) info = -13; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 ) { printf("arch < 200 not supported \n"); // TODO call cublas return; /* // -------------------- // call CUDA ARCH 1.x version // magmablas for [sd] precisions, cublas for [zc] precisions. #if defined(PRECISION_z) || defined(PRECISION_c) magma_dgemm( transA, transB, m, n, k, alpha, dA_array[0], ldda, dB_array[0], lddb, beta, dC_array[0], lddc ); #else magmablas_dgemm_tesla( transA, transB, m, n, k, alpha, dA_array[0], ldda, dB_array[0], lddb, beta, dC_array[0], lddc ); #endif return; */ } // -------------------- // CUDA ARCH 2.x (Fermi) version if ( m <= 0 || n <= 0 || k <= 0 ) return; size_t offsetA = 0; size_t offsetB = 0; int TransA = 2, TransB = 2; if ( transA == MagmaTrans ) TransA = 1; else if ( transA == MagmaNoTrans ) TransA = 0; if ( transB == MagmaTrans ) TransB = 1; else if ( transB == MagmaNoTrans ) TransB = 0; #ifdef TEXTURE_1D // allocate CPU ptr to copy the GPU array to it in order to bind texture double *cpuAarray[2], *cpuBarray[2]; magma_getvector( min(2,batchCount), sizeof(double*), dA_array, 1, cpuAarray, 1); magma_getvector( min(2,batchCount), sizeof(double*), dB_array, 1, cpuBarray, 1); magma_int_t matrixA_size = batchCount > 1 ? cpuAarray[1] - cpuAarray[0] : 0; magma_int_t matrixB_size = batchCount > 1 ? cpuBarray[1] - cpuBarray[0] : 0; magma_int_t Am = ( ! TransA ? m : k); magma_int_t An = (!TransA ? k : m); magma_int_t Bm = ( ! TransB ? k : n); magma_int_t Bn = (!TransB ? n : k); size_t sizeA = batchCount > 1 ? (size_t) matrixA_size * (size_t) batchCount : (size_t) ldda * (An - 1) + Am; size_t sizeB = batchCount > 1 ? 
(size_t) matrixB_size * (size_t) batchCount : (size_t) lddb * (Bn - 1) + Bm; size_t CUBLAS_MAX_1DBUF_SIZE = ((1 << 27) - 512); if ( sizeA >= CUBLAS_MAX_1DBUF_SIZE || sizeB >= CUBLAS_MAX_1DBUF_SIZE ) { printf("sizeA %d > CUBLAS_MAX_1DBUF_SIZE %d not supported \n",sizeA,CUBLAS_MAX_1DBUF_SIZE); // TODO call cublas printf("sizeB %d > CUBLAS_MAX_1DBUF_SIZE %d not supported \n",sizeB,CUBLAS_MAX_1DBUF_SIZE); // TODO call cublas return; /* magma_dgemm( transA, transB, m, n, k, alpha, dA_array[0], ldda, dB_array[0], lddb, beta, dC_array[0], lddc ); return; */ } // Set textures parameters tex_ref_A.normalized = false; tex_ref_A.filterMode = cudaFilterModePoint; tex_ref_A.addressMode[0] = cudaAddressModeClamp; tex_ref_B.normalized = false; tex_ref_B.filterMode = cudaFilterModePoint; tex_ref_B.addressMode[0] = cudaAddressModeClamp; // Bind A and B to texture references cudaError_t err; err = cudaBindTexture(&offsetA, tex_ref_A, cpuAarray[0], sizeA*sizeof(double)); if ( err != cudaSuccess ) { fprintf( stderr, "cannot bind A to texture: %s (%d)\n", cudaGetErrorString(err), err ); return; } err = cudaBindTexture(&offsetB, tex_ref_B, cpuBarray[0], sizeB*sizeof(double)); if ( err != cudaSuccess ) { fprintf( stderr, "cannot bind B to texture: %s (%d)\n", cudaGetErrorString(err), err ); cudaUnbindTexture( tex_ref_A ); return; } #endif // Set up grids dim3 dimBlock(DIM_X, DIM_Y); offsetA = offsetA/sizeof(double); offsetB = offsetB/sizeof(double); if ( TransA == 0 && TransB == 0 ) { dim3 dimGrid( (m - 1)/BLK_M_nn + 1, (n - 1)/BLK_N_nn + 1 , batchCount ); dgemm_kernel_fermi_nn_batched<<< dimGrid, dimBlock, 0, queue >>>( m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 0 && TransB == 1 ) { dim3 dimGrid( (m - 1)/BLK_M_nt + 1, (n - 1)/BLK_N_nt + 1 , batchCount ); dgemm_kernel_fermi_nt_batched<<< dimGrid, dimBlock, 0, queue >>>( m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 0 && TransB == 2 ) { dim3 dimGrid( (m - 1)/BLK_M_nc + 1, (n - 1)/BLK_N_nc + 1 , batchCount ); dgemm_kernel_fermi_nc_batched<<< dimGrid, dimBlock, 0, queue >>>( m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 1 && TransB == 0 ) { dim3 dimGrid( (m - 1)/BLK_M_tn + 1, (n - 1)/BLK_N_tn + 1 , batchCount ); dgemm_kernel_fermi_tn_batched<<< dimGrid, dimBlock, 0, queue >>>( m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 1 && TransB == 1 ) { dim3 dimGrid( (m - 1)/BLK_M_tt + 1, (n - 1)/BLK_N_tt + 1 , batchCount ); dgemm_kernel_fermi_tt_batched<<< dimGrid, dimBlock, 0, queue >>>( m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 1 && TransB == 2 ) { dim3 dimGrid( (m - 1)/BLK_M_tc + 1, (n - 1)/BLK_N_tc + 1 , batchCount ); dgemm_kernel_fermi_tc_batched<<< dimGrid, dimBlock, 0, queue >>>( m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 2 && TransB == 0 ) { dim3 dimGrid( (m - 1)/BLK_M_cn + 1, (n - 1)/BLK_N_cn + 1 , batchCount ); dgemm_kernel_fermi_cn_batched<<< dimGrid, dimBlock, 0, queue >>>( m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 2 && TransB == 1 ) { dim3 dimGrid( (m - 1)/BLK_M_ct + 1, (n - 1)/BLK_N_ct + 1 , batchCount ); 
dgemm_kernel_fermi_ct_batched<<< dimGrid, dimBlock, 0, queue >>>( m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 2 && TransB == 2 ) { dim3 dimGrid( (m - 1)/BLK_M_cc + 1, (n - 1)/BLK_N_cc + 1 , batchCount ); dgemm_kernel_fermi_cc_batched<<< dimGrid, dimBlock, 0, queue >>>( m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } #ifdef TEXTURE_1D cudaUnbindTexture( tex_ref_A ); cudaUnbindTexture( tex_ref_B ); #endif } ///////////////////////////////////////////////////////////////////////////////////////////////////
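// Illustrative sketch, not part of MAGMA: the pair above shows the main
// mechanical rewrite hipify applies to launch sites. A CUDA triple-chevron
// launch becomes hipLaunchKernelGGL with grid, block, dynamic shared memory
// and stream passed as explicit arguments, while runtime/texture calls are
// renamed (cudaBindTexture -> hipBindTexture, cudaFilterModePoint ->
// hipFilterModePoint, ...). The toy program below is a minimal,
// self-contained sketch of that launch mapping; scale_kernel is an
// illustrative name only.
#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void scale_kernel(double *x, double alpha, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;   // same body on CUDA and HIP
    if (i < n) x[i] *= alpha;
}

int main()
{
    const int n = 1024;
    double *d_x = NULL;
    hipMalloc((void **)&d_x, n * sizeof(double));
    hipMemset(d_x, 0, n * sizeof(double));

    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);

    // CUDA source:   scale_kernel<<< grid, block, 0, 0 >>>( d_x, 2.0, n );
    // hipify output (0 bytes of dynamic shared memory, default stream):
    hipLaunchKernelGGL(scale_kernel, grid, block, 0, 0, d_x, 2.0, n);

    hipDeviceSynchronize();
    hipFree(d_x);
    return 0;
}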
b6058ef253cc025e159f8dbfebd5440051f00f5a.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/strings/find_multiple.hpp> #include <cudf/strings/string_view.cuh> #include <cudf/strings/strings_column_view.hpp> #include <cudf/utilities/error.hpp> #include <thrust/transform.h> namespace cudf { namespace strings { namespace detail { std::unique_ptr<column> find_multiple( strings_column_view const& strings, strings_column_view const& targets, rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(), hipStream_t stream = 0) { auto strings_count = strings.size(); if (strings_count == 0) return make_empty_column(data_type{INT32}); auto targets_count = targets.size(); CUDF_EXPECTS(targets_count > 0, "Must include at least one search target"); CUDF_EXPECTS(!targets.has_nulls(), "Search targets cannot contain null strings"); auto strings_column = column_device_view::create(strings.parent(), stream); auto d_strings = *strings_column; auto targets_column = column_device_view::create(targets.parent(), stream); auto d_targets = *targets_column; // create output column auto total_count = strings_count * targets_count; auto results = make_numeric_column( data_type{INT32}, total_count, rmm::device_buffer{0, stream, mr}, 0, stream, mr); // no nulls auto results_view = results->mutable_view(); auto d_results = results_view.data<int32_t>(); // fill output column with position values thrust::transform(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(total_count), d_results, [d_strings, d_targets, targets_count] __device__(size_type idx) { size_type str_idx = idx / targets_count; if (d_strings.is_null(str_idx)) return -1; string_view d_str = d_strings.element<string_view>(str_idx); string_view d_tgt = d_targets.element<string_view>(idx % targets_count); return d_str.find(d_tgt); }); results->set_null_count(0); return results; } } // namespace detail // external API std::unique_ptr<column> find_multiple(strings_column_view const& strings, strings_column_view const& targets, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::find_multiple(strings, targets, mr); } } // namespace strings } // namespace cudf
b6058ef253cc025e159f8dbfebd5440051f00f5a.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/strings/find_multiple.hpp> #include <cudf/strings/string_view.cuh> #include <cudf/strings/strings_column_view.hpp> #include <cudf/utilities/error.hpp> #include <thrust/transform.h> namespace cudf { namespace strings { namespace detail { std::unique_ptr<column> find_multiple( strings_column_view const& strings, strings_column_view const& targets, rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(), cudaStream_t stream = 0) { auto strings_count = strings.size(); if (strings_count == 0) return make_empty_column(data_type{INT32}); auto targets_count = targets.size(); CUDF_EXPECTS(targets_count > 0, "Must include at least one search target"); CUDF_EXPECTS(!targets.has_nulls(), "Search targets cannot contain null strings"); auto strings_column = column_device_view::create(strings.parent(), stream); auto d_strings = *strings_column; auto targets_column = column_device_view::create(targets.parent(), stream); auto d_targets = *targets_column; // create output column auto total_count = strings_count * targets_count; auto results = make_numeric_column( data_type{INT32}, total_count, rmm::device_buffer{0, stream, mr}, 0, stream, mr); // no nulls auto results_view = results->mutable_view(); auto d_results = results_view.data<int32_t>(); // fill output column with position values thrust::transform(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(total_count), d_results, [d_strings, d_targets, targets_count] __device__(size_type idx) { size_type str_idx = idx / targets_count; if (d_strings.is_null(str_idx)) return -1; string_view d_str = d_strings.element<string_view>(str_idx); string_view d_tgt = d_targets.element<string_view>(idx % targets_count); return d_str.find(d_tgt); }); results->set_null_count(0); return results; } } // namespace detail // external API std::unique_ptr<column> find_multiple(strings_column_view const& strings, strings_column_view const& targets, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::find_multiple(strings, targets, mr); } } // namespace strings } // namespace cudf
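// Illustrative sketch (plain host C++, not part of libcudf): the transform in
// find_multiple above flattens a strings x targets grid into a single index
// range of size strings_count * targets_count, with idx / targets_count
// selecting the string and idx % targets_count selecting the target. The loop
// below replays that index math with std::string standing in for cudf's
// string_view; -1 marks "target not found", matching the device lambda.
#include <cstdio>
#include <string>
#include <vector>

int main()
{
    std::vector<std::string> strings = {"hello", "world"};   // made-up sample data
    std::vector<std::string> targets = {"l", "o", "z"};
    int targets_count = (int)targets.size();
    int total_count   = (int)strings.size() * targets_count;

    for (int idx = 0; idx < total_count; ++idx) {
        int str_idx = idx / targets_count;      // which input string
        int tgt_idx = idx % targets_count;      // which search target
        size_t pos  = strings[str_idx].find(targets[tgt_idx]);
        int result  = (pos == std::string::npos) ? -1 : (int)pos;
        printf("string %d, target %d -> %d\n", str_idx, tgt_idx, result);
    }
    return 0;
}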
62f87b58c1ec639ce8b1441931aead39c80e0c7d.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "reduce_v3.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            float *in = NULL;
            hipMalloc(&in, XSIZE*YSIZE);
            float *out = NULL;
            hipMalloc(&out, XSIZE*YSIZE);
            int n = XSIZE*YSIZE;
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) {
                iXSIZE++;
            }
            while(iYSIZE%BLOCKY!=0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( reduce_v3), dim3(gridBlock),dim3(threadBlock), 0, 0, in,out,n);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( reduce_v3), dim3(gridBlock),dim3(threadBlock), 0, 0, in,out,n);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( reduce_v3), dim3(gridBlock),dim3(threadBlock), 0, 0, in,out,n);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
62f87b58c1ec639ce8b1441931aead39c80e0c7d.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "reduce_v3.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            float *in = NULL;
            cudaMalloc(&in, XSIZE*YSIZE);
            float *out = NULL;
            cudaMalloc(&out, XSIZE*YSIZE);
            int n = XSIZE*YSIZE;
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) {
                iXSIZE++;
            }
            while(iYSIZE%BLOCKY!=0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            reduce_v3<<<gridBlock,threadBlock>>>(in,out,n);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                reduce_v3<<<gridBlock,threadBlock>>>(in,out,n);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                reduce_v3<<<gridBlock,threadBlock>>>(in,out,n);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
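// Illustrative sketch, not part of the generated harness: the loops above time
// 1000 asynchronous launches with std::chrono, so the reported figure mostly
// reflects enqueue cost unless the device is kept busy. The fragment below
// shows the same warm-up / timed-loop pattern measured with CUDA events
// instead; it assumes reduce_v3, in, out, n, gridBlock and threadBlock are
// declared exactly as in the harness above.
cudaEvent_t start_evt, stop_evt;
cudaEventCreate(&start_evt);
cudaEventCreate(&stop_evt);

for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {   // warm-up
    reduce_v3<<<gridBlock,threadBlock>>>(in,out,n);
}
cudaDeviceSynchronize();

const int iters = 1000;
cudaEventRecord(start_evt);
for (int loop_counter = 0; loop_counter < iters; ++loop_counter) {
    reduce_v3<<<gridBlock,threadBlock>>>(in,out,n);
}
cudaEventRecord(stop_evt);
cudaEventSynchronize(stop_evt);          // wait until the last launch finished

float total_ms = 0.0f;
cudaEventElapsedTime(&total_ms, start_evt, stop_evt);
printf("total %f ms, average %f us per launch\n",
       total_ms, total_ms * 1000.0f / iters);

cudaEventDestroy(start_evt);
cudaEventDestroy(stop_evt);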
2f1f84b64ddf7190bcced4337179be7c28c32118.hip
// !!! This is a file automatically generated by hipify!!! #include "SequenceVisitor.cuh" #include "PrefixSumHandler.cuh" template<> void SequenceVisitor::set_arguments_size<prefix_sum_ut_hits_t>( prefix_sum_ut_hits_t::arguments_t arguments, const RuntimeOptions& runtime_options, const Constants& constants, const HostBuffers& host_buffers) { arguments.set_size<dev_prefix_sum_auxiliary_array_3>(prefix_sum_ut_hits_t::aux_array_size( host_buffers.host_number_of_selected_events[0] * constants.host_unique_x_sector_layer_offsets[4])); } template<> void SequenceVisitor::visit<prefix_sum_ut_hits_t>( prefix_sum_ut_hits_t& state, const prefix_sum_ut_hits_t::arguments_t& arguments, const RuntimeOptions& runtime_options, const Constants& constants, HostBuffers& host_buffers, hipStream_t& cuda_stream, hipEvent_t& cuda_generic_event) { // Set size of the main array to be prefix summed state.set_size(host_buffers.host_number_of_selected_events[0] * constants.host_unique_x_sector_layer_offsets[4]); // Set the cuda_stream state.set_opts(cuda_stream); // Set arguments: Array to prefix sum and auxiliary array state.set_arguments(arguments.offset<dev_ut_hit_offsets>(), arguments.offset<dev_prefix_sum_auxiliary_array_3>()); // Invoke all steps of prefix sum state.invoke(); // Fetch total number of hits accumulated with all tracks cudaCheck(hipMemcpyAsync( host_buffers.host_accumulated_number_of_ut_hits, arguments.offset<dev_ut_hit_offsets>() + host_buffers.host_number_of_selected_events[0] * constants.host_unique_x_sector_layer_offsets[4], sizeof(uint), hipMemcpyDeviceToHost, cuda_stream)); hipEventRecord(cuda_generic_event, cuda_stream); hipEventSynchronize(cuda_generic_event); // // Now, we should have the offset instead, and the sum of all in host_accumulated_number_of_ut_hits // // Check that // cudaCheck(hipMemcpyAsync(host_ut_hit_count.data(), arguments.offset<dev_ut_hit_offsets>(), // arguments.size<dev_ut_hit_offsets>(), hipMemcpyDeviceToHost, stream)); hipEventRecord(cuda_generic_event, // stream); hipEventSynchronize(cuda_generic_event); for (int e=0; e<number_of_events; ++e) { // info_cout << "Event " << e << ", offset per sector group: "; // uint32_t* offset = host_ut_hit_count.data() + e * constants.host_unique_x_sector_layer_offsets[4]; // for (uint32_t i = 0; i < constants.host_unique_x_sector_layer_offsets[4]; ++i) { // info_cout << offset[i] << ", "; // } // info_cout << std::endl; // } // info_cout << "Total number of UT hits: " << *host_accumulated_number_of_ut_hits << std::endl; }
2f1f84b64ddf7190bcced4337179be7c28c32118.cu
#include "SequenceVisitor.cuh" #include "PrefixSumHandler.cuh" template<> void SequenceVisitor::set_arguments_size<prefix_sum_ut_hits_t>( prefix_sum_ut_hits_t::arguments_t arguments, const RuntimeOptions& runtime_options, const Constants& constants, const HostBuffers& host_buffers) { arguments.set_size<dev_prefix_sum_auxiliary_array_3>(prefix_sum_ut_hits_t::aux_array_size( host_buffers.host_number_of_selected_events[0] * constants.host_unique_x_sector_layer_offsets[4])); } template<> void SequenceVisitor::visit<prefix_sum_ut_hits_t>( prefix_sum_ut_hits_t& state, const prefix_sum_ut_hits_t::arguments_t& arguments, const RuntimeOptions& runtime_options, const Constants& constants, HostBuffers& host_buffers, cudaStream_t& cuda_stream, cudaEvent_t& cuda_generic_event) { // Set size of the main array to be prefix summed state.set_size(host_buffers.host_number_of_selected_events[0] * constants.host_unique_x_sector_layer_offsets[4]); // Set the cuda_stream state.set_opts(cuda_stream); // Set arguments: Array to prefix sum and auxiliary array state.set_arguments(arguments.offset<dev_ut_hit_offsets>(), arguments.offset<dev_prefix_sum_auxiliary_array_3>()); // Invoke all steps of prefix sum state.invoke(); // Fetch total number of hits accumulated with all tracks cudaCheck(cudaMemcpyAsync( host_buffers.host_accumulated_number_of_ut_hits, arguments.offset<dev_ut_hit_offsets>() + host_buffers.host_number_of_selected_events[0] * constants.host_unique_x_sector_layer_offsets[4], sizeof(uint), cudaMemcpyDeviceToHost, cuda_stream)); cudaEventRecord(cuda_generic_event, cuda_stream); cudaEventSynchronize(cuda_generic_event); // // Now, we should have the offset instead, and the sum of all in host_accumulated_number_of_ut_hits // // Check that // cudaCheck(cudaMemcpyAsync(host_ut_hit_count.data(), arguments.offset<dev_ut_hit_offsets>(), // arguments.size<dev_ut_hit_offsets>(), cudaMemcpyDeviceToHost, stream)); cudaEventRecord(cuda_generic_event, // stream); cudaEventSynchronize(cuda_generic_event); for (int e=0; e<number_of_events; ++e) { // info_cout << "Event " << e << ", offset per sector group: "; // uint32_t* offset = host_ut_hit_count.data() + e * constants.host_unique_x_sector_layer_offsets[4]; // for (uint32_t i = 0; i < constants.host_unique_x_sector_layer_offsets[4]; ++i) { // info_cout << offset[i] << ", "; // } // info_cout << std::endl; // } // info_cout << "Total number of UT hits: " << *host_accumulated_number_of_ut_hits << std::endl; }
84fc4d91451df19c3ea61d09a582ce996e2df129.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Author: Gerhard Hoffmann */ /* */ /* Batch implementation of Keccak-f1600. Calculates for 960 files as input as*/ /* output: the corresonding digest (of theoretical infinite length -> */ /* bitrate r = 1024). */ /* */ /* Note: The code below shows how to proceed in principle. It hashes the */ /* same file 960 times (with of course the same result). For a more realistic*/ /* scenario it would be necessary to have something like an input file */ /* containing the filenames to be hashed. Another point would be to make sure*/ /* that the GTX 295 with only ~2x900 MB main memory can handle all the files.*/ /* An approach would be to use streams. Another one to come up with kind of */ /* a scheduling algorithm for hashing what files first if the sizes of the */ /* files show a high variation. */ /* */ /* The kernels below have been executed on a grid with 120 blocks, where */ /* each block consists of 256 threads. The occupancy was reported as 0.5 by */ /* the CUDA profiler. The kernels have been based straight on the kernels of */ /* the basic implementation. */ /* */ /* Note that the GTX 295 has two built in GPUs. For this implementation only */ /* one of them has been used. Using both GPUs it would be necessary to use */ /* either two different CPU processes or CPU threads. Because the GPU scales */ /* almost perfectly, we considered it as not essential to use both cards. */ /* */ /* Some values reported by the CUDA profiler (hashing the same file 960x). */ /* */ /* File size [bytes] | Digest length [bits] | Time [nanoseconds] */ /* ------------------+----------------------+---------------------- */ /* 3510 | 100 | 11244544 */ /* 13227 | 100 | 41634434 */ /* 42653 | 100 | 133587688 */ /* 110455 | 100 | 345085750 */ /**/ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/stat.h> #include <inttypes.h> #include <errno.h> #include <cutil_inline.h> #include <cuda_keccak_batch.cuh> #define PRINT_GPU_RESULT \ printf("\nOutput of GPU:\n"); \ for(int i=0;i<200;++i) { \ printf("%02X ", h_out[j][i]); \ } printf("\n\n\n"); #define BLOCK_SIZE 256 /* threads per block */ #define BLOCKS_PER_SM 4 /* threads per streaming multiprocessor */ #define WARPS_PER_SM ((BLOCK_SIZE/32)*BLOCKS_PER_SM) #define SM 30 #define FILES (WARPS_PER_SM*SM) #define MAX_FILE_SIZE 1000000000 #define BITRATE 1024 #define ROUNDS 24 #define OFFSET 63 #define R64(a,b,c) (((a) << b) ^ ((a) >> c)) /* works on the GPU also for b = 64 or c = 64 */ static uint8_t **h_data; static uint8_t **h_out; static uint64_t **d_data; static uint64_t **d_out; static uint64_t *h_dblen; static uint64_t *d_dblen; static uint64_t *d_data2[FILES]; static uint64_t *d_out2[FILES]; static const uint64_t round_const[5][ROUNDS] = { {0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808AULL, 0x8000000080008000ULL, 0x000000000000808BULL, 0x0000000080000001ULL, 0x8000000080008081ULL, 0x8000000000008009ULL, 0x000000000000008AULL, 0x0000000000000088ULL, 0x0000000080008009ULL, 0x000000008000000AULL, 0x000000008000808BULL, 0x800000000000008BULL, 0x8000000000008089ULL, 0x8000000000008003ULL, 0x8000000000008002ULL, 0x8000000000000080ULL, 0x000000000000800AULL, 0x800000008000000AULL, 0x8000000080008081ULL, 0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL}, {0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL}, {0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 
0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL}, {0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL}, {0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL}}; /* Rho-Offsets. Note that for each entry pair their respective sum is 64. Only the first entry of each pair is a rho-offset. The second part is used in the R64 macros. */ static const int rho_offsets[25][2] = { /*y=0*/ /*y=1*/ /*y=2*/ /*y=3*/ /*y=4*/ /*x=0*/{ 0,64}, /*x=1*/{44,20}, /*x=2*/{43,21}, /*x=3*/{21,43}, /*x=4*/{14,50}, /*x=1*/{ 1,63}, /*x=2*/{ 6,58}, /*x=3*/{25,39}, /*x=4*/{ 8,56}, /*x=0*/{18,46}, /*x=2*/{62, 2}, /*x=3*/{55, 9}, /*x=4*/{39,25}, /*x=0*/{41,23}, /*x=1*/{ 2,62}, /*x=3*/{28,36}, /*x=4*/{20,44}, /*x=0*/{ 3,61}, /*x=1*/{45,19}, /*x=2*/{61, 3}, /*x=4*/{27,37}, /*x=0*/{36,28}, /*x=1*/{10,54}, /*x=2*/{15,49}, /*x=3*/{56, 8}}; static const int a_host[25] = { 0, 6, 12, 18, 24, 1, 7, 13, 19, 20, 2, 8, 14, 15, 21, 3, 9, 10, 16, 22, 4, 5, 11, 17, 23}; static const int b_host[25] = { 0, 1, 2, 3, 4, 1, 2, 3, 4, 0, 2, 3, 4, 0, 1, 3, 4, 0, 1, 2, 4, 0, 1, 2, 3}; static const int c_host[25][3] = { { 0, 1, 2}, { 1, 2, 3}, { 2, 3, 4}, { 3, 4, 0}, { 4, 0, 1}, { 5, 6, 7}, { 6, 7, 8}, { 7, 8, 9}, { 8, 9, 5}, { 9, 5, 6}, {10,11,12}, {11,12,13}, {12,13,14}, {13,14,10}, {14,10,11}, {15,16,17}, {16,17,18}, {17,18,19}, {18,19,15}, {19,15,16}, {20,21,22}, {21,22,23}, {22,23,24}, {23,24,20}, {24,20,21}}; static const int d_host[25] = { 0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 20, 21, 22, 23, 24, 5, 6, 7, 8, 9, 15, 16, 17, 18, 19}; __device__ __constant__ uint32_t a[25]; __device__ __constant__ uint32_t b[25]; __device__ __constant__ uint32_t c[25][3]; __device__ __constant__ uint32_t d[25]; __device__ __constant__ uint32_t ro[25][2]; __device__ __constant__ uint64_t rc[5][ROUNDS]; __global__ void keccac_squeeze_kernel(uint64_t **data) {/* In case a digest of length */ /* greater than 1024 bits is needed, call */ int const tid = threadIdx.x; /* this kernel multiple times. Another way */ int const tw = tid/32; /* would be to have a loop here and squeeze*/ int const t = tid%32; /* more than once. */ int const s = t%5; int const gw = (tid + blockIdx.x*blockDim.x)/32; __shared__ uint64_t A_[8][25]; __shared__ uint64_t C_[8][25]; __shared__ uint64_t D_[8][25]; if(t < 25) { /*each thread sets a pointer to its corresponding leaf (=warp) memory*/ uint64_t *__restrict__ A = &A_[tw][0]; uint64_t *__restrict__ C = &C_[tw][0]; uint64_t *__restrict__ D = &D_[tw][0]; A[t] = data[gw][t]; for(int i=0;i<ROUNDS;++i) { /* Keccak-f */ C[t] = A[s]^A[s+5]^A[s+10]^A[s+15]^A[s+20]; D[t] = C[b[20+s]] ^ R64(C[b[5+s]],1,63); C[t] = R64(A[a[t]]^D[b[t]], ro[t][0], ro[t][1]); A[d[t]] = C[c[t][0]] ^ ((~C[c[t][1]]) & C[c[t][2]]); A[t] ^= rc[(t==0) ? 0 : 1][i]; } data[gw][t] = A[t]; } } /* The batch kernel is executed in blocks consisting of 256 threads. The */ /* basic implementation of Keccak uses only one warp of 32 threads. Therefore*/ /* the batch kernel executes 8 such warps in parallel. 
*/ __global__ void keccac_kernel(uint64_t **d_data, uint64_t **out, uint64_t *dblen) { int const tid = threadIdx.x; int const tw = tid/32; /* warp of the thread local to the block */ int const t = tid%32; /* thread number local to the warp */ int const s = t%5; int const gw = (tid + blockIdx.x*blockDim.x)/32; /* global warp number */ __shared__ uint64_t A_[8][25]; /* 8 warps per block are executing Keccak*/ __shared__ uint64_t B_[8][25]; /* in parallel. */ __shared__ uint64_t C_[8][25]; __shared__ uint64_t D_[8][25]; if(t < 25) {/* only the lower 25 threads per warp are active. each thread*/ /* sets a pointer to its corresponding warp memory. This way,*/ /* no synchronization between the threads of the block is */ /* needed. Threads in a warp are always synchronized. */ uint64_t *__restrict__ A = &A_[tw][0], *__restrict__ B = &B_[tw][0]; uint64_t *__restrict__ C = &C_[tw][0], *__restrict__ D = &D_[tw][0]; uint64_t *__restrict__ data = d_data[gw]; uint64_t databitlen = dblen[gw]; A[t] = 0ULL; B[t] = 0ULL; if(t < 16) B[t] = data[t]; int const blocks = databitlen/BITRATE; for(int block=0;block<blocks;++block) {/* load data without crossing */ /* a 128-byte boundary. */ A[t] ^= B[t]; data += BITRATE/64; if(t < 16) B[t] = data[t]; /* prefetch data */ for(int i=0;i<ROUNDS;++i) { /* Keccak-f */ C[t] = A[s]^A[s+5]^A[s+10]^A[s+15]^A[s+20]; D[t] = C[b[20+s]] ^ R64(C[b[5+s]],1,63); C[t] = R64(A[a[t]]^D[b[t]], ro[t][0], ro[t][1]); A[d[t]] = C[c[t][0]] ^ ((~C[c[t][1]]) & C[c[t][2]]); A[t] ^= rc[(t==0) ? 0 : 1][i]; } databitlen -= BITRATE; } int const bytes = databitlen/8; if(t == 0) { /* pad the end of the data */ uint8_t *p = (uint8_t *)B+bytes; uint8_t const q = *p; *p++ = (q >> (8-(databitlen&7)) | (1 << (databitlen&7))); *p++ = 0x00; *p++ = BITRATE/8; *p++ = 0x01; while(p < (uint8_t *)&B[25]) *p++ = 0; } if(t < 16) A[t] ^= B[t]; /* load 128 byte of data */ for(int i=0;i<ROUNDS;++i) { /* Keccak-f */ C[t] = A[s]^A[s+5]^A[s+10]^A[s+15]^A[s+20]; D[t] = C[b[20+s]] ^ R64(C[b[5+s]],1,63); C[t] = R64(A[a[t]]^D[b[t]], ro[t][0], ro[t][1]); A[d[t]] = C[c[t][0]] ^ ((~C[c[t][1]]) & C[c[t][2]]); A[t] ^= rc[(t==0) ? 0 : 1][i]; } if((bytes+4) > BITRATE/8) {/*then thread t=0 has crossed the 128 byte*/ if(t < 16) B[t] = 0ULL;/* boundary and touched some higher parts */ if(t < 9) B[t] = B[t+16]; /* of B. */ if(t < 16) A[t] ^= B[t]; for(int i=0;i<ROUNDS;++i) { /* Keccak-f */ C[t] = A[s]^A[s+5]^A[s+10]^A[s+15]^A[s+20]; D[t] = C[b[20+s]] ^ R64(C[b[5+s]],1,63); C[t] = R64(A[a[t]]^D[b[t]], ro[t][0], ro[t][1]); A[d[t]] = C[c[t][0]] ^ ((~C[c[t][1]]) & C[c[t][2]]); A[t] ^= rc[(t==0) ? 
0 : 1][i]; } } out[gw][t] = A[t]; /* write the result */ } } /**/ /**/ /**/ void call_keccak_batch_kernel(char const *filename, int digestlength) { struct stat buf; size_t size; if(stat(filename, &buf) < 0) { fprintf(stderr, "stat %s failed: %s\n", filename, strerror(errno)); return; } if(buf.st_size == 0 || buf.st_size > MAX_FILE_SIZE/FILES) { fprintf(stderr, "%s wrong sized %d\n", filename, (int)buf.st_size); return; } /* align the data on BITRATE/8 bytes */ size = ((buf.st_size-1)/(BITRATE/8) + 1)*(BITRATE/8); h_data = (uint8_t **)malloc(FILES*sizeof(*h_data)); h_out = (uint8_t **)malloc(FILES*sizeof(*h_out)); h_dblen = (uint64_t *)malloc(FILES*sizeof(*h_dblen)); CUDA_SAFE_CALL(hipMalloc((void **)&d_dblen, FILES*sizeof(*d_dblen))); CUDA_SAFE_CALL(hipMalloc((void **)&d_data, FILES*sizeof(*d_data))); CUDA_SAFE_CALL(hipMalloc((void **)&d_out, FILES*sizeof(*d_out))); for(int i=0;i<FILES;++i) { /* allocate memory for each file */ h_data[i] = (uint8_t *)malloc(size); /* and for each output buffer */ h_out[i] = (uint8_t *)malloc(200); CUDA_SAFE_CALL(hipMalloc((void **)&d_data2[i], size)); CUDA_SAFE_CALL(hipMalloc((void **)&d_out2[i], 200)); } CUDA_SAFE_CALL(hipMemcpy(d_data, d_data2 /* copy the device pointers */ , FILES*sizeof(d_data2[0]), hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_out, d_out2 , FILES*sizeof(d_out2[0]), hipMemcpyHostToDevice)); FILE *in = fopen(filename, "r"); if(in == NULL) { fprintf(stderr, "open %s failed: %s\n", filename, strerror(errno)); return; } memset(&h_data[0][0], 0x00, size); /* read the file(s) */ if(fread(&h_data[0][0], 1, (size_t)buf.st_size, in) < buf.st_size) { fprintf(stderr, "read %s failed: %s\n", filename, strerror(errno)); return; } for(int i=1;i<FILES;++i) { /* copy the file content (only for this test) */ memcpy(h_data[i], h_data[0], size); } fclose(in); for(int j=0;j<FILES;++j) { int count = 0; for(int i=0;i<8;++i) { if((h_data[j][buf.st_size-1] >> i) & 1) { /* compute bit count */ count = 8 - i; break; } } h_dblen[j] = (buf.st_size-1)*8 + count; } CUDA_SAFE_CALL(hipMemcpy(d_dblen, h_dblen, FILES*sizeof(*h_dblen) , hipMemcpyHostToDevice)); /* copy the Keccak tables from host to GPU */ CUDA_SAFE_CALL(hipMemcpyToSymbol(a, a_host, sizeof(a_host))); CUDA_SAFE_CALL(hipMemcpyToSymbol(b, b_host, sizeof(b_host))); CUDA_SAFE_CALL(hipMemcpyToSymbol(c, c_host, sizeof(c_host))); CUDA_SAFE_CALL(hipMemcpyToSymbol(d, d_host, sizeof(d_host))); CUDA_SAFE_CALL(hipMemcpyToSymbol(ro, rho_offsets, sizeof(rho_offsets))); CUDA_SAFE_CALL(hipMemcpyToSymbol(rc, round_const, sizeof(round_const))); for(int i=0;i<FILES;++i) { /* copy the file contents to the GPU */ CUDA_SAFE_CALL(hipMemcpy(d_data2[i], h_data[i], size , hipMemcpyHostToDevice)); } /* call the GPU */ hipLaunchKernelGGL(( keccac_kernel), dim3(BLOCKS_PER_SM*SM),dim3(BLOCK_SIZE), 0, 0, /*BLOCKS_PER_SM*SM==FILES/8*/ d_data, d_out, d_dblen); for(int j=0;j<2/*FILES*/;++j) { /* fetch only two of the hashed files to */ memset(h_out[j], 0x00, 200); /* check for correctness */ CUDA_SAFE_CALL(hipMemcpy(h_out[j], d_out2[j], 200 , hipMemcpyDeviceToHost)); printf("FILE %03d:", j); PRINT_GPU_RESULT; } for(int j=0;j<digestlength/BITRATE;++j) { /* GPU: call the squeeze phase */ hipLaunchKernelGGL(( keccac_squeeze_kernel), dim3(BLOCKS_PER_SM*SM), dim3(BLOCK_SIZE), 0, 0, d_out); CUDA_SAFE_CALL(hipMemcpy(h_out, d_out, 200, hipMemcpyDeviceToHost)); PRINT_GPU_RESULT; } for(int i=0;i<FILES;++i) { /* release memory */ CUDA_SAFE_CALL(hipFree(d_data2[i])); CUDA_SAFE_CALL(hipFree(d_out2[i])); free(h_out[i]); free(h_data[i]); } 
CUDA_SAFE_CALL(hipFree(d_data)); CUDA_SAFE_CALL(hipFree(d_out)); CUDA_SAFE_CALL(hipFree(d_dblen)); free(h_dblen); free(h_data); free(h_out); } /********************************* end-of-file *******************************/
84fc4d91451df19c3ea61d09a582ce996e2df129.cu
/* Author: Gerhard Hoffmann */ /* */ /* Batch implementation of Keccak-f1600. Calculates for 960 files as input as*/ /* output: the corresonding digest (of theoretical infinite length -> */ /* bitrate r = 1024). */ /* */ /* Note: The code below shows how to proceed in principle. It hashes the */ /* same file 960 times (with of course the same result). For a more realistic*/ /* scenario it would be necessary to have something like an input file */ /* containing the filenames to be hashed. Another point would be to make sure*/ /* that the GTX 295 with only ~2x900 MB main memory can handle all the files.*/ /* An approach would be to use streams. Another one to come up with kind of */ /* a scheduling algorithm for hashing what files first if the sizes of the */ /* files show a high variation. */ /* */ /* The kernels below have been executed on a grid with 120 blocks, where */ /* each block consists of 256 threads. The occupancy was reported as 0.5 by */ /* the CUDA profiler. The kernels have been based straight on the kernels of */ /* the basic implementation. */ /* */ /* Note that the GTX 295 has two built in GPUs. For this implementation only */ /* one of them has been used. Using both GPUs it would be necessary to use */ /* either two different CPU processes or CPU threads. Because the GPU scales */ /* almost perfectly, we considered it as not essential to use both cards. */ /* */ /* Some values reported by the CUDA profiler (hashing the same file 960x). */ /* */ /* File size [bytes] | Digest length [bits] | Time [nanoseconds] */ /* ------------------+----------------------+---------------------- */ /* 3510 | 100 | 11244544 */ /* 13227 | 100 | 41634434 */ /* 42653 | 100 | 133587688 */ /* 110455 | 100 | 345085750 */ /**/ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/stat.h> #include <inttypes.h> #include <errno.h> #include <cutil_inline.h> #include <cuda_keccak_batch.cuh> #define PRINT_GPU_RESULT \ printf("\nOutput of GPU:\n"); \ for(int i=0;i<200;++i) { \ printf("%02X ", h_out[j][i]); \ } printf("\n\n\n"); #define BLOCK_SIZE 256 /* threads per block */ #define BLOCKS_PER_SM 4 /* threads per streaming multiprocessor */ #define WARPS_PER_SM ((BLOCK_SIZE/32)*BLOCKS_PER_SM) #define SM 30 #define FILES (WARPS_PER_SM*SM) #define MAX_FILE_SIZE 1000000000 #define BITRATE 1024 #define ROUNDS 24 #define OFFSET 63 #define R64(a,b,c) (((a) << b) ^ ((a) >> c)) /* works on the GPU also for b = 64 or c = 64 */ static uint8_t **h_data; static uint8_t **h_out; static uint64_t **d_data; static uint64_t **d_out; static uint64_t *h_dblen; static uint64_t *d_dblen; static uint64_t *d_data2[FILES]; static uint64_t *d_out2[FILES]; static const uint64_t round_const[5][ROUNDS] = { {0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808AULL, 0x8000000080008000ULL, 0x000000000000808BULL, 0x0000000080000001ULL, 0x8000000080008081ULL, 0x8000000000008009ULL, 0x000000000000008AULL, 0x0000000000000088ULL, 0x0000000080008009ULL, 0x000000008000000AULL, 0x000000008000808BULL, 0x800000000000008BULL, 0x8000000000008089ULL, 0x8000000000008003ULL, 0x8000000000008002ULL, 0x8000000000000080ULL, 0x000000000000800AULL, 0x800000008000000AULL, 0x8000000080008081ULL, 0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL}, {0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL}, {0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 
0ULL, 0ULL, 0ULL, 0ULL}, {0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL}, {0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL}}; /* Rho-Offsets. Note that for each entry pair their respective sum is 64. Only the first entry of each pair is a rho-offset. The second part is used in the R64 macros. */ static const int rho_offsets[25][2] = { /*y=0*/ /*y=1*/ /*y=2*/ /*y=3*/ /*y=4*/ /*x=0*/{ 0,64}, /*x=1*/{44,20}, /*x=2*/{43,21}, /*x=3*/{21,43}, /*x=4*/{14,50}, /*x=1*/{ 1,63}, /*x=2*/{ 6,58}, /*x=3*/{25,39}, /*x=4*/{ 8,56}, /*x=0*/{18,46}, /*x=2*/{62, 2}, /*x=3*/{55, 9}, /*x=4*/{39,25}, /*x=0*/{41,23}, /*x=1*/{ 2,62}, /*x=3*/{28,36}, /*x=4*/{20,44}, /*x=0*/{ 3,61}, /*x=1*/{45,19}, /*x=2*/{61, 3}, /*x=4*/{27,37}, /*x=0*/{36,28}, /*x=1*/{10,54}, /*x=2*/{15,49}, /*x=3*/{56, 8}}; static const int a_host[25] = { 0, 6, 12, 18, 24, 1, 7, 13, 19, 20, 2, 8, 14, 15, 21, 3, 9, 10, 16, 22, 4, 5, 11, 17, 23}; static const int b_host[25] = { 0, 1, 2, 3, 4, 1, 2, 3, 4, 0, 2, 3, 4, 0, 1, 3, 4, 0, 1, 2, 4, 0, 1, 2, 3}; static const int c_host[25][3] = { { 0, 1, 2}, { 1, 2, 3}, { 2, 3, 4}, { 3, 4, 0}, { 4, 0, 1}, { 5, 6, 7}, { 6, 7, 8}, { 7, 8, 9}, { 8, 9, 5}, { 9, 5, 6}, {10,11,12}, {11,12,13}, {12,13,14}, {13,14,10}, {14,10,11}, {15,16,17}, {16,17,18}, {17,18,19}, {18,19,15}, {19,15,16}, {20,21,22}, {21,22,23}, {22,23,24}, {23,24,20}, {24,20,21}}; static const int d_host[25] = { 0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 20, 21, 22, 23, 24, 5, 6, 7, 8, 9, 15, 16, 17, 18, 19}; __device__ __constant__ uint32_t a[25]; __device__ __constant__ uint32_t b[25]; __device__ __constant__ uint32_t c[25][3]; __device__ __constant__ uint32_t d[25]; __device__ __constant__ uint32_t ro[25][2]; __device__ __constant__ uint64_t rc[5][ROUNDS]; __global__ void keccac_squeeze_kernel(uint64_t **data) {/* In case a digest of length */ /* greater than 1024 bits is needed, call */ int const tid = threadIdx.x; /* this kernel multiple times. Another way */ int const tw = tid/32; /* would be to have a loop here and squeeze*/ int const t = tid%32; /* more than once. */ int const s = t%5; int const gw = (tid + blockIdx.x*blockDim.x)/32; __shared__ uint64_t A_[8][25]; __shared__ uint64_t C_[8][25]; __shared__ uint64_t D_[8][25]; if(t < 25) { /*each thread sets a pointer to its corresponding leaf (=warp) memory*/ uint64_t *__restrict__ A = &A_[tw][0]; uint64_t *__restrict__ C = &C_[tw][0]; uint64_t *__restrict__ D = &D_[tw][0]; A[t] = data[gw][t]; for(int i=0;i<ROUNDS;++i) { /* Keccak-f */ C[t] = A[s]^A[s+5]^A[s+10]^A[s+15]^A[s+20]; D[t] = C[b[20+s]] ^ R64(C[b[5+s]],1,63); C[t] = R64(A[a[t]]^D[b[t]], ro[t][0], ro[t][1]); A[d[t]] = C[c[t][0]] ^ ((~C[c[t][1]]) & C[c[t][2]]); A[t] ^= rc[(t==0) ? 0 : 1][i]; } data[gw][t] = A[t]; } } /* The batch kernel is executed in blocks consisting of 256 threads. The */ /* basic implementation of Keccak uses only one warp of 32 threads. Therefore*/ /* the batch kernel executes 8 such warps in parallel. 
*/ __global__ void keccac_kernel(uint64_t **d_data, uint64_t **out, uint64_t *dblen) { int const tid = threadIdx.x; int const tw = tid/32; /* warp of the thread local to the block */ int const t = tid%32; /* thread number local to the warp */ int const s = t%5; int const gw = (tid + blockIdx.x*blockDim.x)/32; /* global warp number */ __shared__ uint64_t A_[8][25]; /* 8 warps per block are executing Keccak*/ __shared__ uint64_t B_[8][25]; /* in parallel. */ __shared__ uint64_t C_[8][25]; __shared__ uint64_t D_[8][25]; if(t < 25) {/* only the lower 25 threads per warp are active. each thread*/ /* sets a pointer to its corresponding warp memory. This way,*/ /* no synchronization between the threads of the block is */ /* needed. Threads in a warp are always synchronized. */ uint64_t *__restrict__ A = &A_[tw][0], *__restrict__ B = &B_[tw][0]; uint64_t *__restrict__ C = &C_[tw][0], *__restrict__ D = &D_[tw][0]; uint64_t *__restrict__ data = d_data[gw]; uint64_t databitlen = dblen[gw]; A[t] = 0ULL; B[t] = 0ULL; if(t < 16) B[t] = data[t]; int const blocks = databitlen/BITRATE; for(int block=0;block<blocks;++block) {/* load data without crossing */ /* a 128-byte boundary. */ A[t] ^= B[t]; data += BITRATE/64; if(t < 16) B[t] = data[t]; /* prefetch data */ for(int i=0;i<ROUNDS;++i) { /* Keccak-f */ C[t] = A[s]^A[s+5]^A[s+10]^A[s+15]^A[s+20]; D[t] = C[b[20+s]] ^ R64(C[b[5+s]],1,63); C[t] = R64(A[a[t]]^D[b[t]], ro[t][0], ro[t][1]); A[d[t]] = C[c[t][0]] ^ ((~C[c[t][1]]) & C[c[t][2]]); A[t] ^= rc[(t==0) ? 0 : 1][i]; } databitlen -= BITRATE; } int const bytes = databitlen/8; if(t == 0) { /* pad the end of the data */ uint8_t *p = (uint8_t *)B+bytes; uint8_t const q = *p; *p++ = (q >> (8-(databitlen&7)) | (1 << (databitlen&7))); *p++ = 0x00; *p++ = BITRATE/8; *p++ = 0x01; while(p < (uint8_t *)&B[25]) *p++ = 0; } if(t < 16) A[t] ^= B[t]; /* load 128 byte of data */ for(int i=0;i<ROUNDS;++i) { /* Keccak-f */ C[t] = A[s]^A[s+5]^A[s+10]^A[s+15]^A[s+20]; D[t] = C[b[20+s]] ^ R64(C[b[5+s]],1,63); C[t] = R64(A[a[t]]^D[b[t]], ro[t][0], ro[t][1]); A[d[t]] = C[c[t][0]] ^ ((~C[c[t][1]]) & C[c[t][2]]); A[t] ^= rc[(t==0) ? 0 : 1][i]; } if((bytes+4) > BITRATE/8) {/*then thread t=0 has crossed the 128 byte*/ if(t < 16) B[t] = 0ULL;/* boundary and touched some higher parts */ if(t < 9) B[t] = B[t+16]; /* of B. */ if(t < 16) A[t] ^= B[t]; for(int i=0;i<ROUNDS;++i) { /* Keccak-f */ C[t] = A[s]^A[s+5]^A[s+10]^A[s+15]^A[s+20]; D[t] = C[b[20+s]] ^ R64(C[b[5+s]],1,63); C[t] = R64(A[a[t]]^D[b[t]], ro[t][0], ro[t][1]); A[d[t]] = C[c[t][0]] ^ ((~C[c[t][1]]) & C[c[t][2]]); A[t] ^= rc[(t==0) ? 
0 : 1][i]; } } out[gw][t] = A[t]; /* write the result */ } } /**/ /**/ /**/ void call_keccak_batch_kernel(char const *filename, int digestlength) { struct stat buf; size_t size; if(stat(filename, &buf) < 0) { fprintf(stderr, "stat %s failed: %s\n", filename, strerror(errno)); return; } if(buf.st_size == 0 || buf.st_size > MAX_FILE_SIZE/FILES) { fprintf(stderr, "%s wrong sized %d\n", filename, (int)buf.st_size); return; } /* align the data on BITRATE/8 bytes */ size = ((buf.st_size-1)/(BITRATE/8) + 1)*(BITRATE/8); h_data = (uint8_t **)malloc(FILES*sizeof(*h_data)); h_out = (uint8_t **)malloc(FILES*sizeof(*h_out)); h_dblen = (uint64_t *)malloc(FILES*sizeof(*h_dblen)); CUDA_SAFE_CALL(cudaMalloc((void **)&d_dblen, FILES*sizeof(*d_dblen))); CUDA_SAFE_CALL(cudaMalloc((void **)&d_data, FILES*sizeof(*d_data))); CUDA_SAFE_CALL(cudaMalloc((void **)&d_out, FILES*sizeof(*d_out))); for(int i=0;i<FILES;++i) { /* allocate memory for each file */ h_data[i] = (uint8_t *)malloc(size); /* and for each output buffer */ h_out[i] = (uint8_t *)malloc(200); CUDA_SAFE_CALL(cudaMalloc((void **)&d_data2[i], size)); CUDA_SAFE_CALL(cudaMalloc((void **)&d_out2[i], 200)); } CUDA_SAFE_CALL(cudaMemcpy(d_data, d_data2 /* copy the device pointers */ , FILES*sizeof(d_data2[0]), cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_out, d_out2 , FILES*sizeof(d_out2[0]), cudaMemcpyHostToDevice)); FILE *in = fopen(filename, "r"); if(in == NULL) { fprintf(stderr, "open %s failed: %s\n", filename, strerror(errno)); return; } memset(&h_data[0][0], 0x00, size); /* read the file(s) */ if(fread(&h_data[0][0], 1, (size_t)buf.st_size, in) < buf.st_size) { fprintf(stderr, "read %s failed: %s\n", filename, strerror(errno)); return; } for(int i=1;i<FILES;++i) { /* copy the file content (only for this test) */ memcpy(h_data[i], h_data[0], size); } fclose(in); for(int j=0;j<FILES;++j) { int count = 0; for(int i=0;i<8;++i) { if((h_data[j][buf.st_size-1] >> i) & 1) { /* compute bit count */ count = 8 - i; break; } } h_dblen[j] = (buf.st_size-1)*8 + count; } CUDA_SAFE_CALL(cudaMemcpy(d_dblen, h_dblen, FILES*sizeof(*h_dblen) , cudaMemcpyHostToDevice)); /* copy the Keccak tables from host to GPU */ CUDA_SAFE_CALL(cudaMemcpyToSymbol(a, a_host, sizeof(a_host))); CUDA_SAFE_CALL(cudaMemcpyToSymbol(b, b_host, sizeof(b_host))); CUDA_SAFE_CALL(cudaMemcpyToSymbol(c, c_host, sizeof(c_host))); CUDA_SAFE_CALL(cudaMemcpyToSymbol(d, d_host, sizeof(d_host))); CUDA_SAFE_CALL(cudaMemcpyToSymbol(ro, rho_offsets, sizeof(rho_offsets))); CUDA_SAFE_CALL(cudaMemcpyToSymbol(rc, round_const, sizeof(round_const))); for(int i=0;i<FILES;++i) { /* copy the file contents to the GPU */ CUDA_SAFE_CALL(cudaMemcpy(d_data2[i], h_data[i], size , cudaMemcpyHostToDevice)); } /* call the GPU */ keccac_kernel<<<BLOCKS_PER_SM*SM,BLOCK_SIZE>>>/*BLOCKS_PER_SM*SM==FILES/8*/ (d_data, d_out, d_dblen); for(int j=0;j<2/*FILES*/;++j) { /* fetch only two of the hashed files to */ memset(h_out[j], 0x00, 200); /* check for correctness */ CUDA_SAFE_CALL(cudaMemcpy(h_out[j], d_out2[j], 200 , cudaMemcpyDeviceToHost)); printf("FILE %03d:", j); PRINT_GPU_RESULT; } for(int j=0;j<digestlength/BITRATE;++j) { /* GPU: call the squeeze phase */ keccac_squeeze_kernel<<<BLOCKS_PER_SM*SM, BLOCK_SIZE>>>(d_out); CUDA_SAFE_CALL(cudaMemcpy(h_out, d_out, 200, cudaMemcpyDeviceToHost)); PRINT_GPU_RESULT; } for(int i=0;i<FILES;++i) { /* release memory */ CUDA_SAFE_CALL(cudaFree(d_data2[i])); CUDA_SAFE_CALL(cudaFree(d_out2[i])); free(h_out[i]); free(h_data[i]); } CUDA_SAFE_CALL(cudaFree(d_data)); 
CUDA_SAFE_CALL(cudaFree(d_out)); CUDA_SAFE_CALL(cudaFree(d_dblen)); free(h_dblen); free(h_data); free(h_out); } /********************************* end-of-file *******************************/
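// Illustrative sketch (plain host C++, not part of the Keccak sources): the
// batch kernel above assigns one warp per file. With BLOCK_SIZE = 256 each
// block holds 8 warps, tw = tid/32 picks the warp-local shared-memory slice,
// t = tid%32 is the lane (only lanes 0..24 hold the 5x5 state), and
// gw = (tid + blockIdx.x*blockDim.x)/32 is the global warp number, i.e. the
// index of the file being hashed. The loop below just replays that index math
// on the host and prints the mapping for the first block.
#include <cstdio>

int main()
{
    const int BLOCK_SIZE = 256;                    // threads per block, as above
    const int BLOCKS     = 120;                    // BLOCKS_PER_SM * SM = 4 * 30

    for (int block = 0; block < BLOCKS; ++block) {
        for (int tid = 0; tid < BLOCK_SIZE; ++tid) {
            int tw = tid / 32;                             // warp within the block
            int t  = tid % 32;                             // lane within the warp
            int gw = (tid + block * BLOCK_SIZE) / 32;      // global warp = file index
            if (block == 0 && t == 0)
                printf("block %d, warp %d -> file %d (lanes 0..24 active)\n",
                       block, tw, gw);
        }
    }
    return 0;
}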
b67201e79006c371a7cc262c1ce95d667aa527c2.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <vector>
#include <string>
#include <math.h>
#include "KR.h"
#include <assert.h>

using namespace std;

KR::KR(float* KerTrain, float* KerTrainTest, float* Y_train, float _lambda, int training_num_, int testing_num_, int gpu_id_)
{
    assert(KerTrain != NULL);
    assert(KerTrainTest != NULL);
    assert(Y_train != NULL);
    training_num = training_num_;
    testing_num = testing_num_;

    KerTr = (float*)malloc(sizeof(float) * training_num * training_num);
    assert(KerTr != NULL);
    for(int i_tr = 0; i_tr < training_num * training_num; i_tr++) { *(KerTr + i_tr) = *(KerTrain + i_tr); }

    KerTrTe = (float*)malloc(sizeof(float) * training_num * testing_num);
    assert(KerTrTe != NULL);
    for(int i_te = 0; i_te < training_num * testing_num; i_te++) { *(KerTrTe + i_te) = *(KerTrainTest + i_te); }

    Y_tr = (float*)malloc(sizeof(float) * training_num);
    assert(Y_tr != NULL);
    for(int i_tr = 0; i_tr < training_num; i_tr++) { *(Y_tr + i_tr) = *(Y_train + i_tr); }

    lambda = _lambda;
    gpu_id = gpu_id_;

    Y_Pre = (float*)malloc(sizeof(float) * testing_num);
    assert(Y_Pre != NULL);
    bias = (float*)malloc(sizeof(float));
    assert(bias != NULL);
    W = (float*)malloc(sizeof(float) * training_num);
    assert(W != NULL);
}

KR::KR(float* KerTrain, float* Y_train, float _lambda, int training_num_, int gpu_id_)
{
    assert(KerTrain != NULL);
    assert(Y_train != NULL);
    lambda = _lambda;
    training_num = training_num_;

    KerTr = (float*)malloc(sizeof(float) * training_num * training_num);
    assert(KerTr != NULL);
    for(int i_tr = 0; i_tr < training_num * training_num; i_tr++) { *(KerTr + i_tr) = *(KerTrain + i_tr); }

    Y_tr = (float*)malloc(sizeof(float) * training_num);
    assert(Y_tr);
    for(int i_tr = 0; i_tr < training_num; i_tr++) { *(Y_tr + i_tr) = *(Y_train + i_tr); }

    gpu_id = gpu_id_;

    bias = (float*)malloc(sizeof(float));
    assert(bias != NULL);
    W = (float*)malloc(sizeof(float) * training_num);
    assert(W != NULL);
}

KR::~KR()
{
    free(KerTr);
    if(KerTrTe != NULL) { free(KerTrTe); }
    free(Y_tr);
    free(Y_Pre);
    free(W);
}

void KR::checkStatus(culaStatus status)
{
    char buf[256];
    if(!status) { return; }
    culaGetErrorInfoString(status, culaGetErrorInfo(), buf, sizeof(buf));
    cout << "CULA Exception: " << buf << endl;
    culaShutdown();
    exit(EXIT_FAILURE);
}

void KR::print_matrix(const float* A, int nr_rows_A, int nr_cols_A)
{
    for(int i = 0; i < 5; i++)
    {
        for(int j = 0; j < 5; j++) { cout << A[j * nr_rows_A + i] << " "; }
        cout << endl;
    }
    /*
    for(int i = 0; i < nr_rows_A * nr_cols_A; i++)
        cout << A[i] << " ";
    cout << endl;
    */
}

void KR::compute()
{
    culaSelectDevice(gpu_id);
    culaStatus status;
    status = culaInitialize();
    checkStatus(status);

    float* h_I = NULL;
    float* h_ones = NULL;
    h_I = (float*)malloc(sizeof(float) * training_num * training_num);
    assert(h_I != NULL);
    h_ones = (float*)malloc(sizeof(float) * training_num);
    assert(h_ones != NULL);

    size_t j = 0;
    for(size_t i = 0; i < training_num * training_num; i++)
    {
        if(i % training_num == j) { h_I[i] = 1; }
        else { h_I[i] = 0; }
        if((i + 1) % training_num == 0) { j++; }
    }
    for(j = 0; j < training_num; j++) { h_ones[j] = 1; }

    float* d_KerTr = NULL;
    int* ipiv = NULL;
    float* d_I = NULL;
    float* d_H = NULL;
    float* d_ones = NULL;
    float* d_HKernel = NULL;
    float* d_YTrain = NULL;
    float* d_Yinvk = NULL;
    float* d_Yinvk_K = NULL;
    float* d_bias;
    float* d_YTrinvK = NULL;
    float* d_YPre = NULL;
    float* d_KerTrTe = NULL;

    hipMalloc(&d_KerTr, sizeof(float)*training_num * training_num);
    hipMalloc(&d_YTrain, sizeof(float)*training_num);
    hipMalloc(&d_I, sizeof(float)*training_num * training_num);
    hipMalloc(&d_H, sizeof(float)*training_num * training_num);
    hipMalloc(&d_HKernel, sizeof(float)* training_num * training_num);
    hipMalloc(&d_ones, sizeof(float)* training_num);
    hipMalloc(&ipiv, sizeof(int)*training_num);
    hipMalloc(&d_Yinvk, sizeof(float)*training_num);
    hipMalloc(&d_Yinvk_K, sizeof(float)*training_num);
    hipMalloc(&d_KerTrTe, sizeof(float)*training_num * testing_num);
    hipMalloc(&d_bias, sizeof(float));
    hipMalloc(&d_YPre, sizeof(float)*testing_num);
    hipMalloc(&d_YTrinvK, sizeof(float)*training_num);

    hipMemcpy(d_I, h_I, sizeof(float)* training_num * training_num, hipMemcpyHostToDevice);
    hipMemcpy(d_H, h_I, sizeof(float)* training_num * training_num, hipMemcpyHostToDevice);
    hipMemcpy(d_ones, h_ones, sizeof(float)* training_num, hipMemcpyHostToDevice);
    hipMemcpy(d_YTrain, Y_tr, sizeof(float)*training_num, hipMemcpyHostToDevice);
    hipMemcpy(d_KerTr, KerTr, sizeof(float)*training_num * training_num, hipMemcpyHostToDevice);
    hipMemcpy(d_KerTrTe, KerTrTe, sizeof(float)*training_num * testing_num, hipMemcpyHostToDevice);

    status = culaDeviceSgemm('N', 'N', training_num, training_num, 1, -1.0 / training_num, d_ones, training_num, d_ones, 1, 1.0, d_H, training_num);
    checkStatus(status);
    status = culaDeviceSgemm('N', 'N', training_num, training_num, training_num, 1, d_H, training_num, d_KerTr, training_num, 0.0, d_HKernel, training_num);
    checkStatus(status);
    status = culaDeviceSgemm('N', 'N', training_num, training_num, training_num, 1, d_HKernel, training_num, d_H, training_num, lambda, d_I, training_num);
    checkStatus(status);

    // invK: invI*H
    status = culaDeviceSgesv(training_num, training_num, d_I, training_num, ipiv, d_H, training_num);
    checkStatus(status);
    // d_H is invK //

    // bias
    status = culaDeviceSgemm('T', 'N', 1, training_num, training_num, 1, d_YTrain, training_num, d_H, training_num, 0, d_Yinvk, 1);
    checkStatus(status);
    status = culaDeviceSgemm('N', 'T', 1, training_num, training_num, 1, d_Yinvk, 1, d_KerTr, training_num, 0, d_Yinvk_K, 1);
    checkStatus(status);
    status = culaDeviceSgemm('N', 'N', 1, 1, training_num, 1, d_Yinvk_K, 1, d_ones, training_num, 0, d_bias, 1);
    checkStatus(status);
    status = culaDeviceSgemm('N', 'N', 1, 1, training_num, 1.0 / training_num, d_YTrain, 1, d_ones, training_num, -1.0 / training_num, d_bias, 1);
    checkStatus(status);
    // bias end

    hipMemcpy(W, d_Yinvk, sizeof(float)*training_num, hipMemcpyDeviceToHost);

    status = culaDeviceSgemm('N', 'T', 1, testing_num, training_num, 1, d_Yinvk, 1, d_KerTrTe, testing_num, 0, d_YPre, 1);
    checkStatus(status);
    // status = culaDeviceSgemm('N', 'N', 1, testing_num, training_num, 1, d_YTrinvK, 1, d_KerTrTe, training_num, 0, d_YPre, 1);
    // checkStatus(status);

    hipMemcpy(Y_Pre, d_YPre, sizeof(float)*testing_num, hipMemcpyDeviceToHost);
    hipMemcpy(bias, d_bias, sizeof(float), hipMemcpyDeviceToHost);
    for(int i = 0; i < testing_num; i++) { *(Y_Pre + i) = *(Y_Pre + i) + *bias; }

    culaShutdown();
    free(h_I);
    free(h_ones);
    hipFree(d_KerTr);
    hipFree(d_YTrain);
    hipFree(d_H);
    hipFree(d_I);
    hipFree(d_ones);
    hipFree(d_KerTr);
    hipFree(d_KerTrTe);
}

void KR::computeW()
{
    culaSelectDevice(gpu_id);
    culaStatus status;
    status = culaInitialize();
    checkStatus(status);

    float* h_I = NULL;
    float* h_ones = NULL;
    h_I = (float*)malloc(sizeof(float) * training_num * training_num);
    assert(h_I != NULL);
    h_ones = (float*)malloc(sizeof(float) * training_num);
    assert(h_ones != NULL);

    size_t j = 0;
    for(size_t i = 0; i < training_num * training_num; i++)
    {
        if(i % training_num == j) { h_I[i] = 1; }
        else { h_I[i] = 0; }
        if((i + 1) % training_num == 0) { j++; }
    }
    for(j = 0; j < training_num; j++) { h_ones[j] = 1; }

    float* d_KerTr = NULL;
    int* ipiv = NULL;
    float* d_I = NULL;
    float* d_H = NULL;
    float* d_ones = NULL;
    float* d_HKernel = NULL;
    float* d_YTrain = NULL;
    float* d_Yinvk = NULL;
    float* d_Yinvk_K = NULL;
    float* d_bias;
    float* d_YTrinvK = NULL;

    hipMalloc(&d_KerTr, sizeof(float)*training_num * training_num);
    hipMalloc(&d_YTrain, sizeof(float)*training_num);
    hipMalloc(&d_I, sizeof(float)*training_num * training_num);
    hipMalloc(&d_H, sizeof(float)*training_num * training_num);
    hipMalloc(&d_HKernel, sizeof(float)* training_num * training_num);
    hipMalloc(&d_ones, sizeof(float)* training_num);
    hipMalloc(&ipiv, sizeof(int)*training_num);
    hipMalloc(&d_Yinvk, sizeof(float)*training_num);
    hipMalloc(&d_Yinvk_K, sizeof(float)*training_num);
    hipMalloc(&d_bias, sizeof(float));
    hipMalloc(&d_YTrinvK, sizeof(float)*training_num);

    hipMemcpy(d_I, h_I, sizeof(float)* training_num * training_num, hipMemcpyHostToDevice);
    hipMemcpy(d_H, h_I, sizeof(float)* training_num * training_num, hipMemcpyHostToDevice);
    hipMemcpy(d_ones, h_ones, sizeof(float)* training_num, hipMemcpyHostToDevice);
    hipMemcpy(d_YTrain, Y_tr, sizeof(float)*training_num, hipMemcpyHostToDevice);
    hipMemcpy(d_KerTr, KerTr, sizeof(float)*training_num * training_num, hipMemcpyHostToDevice);
    hipDeviceSynchronize();

    status = culaDeviceSgemm('N', 'N', training_num, training_num, 1, -1.0 / training_num, d_ones, training_num, d_ones, 1, 1.0, d_H, training_num);
    checkStatus(status);
    /*
    float* hhh_H = (float*)malloc(sizeof(float)*training_num*training_num);
    hipMemcpy(hhh_H, d_H, sizeof(float)*training_num*training_num, hipMemcpyDeviceToHost);
    print_matrix(hhh_H, training_num, training_num);
    free(hhh_H);
    cout << endl;
    */

    status = culaDeviceSgemm('N', 'N', training_num, training_num, training_num, 1, d_H, training_num, d_KerTr, training_num, 0.0, d_HKernel, training_num);
    checkStatus(status);
    /*
    float* hhh_HKernel = (float*)malloc(sizeof(float)*training_num*training_num);
    hipMemcpy(hhh_HKernel, d_HKernel, sizeof(float)*training_num*training_num, hipMemcpyDeviceToHost);
    print_matrix(hhh_HKernel, training_num, training_num);
    free(hhh_HKernel);
    cout << endl;
    */

    status = culaDeviceSgemm('N', 'N', training_num, training_num, training_num, 1, d_HKernel, training_num, d_H, training_num, lambda, d_I, training_num);
    checkStatus(status);
    /*
    hhh_HKernel = (float*)malloc(sizeof(float)*training_num*training_num);
    hipMemcpy(hhh_HKernel, d_I, sizeof(float)*training_num*training_num, hipMemcpyDeviceToHost);
    print_matrix(hhh_HKernel, training_num, training_num);
    free(hhh_HKernel);
    cout << endl;
    */

    // invK: invI*H
    status = culaDeviceSgesv(training_num, training_num, d_I, training_num, ipiv, d_H, training_num);
    checkStatus(status);
    // d_H is invK //
    /*
    float* hhhh_H = (float*)malloc(sizeof(float)*training_num*training_num);
    hipMemcpy(hhhh_H, d_H, sizeof(float)*training_num*training_num, hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    print_matrix(hhhh_H, training_num, training_num);
    free(hhhh_H);
    cout << endl;
    */

    // bias
    status = culaDeviceSgemm('T', 'N', 1, training_num, training_num, 1, d_YTrain, training_num, d_H, training_num, 0, d_Yinvk, 1);
    checkStatus(status);
    status = culaDeviceSgemm('N', 'T', 1, training_num, training_num, 1, d_Yinvk, 1, d_KerTr, training_num, 0, d_Yinvk_K, 1);
    checkStatus(status);
    status = culaDeviceSgemm('N', 'N', 1, 1, training_num, 1, d_Yinvk_K, 1, d_ones, training_num, 0, d_bias, 1);
    checkStatus(status);
    status = culaDeviceSgemm('N', 'N', 1, 1, training_num, 1.0 / training_num, d_YTrain, 1, d_ones, training_num, -1.0 / training_num, d_bias, 1);
    checkStatus(status);
    // bias end

    hipMemcpy(W, d_Yinvk, sizeof(float)*training_num, hipMemcpyDeviceToHost);
    /*
    for(int i = 0; i < 5; i++)
        cout << W[i] << " ";
    cout << endl;
    */
    hipMemcpy(bias, d_bias, sizeof(float), hipMemcpyDeviceToHost);
    // cout << *bias << endl;

    hipDeviceSynchronize();
    culaShutdown();
    free(h_I);
    free(h_ones);
    hipFree(d_KerTr);
    hipFree(d_YTrain);
    hipFree(d_H);
    hipFree(d_I);
    hipFree(d_ones);
    hipFree(d_KerTr);
}

int KR::MeetsMinimumCulaRequirements()
{
    int cudaMinimumVersion = culaGetCudaMinimumVersion();
    int cudaRuntimeVersion = culaGetCudaRuntimeVersion();
    int cudaDriverVersion = culaGetCudaDriverVersion();
    int cublasMinimumVersion = culaGetCublasMinimumVersion();
    int cublasRuntimeVersion = culaGetCublasRuntimeVersion();

    if(cudaRuntimeVersion < cudaMinimumVersion)
    {
        printf("CUDA runtime version is insufficient; "
               "version %d or greater is required\n", cudaMinimumVersion);
        return 0;
    }
    if(cudaDriverVersion < cudaMinimumVersion)
    {
        printf("CUDA driver version is insufficient; "
               "version %d or greater is required\n", cudaMinimumVersion);
        return 0;
    }
    if(cublasRuntimeVersion < cublasMinimumVersion)
    {
        printf("CUBLAS runtime version is insufficient; "
               "version %d or greater is required\n", cublasMinimumVersion);
        return 0;
    }
    return 1;
}

float* KR::get_YPre()
{
    return Y_Pre;
}

float* KR::get_W()
{
    return W;
}

float* KR::get_bias()
{
    return bias;
}

class KR;
b67201e79006c371a7cc262c1ce95d667aa527c2.cu
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <vector>
#include <string>
#include <math.h>
#include "KR.h"
#include <assert.h>

using namespace std;

KR::KR(float* KerTrain, float* KerTrainTest, float* Y_train, float _lambda, int training_num_, int testing_num_, int gpu_id_)
{
    assert(KerTrain != NULL);
    assert(KerTrainTest != NULL);
    assert(Y_train != NULL);
    training_num = training_num_;
    testing_num = testing_num_;

    KerTr = (float*)malloc(sizeof(float) * training_num * training_num);
    assert(KerTr != NULL);
    for(int i_tr = 0; i_tr < training_num * training_num; i_tr++) { *(KerTr + i_tr) = *(KerTrain + i_tr); }

    KerTrTe = (float*)malloc(sizeof(float) * training_num * testing_num);
    assert(KerTrTe != NULL);
    for(int i_te = 0; i_te < training_num * testing_num; i_te++) { *(KerTrTe + i_te) = *(KerTrainTest + i_te); }

    Y_tr = (float*)malloc(sizeof(float) * training_num);
    assert(Y_tr != NULL);
    for(int i_tr = 0; i_tr < training_num; i_tr++) { *(Y_tr + i_tr) = *(Y_train + i_tr); }

    lambda = _lambda;
    gpu_id = gpu_id_;

    Y_Pre = (float*)malloc(sizeof(float) * testing_num);
    assert(Y_Pre != NULL);
    bias = (float*)malloc(sizeof(float));
    assert(bias != NULL);
    W = (float*)malloc(sizeof(float) * training_num);
    assert(W != NULL);
}

KR::KR(float* KerTrain, float* Y_train, float _lambda, int training_num_, int gpu_id_)
{
    assert(KerTrain != NULL);
    assert(Y_train != NULL);
    lambda = _lambda;
    training_num = training_num_;

    KerTr = (float*)malloc(sizeof(float) * training_num * training_num);
    assert(KerTr != NULL);
    for(int i_tr = 0; i_tr < training_num * training_num; i_tr++) { *(KerTr + i_tr) = *(KerTrain + i_tr); }

    Y_tr = (float*)malloc(sizeof(float) * training_num);
    assert(Y_tr);
    for(int i_tr = 0; i_tr < training_num; i_tr++) { *(Y_tr + i_tr) = *(Y_train + i_tr); }

    gpu_id = gpu_id_;

    bias = (float*)malloc(sizeof(float));
    assert(bias != NULL);
    W = (float*)malloc(sizeof(float) * training_num);
    assert(W != NULL);
}

KR::~KR()
{
    free(KerTr);
    if(KerTrTe != NULL) { free(KerTrTe); }
    free(Y_tr);
    free(Y_Pre);
    free(W);
}

void KR::checkStatus(culaStatus status)
{
    char buf[256];
    if(!status) { return; }
    culaGetErrorInfoString(status, culaGetErrorInfo(), buf, sizeof(buf));
    cout << "CULA Exception: " << buf << endl;
    culaShutdown();
    exit(EXIT_FAILURE);
}

void KR::print_matrix(const float* A, int nr_rows_A, int nr_cols_A)
{
    for(int i = 0; i < 5; i++)
    {
        for(int j = 0; j < 5; j++) { cout << A[j * nr_rows_A + i] << " "; }
        cout << endl;
    }
    /*
    for(int i = 0; i < nr_rows_A * nr_cols_A; i++)
        cout << A[i] << " ";
    cout << endl;
    */
}

void KR::compute()
{
    culaSelectDevice(gpu_id);
    culaStatus status;
    status = culaInitialize();
    checkStatus(status);

    float* h_I = NULL;
    float* h_ones = NULL;
    h_I = (float*)malloc(sizeof(float) * training_num * training_num);
    assert(h_I != NULL);
    h_ones = (float*)malloc(sizeof(float) * training_num);
    assert(h_ones != NULL);

    size_t j = 0;
    for(size_t i = 0; i < training_num * training_num; i++)
    {
        if(i % training_num == j) { h_I[i] = 1; }
        else { h_I[i] = 0; }
        if((i + 1) % training_num == 0) { j++; }
    }
    for(j = 0; j < training_num; j++) { h_ones[j] = 1; }

    float* d_KerTr = NULL;
    int* ipiv = NULL;
    float* d_I = NULL;
    float* d_H = NULL;
    float* d_ones = NULL;
    float* d_HKernel = NULL;
    float* d_YTrain = NULL;
    float* d_Yinvk = NULL;
    float* d_Yinvk_K = NULL;
    float* d_bias;
    float* d_YTrinvK = NULL;
    float* d_YPre = NULL;
    float* d_KerTrTe = NULL;

    cudaMalloc(&d_KerTr, sizeof(float)*training_num * training_num);
    cudaMalloc(&d_YTrain, sizeof(float)*training_num);
    cudaMalloc(&d_I, sizeof(float)*training_num * training_num);
    cudaMalloc(&d_H, sizeof(float)*training_num * training_num);
    cudaMalloc(&d_HKernel, sizeof(float)* training_num * training_num);
    cudaMalloc(&d_ones, sizeof(float)* training_num);
    cudaMalloc(&ipiv, sizeof(int)*training_num);
    cudaMalloc(&d_Yinvk, sizeof(float)*training_num);
    cudaMalloc(&d_Yinvk_K, sizeof(float)*training_num);
    cudaMalloc(&d_KerTrTe, sizeof(float)*training_num * testing_num);
    cudaMalloc(&d_bias, sizeof(float));
    cudaMalloc(&d_YPre, sizeof(float)*testing_num);
    cudaMalloc(&d_YTrinvK, sizeof(float)*training_num);

    cudaMemcpy(d_I, h_I, sizeof(float)* training_num * training_num, cudaMemcpyHostToDevice);
    cudaMemcpy(d_H, h_I, sizeof(float)* training_num * training_num, cudaMemcpyHostToDevice);
    cudaMemcpy(d_ones, h_ones, sizeof(float)* training_num, cudaMemcpyHostToDevice);
    cudaMemcpy(d_YTrain, Y_tr, sizeof(float)*training_num, cudaMemcpyHostToDevice);
    cudaMemcpy(d_KerTr, KerTr, sizeof(float)*training_num * training_num, cudaMemcpyHostToDevice);
    cudaMemcpy(d_KerTrTe, KerTrTe, sizeof(float)*training_num * testing_num, cudaMemcpyHostToDevice);

    status = culaDeviceSgemm('N', 'N', training_num, training_num, 1, -1.0 / training_num, d_ones, training_num, d_ones, 1, 1.0, d_H, training_num);
    checkStatus(status);
    status = culaDeviceSgemm('N', 'N', training_num, training_num, training_num, 1, d_H, training_num, d_KerTr, training_num, 0.0, d_HKernel, training_num);
    checkStatus(status);
    status = culaDeviceSgemm('N', 'N', training_num, training_num, training_num, 1, d_HKernel, training_num, d_H, training_num, lambda, d_I, training_num);
    checkStatus(status);

    // invK: invI*H
    status = culaDeviceSgesv(training_num, training_num, d_I, training_num, ipiv, d_H, training_num);
    checkStatus(status);
    // d_H is invK //

    // bias
    status = culaDeviceSgemm('T', 'N', 1, training_num, training_num, 1, d_YTrain, training_num, d_H, training_num, 0, d_Yinvk, 1);
    checkStatus(status);
    status = culaDeviceSgemm('N', 'T', 1, training_num, training_num, 1, d_Yinvk, 1, d_KerTr, training_num, 0, d_Yinvk_K, 1);
    checkStatus(status);
    status = culaDeviceSgemm('N', 'N', 1, 1, training_num, 1, d_Yinvk_K, 1, d_ones, training_num, 0, d_bias, 1);
    checkStatus(status);
    status = culaDeviceSgemm('N', 'N', 1, 1, training_num, 1.0 / training_num, d_YTrain, 1, d_ones, training_num, -1.0 / training_num, d_bias, 1);
    checkStatus(status);
    // bias end

    cudaMemcpy(W, d_Yinvk, sizeof(float)*training_num, cudaMemcpyDeviceToHost);

    status = culaDeviceSgemm('N', 'T', 1, testing_num, training_num, 1, d_Yinvk, 1, d_KerTrTe, testing_num, 0, d_YPre, 1);
    checkStatus(status);
    // status = culaDeviceSgemm('N', 'N', 1, testing_num, training_num, 1, d_YTrinvK, 1, d_KerTrTe, training_num, 0, d_YPre, 1);
    // checkStatus(status);

    cudaMemcpy(Y_Pre, d_YPre, sizeof(float)*testing_num, cudaMemcpyDeviceToHost);
    cudaMemcpy(bias, d_bias, sizeof(float), cudaMemcpyDeviceToHost);
    for(int i = 0; i < testing_num; i++) { *(Y_Pre + i) = *(Y_Pre + i) + *bias; }

    culaShutdown();
    free(h_I);
    free(h_ones);
    cudaFree(d_KerTr);
    cudaFree(d_YTrain);
    cudaFree(d_H);
    cudaFree(d_I);
    cudaFree(d_ones);
    cudaFree(d_KerTr);
    cudaFree(d_KerTrTe);
}

void KR::computeW()
{
    culaSelectDevice(gpu_id);
    culaStatus status;
    status = culaInitialize();
    checkStatus(status);

    float* h_I = NULL;
    float* h_ones = NULL;
    h_I = (float*)malloc(sizeof(float) * training_num * training_num);
    assert(h_I != NULL);
    h_ones = (float*)malloc(sizeof(float) * training_num);
    assert(h_ones != NULL);

    size_t j = 0;
    for(size_t i = 0; i < training_num * training_num; i++)
    {
        if(i % training_num == j) { h_I[i] = 1; }
        else { h_I[i] = 0; }
        if((i + 1) % training_num == 0) { j++; }
    }
    for(j = 0; j < training_num; j++) { h_ones[j] = 1; }

    float* d_KerTr = NULL;
    int* ipiv = NULL;
    float* d_I = NULL;
    float* d_H = NULL;
    float* d_ones = NULL;
    float* d_HKernel = NULL;
    float* d_YTrain = NULL;
    float* d_Yinvk = NULL;
    float* d_Yinvk_K = NULL;
    float* d_bias;
    float* d_YTrinvK = NULL;

    cudaMalloc(&d_KerTr, sizeof(float)*training_num * training_num);
    cudaMalloc(&d_YTrain, sizeof(float)*training_num);
    cudaMalloc(&d_I, sizeof(float)*training_num * training_num);
    cudaMalloc(&d_H, sizeof(float)*training_num * training_num);
    cudaMalloc(&d_HKernel, sizeof(float)* training_num * training_num);
    cudaMalloc(&d_ones, sizeof(float)* training_num);
    cudaMalloc(&ipiv, sizeof(int)*training_num);
    cudaMalloc(&d_Yinvk, sizeof(float)*training_num);
    cudaMalloc(&d_Yinvk_K, sizeof(float)*training_num);
    cudaMalloc(&d_bias, sizeof(float));
    cudaMalloc(&d_YTrinvK, sizeof(float)*training_num);

    cudaMemcpy(d_I, h_I, sizeof(float)* training_num * training_num, cudaMemcpyHostToDevice);
    cudaMemcpy(d_H, h_I, sizeof(float)* training_num * training_num, cudaMemcpyHostToDevice);
    cudaMemcpy(d_ones, h_ones, sizeof(float)* training_num, cudaMemcpyHostToDevice);
    cudaMemcpy(d_YTrain, Y_tr, sizeof(float)*training_num, cudaMemcpyHostToDevice);
    cudaMemcpy(d_KerTr, KerTr, sizeof(float)*training_num * training_num, cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();

    status = culaDeviceSgemm('N', 'N', training_num, training_num, 1, -1.0 / training_num, d_ones, training_num, d_ones, 1, 1.0, d_H, training_num);
    checkStatus(status);
    /*
    float* hhh_H = (float*)malloc(sizeof(float)*training_num*training_num);
    cudaMemcpy(hhh_H, d_H, sizeof(float)*training_num*training_num, cudaMemcpyDeviceToHost);
    print_matrix(hhh_H, training_num, training_num);
    free(hhh_H);
    cout << endl;
    */

    status = culaDeviceSgemm('N', 'N', training_num, training_num, training_num, 1, d_H, training_num, d_KerTr, training_num, 0.0, d_HKernel, training_num);
    checkStatus(status);
    /*
    float* hhh_HKernel = (float*)malloc(sizeof(float)*training_num*training_num);
    cudaMemcpy(hhh_HKernel, d_HKernel, sizeof(float)*training_num*training_num, cudaMemcpyDeviceToHost);
    print_matrix(hhh_HKernel, training_num, training_num);
    free(hhh_HKernel);
    cout << endl;
    */

    status = culaDeviceSgemm('N', 'N', training_num, training_num, training_num, 1, d_HKernel, training_num, d_H, training_num, lambda, d_I, training_num);
    checkStatus(status);
    /*
    hhh_HKernel = (float*)malloc(sizeof(float)*training_num*training_num);
    cudaMemcpy(hhh_HKernel, d_I, sizeof(float)*training_num*training_num, cudaMemcpyDeviceToHost);
    print_matrix(hhh_HKernel, training_num, training_num);
    free(hhh_HKernel);
    cout << endl;
    */

    // invK: invI*H
    status = culaDeviceSgesv(training_num, training_num, d_I, training_num, ipiv, d_H, training_num);
    checkStatus(status);
    // d_H is invK //
    /*
    float* hhhh_H = (float*)malloc(sizeof(float)*training_num*training_num);
    cudaMemcpy(hhhh_H, d_H, sizeof(float)*training_num*training_num, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    print_matrix(hhhh_H, training_num, training_num);
    free(hhhh_H);
    cout << endl;
    */

    // bias
    status = culaDeviceSgemm('T', 'N', 1, training_num, training_num, 1, d_YTrain, training_num, d_H, training_num, 0, d_Yinvk, 1);
    checkStatus(status);
    status = culaDeviceSgemm('N', 'T', 1, training_num, training_num, 1, d_Yinvk, 1, d_KerTr, training_num, 0, d_Yinvk_K, 1);
    checkStatus(status);
    status = culaDeviceSgemm('N', 'N', 1, 1, training_num, 1, d_Yinvk_K, 1, d_ones, training_num, 0, d_bias, 1);
    checkStatus(status);
    status = culaDeviceSgemm('N', 'N', 1, 1, training_num, 1.0 / training_num, d_YTrain, 1, d_ones, training_num, -1.0 / training_num, d_bias, 1);
    checkStatus(status);
    // bias end

    cudaMemcpy(W, d_Yinvk, sizeof(float)*training_num, cudaMemcpyDeviceToHost);
    /*
    for(int i = 0; i < 5; i++)
        cout << W[i] << " ";
    cout << endl;
    */
    cudaMemcpy(bias, d_bias, sizeof(float), cudaMemcpyDeviceToHost);
    // cout << *bias << endl;

    cudaDeviceSynchronize();
    culaShutdown();
    free(h_I);
    free(h_ones);
    cudaFree(d_KerTr);
    cudaFree(d_YTrain);
    cudaFree(d_H);
    cudaFree(d_I);
    cudaFree(d_ones);
    cudaFree(d_KerTr);
}

int KR::MeetsMinimumCulaRequirements()
{
    int cudaMinimumVersion = culaGetCudaMinimumVersion();
    int cudaRuntimeVersion = culaGetCudaRuntimeVersion();
    int cudaDriverVersion = culaGetCudaDriverVersion();
    int cublasMinimumVersion = culaGetCublasMinimumVersion();
    int cublasRuntimeVersion = culaGetCublasRuntimeVersion();

    if(cudaRuntimeVersion < cudaMinimumVersion)
    {
        printf("CUDA runtime version is insufficient; "
               "version %d or greater is required\n", cudaMinimumVersion);
        return 0;
    }
    if(cudaDriverVersion < cudaMinimumVersion)
    {
        printf("CUDA driver version is insufficient; "
               "version %d or greater is required\n", cudaMinimumVersion);
        return 0;
    }
    if(cublasRuntimeVersion < cublasMinimumVersion)
    {
        printf("CUBLAS runtime version is insufficient; "
               "version %d or greater is required\n", cublasMinimumVersion);
        return 0;
    }
    return 1;
}

float* KR::get_YPre()
{
    return Y_Pre;
}

float* KR::get_W()
{
    return W;
}

float* KR::get_bias()
{
    return bias;
}

class KR;
7061e7788597f7fc618280b93add6671c1621d7d.hip
// !!! This is a file automatically generated by hipify!!! // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #define EIGEN_TEST_NO_LONGDOUBLE #define EIGEN_TEST_NO_COMPLEX #define EIGEN_TEST_FUNC cxx11_tensor_cuda #define EIGEN_USE_GPU #if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500 #include <hip/hip_fp16.h> #endif #include "main.h" #include <unsupported/Eigen/CXX11/Tensor> using Eigen::Tensor; void test_cuda_nullary() { Tensor<float, 1, 0, int> in1(2); Tensor<float, 1, 0, int> in2(2); in1.setRandom(); in2.setRandom(); std::size_t tensor_bytes = in1.size() * sizeof(float); float* d_in1; float* d_in2; hipMalloc((void**)(&d_in1), tensor_bytes); hipMalloc((void**)(&d_in2), tensor_bytes); hipMemcpy(d_in1, in1.data(), tensor_bytes, hipMemcpyHostToDevice); hipMemcpy(d_in2, in2.data(), tensor_bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 1, 0, int>, Eigen::Aligned> gpu_in1( d_in1, 2); Eigen::TensorMap<Eigen::Tensor<float, 1, 0, int>, Eigen::Aligned> gpu_in2( d_in2, 2); gpu_in1.device(gpu_device) = gpu_in1.constant(3.14f); gpu_in2.device(gpu_device) = gpu_in2.random(); Tensor<float, 1, 0, int> new1(2); Tensor<float, 1, 0, int> new2(2); assert(hipMemcpyAsync(new1.data(), d_in1, tensor_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipMemcpyAsync(new2.data(), d_in2, tensor_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); for (int i = 0; i < 2; ++i) { VERIFY_IS_APPROX(new1(i), 3.14f); VERIFY_IS_NOT_EQUAL(new2(i), in2(i)); } hipFree(d_in1); hipFree(d_in2); } void test_cuda_elementwise_small() { Tensor<float, 1> in1(Eigen::array<Eigen::DenseIndex, 1>(2)); Tensor<float, 1> in2(Eigen::array<Eigen::DenseIndex, 1>(2)); Tensor<float, 1> out(Eigen::array<Eigen::DenseIndex, 1>(2)); in1.setRandom(); in2.setRandom(); std::size_t in1_bytes = in1.size() * sizeof(float); std::size_t in2_bytes = in2.size() * sizeof(float); std::size_t out_bytes = out.size() * sizeof(float); float* d_in1; float* d_in2; float* d_out; hipMalloc((void**)(&d_in1), in1_bytes); hipMalloc((void**)(&d_in2), in2_bytes); hipMalloc((void**)(&d_out), out_bytes); hipMemcpy(d_in1, in1.data(), in1_bytes, hipMemcpyHostToDevice); hipMemcpy(d_in2, in2.data(), in2_bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_in1( d_in1, Eigen::array<Eigen::DenseIndex, 1>(2)); Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_in2( d_in2, Eigen::array<Eigen::DenseIndex, 1>(2)); Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_out( d_out, Eigen::array<Eigen::DenseIndex, 1>(2)); gpu_out.device(gpu_device) = gpu_in1 + gpu_in2; assert(hipMemcpyAsync(out.data(), d_out, out_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); for (int i = 0; i < 2; ++i) { VERIFY_IS_APPROX( out(Eigen::array<Eigen::DenseIndex, 1>(i)), in1(Eigen::array<Eigen::DenseIndex, 1>(i)) + in2(Eigen::array<Eigen::DenseIndex, 1>(i))); } hipFree(d_in1); hipFree(d_in2); hipFree(d_out); } 
void test_cuda_elementwise() { Tensor<float, 3> in1(Eigen::array<Eigen::DenseIndex, 3>(72,53,97)); Tensor<float, 3> in2(Eigen::array<Eigen::DenseIndex, 3>(72,53,97)); Tensor<float, 3> in3(Eigen::array<Eigen::DenseIndex, 3>(72,53,97)); Tensor<float, 3> out(Eigen::array<Eigen::DenseIndex, 3>(72,53,97)); in1.setRandom(); in2.setRandom(); in3.setRandom(); std::size_t in1_bytes = in1.size() * sizeof(float); std::size_t in2_bytes = in2.size() * sizeof(float); std::size_t in3_bytes = in3.size() * sizeof(float); std::size_t out_bytes = out.size() * sizeof(float); float* d_in1; float* d_in2; float* d_in3; float* d_out; hipMalloc((void**)(&d_in1), in1_bytes); hipMalloc((void**)(&d_in2), in2_bytes); hipMalloc((void**)(&d_in3), in3_bytes); hipMalloc((void**)(&d_out), out_bytes); hipMemcpy(d_in1, in1.data(), in1_bytes, hipMemcpyHostToDevice); hipMemcpy(d_in2, in2.data(), in2_bytes, hipMemcpyHostToDevice); hipMemcpy(d_in3, in3.data(), in3_bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_in1(d_in1, Eigen::array<Eigen::DenseIndex, 3>(72,53,97)); Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_in2(d_in2, Eigen::array<Eigen::DenseIndex, 3>(72,53,97)); Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_in3(d_in3, Eigen::array<Eigen::DenseIndex, 3>(72,53,97)); Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_out(d_out, Eigen::array<Eigen::DenseIndex, 3>(72,53,97)); gpu_out.device(gpu_device) = gpu_in1 + gpu_in2 * gpu_in3; assert(hipMemcpyAsync(out.data(), d_out, out_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); for (int i = 0; i < 72; ++i) { for (int j = 0; j < 53; ++j) { for (int k = 0; k < 97; ++k) { VERIFY_IS_APPROX(out(Eigen::array<Eigen::DenseIndex, 3>(i,j,k)), in1(Eigen::array<Eigen::DenseIndex, 3>(i,j,k)) + in2(Eigen::array<Eigen::DenseIndex, 3>(i,j,k)) * in3(Eigen::array<Eigen::DenseIndex, 3>(i,j,k))); } } } hipFree(d_in1); hipFree(d_in2); hipFree(d_in3); hipFree(d_out); } void test_cuda_props() { Tensor<float, 1> in1(200); Tensor<bool, 1> out(200); in1.setRandom(); std::size_t in1_bytes = in1.size() * sizeof(float); std::size_t out_bytes = out.size() * sizeof(bool); float* d_in1; bool* d_out; hipMalloc((void**)(&d_in1), in1_bytes); hipMalloc((void**)(&d_out), out_bytes); hipMemcpy(d_in1, in1.data(), in1_bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_in1( d_in1, 200); Eigen::TensorMap<Eigen::Tensor<bool, 1>, Eigen::Aligned> gpu_out( d_out, 200); gpu_out.device(gpu_device) = (gpu_in1.isnan)(); assert(hipMemcpyAsync(out.data(), d_out, out_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); for (int i = 0; i < 200; ++i) { VERIFY_IS_EQUAL(out(i), (std::isnan)(in1(i))); } hipFree(d_in1); hipFree(d_out); } void test_cuda_reduction() { Tensor<float, 4> in1(72,53,97,113); Tensor<float, 2> out(72,97); in1.setRandom(); std::size_t in1_bytes = in1.size() * sizeof(float); std::size_t out_bytes = out.size() * sizeof(float); float* d_in1; float* d_out; hipMalloc((void**)(&d_in1), in1_bytes); hipMalloc((void**)(&d_out), out_bytes); hipMemcpy(d_in1, in1.data(), in1_bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 4> > gpu_in1(d_in1, 72,53,97,113); 
Eigen::TensorMap<Eigen::Tensor<float, 2> > gpu_out(d_out, 72,97); array<Eigen::DenseIndex, 2> reduction_axis; reduction_axis[0] = 1; reduction_axis[1] = 3; gpu_out.device(gpu_device) = gpu_in1.maximum(reduction_axis); assert(hipMemcpyAsync(out.data(), d_out, out_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); for (int i = 0; i < 72; ++i) { for (int j = 0; j < 97; ++j) { float expected = 0; for (int k = 0; k < 53; ++k) { for (int l = 0; l < 113; ++l) { expected = std::max<float>(expected, in1(i, k, j, l)); } } VERIFY_IS_APPROX(out(i,j), expected); } } hipFree(d_in1); hipFree(d_out); } template<int DataLayout> void test_cuda_contraction() { // with these dimensions, the output has 300 * 140 elements, which is // more than 30 * 1024, which is the number of threads in blocks on // a 15 SM GK110 GPU Tensor<float, 4, DataLayout> t_left(6, 50, 3, 31); Tensor<float, 5, DataLayout> t_right(Eigen::array<Eigen::DenseIndex, 5>(3, 31, 7, 20, 1)); Tensor<float, 5, DataLayout> t_result(Eigen::array<Eigen::DenseIndex, 5>(6, 50, 7, 20, 1)); t_left.setRandom(); t_right.setRandom(); std::size_t t_left_bytes = t_left.size() * sizeof(float); std::size_t t_right_bytes = t_right.size() * sizeof(float); std::size_t t_result_bytes = t_result.size() * sizeof(float); float* d_t_left; float* d_t_right; float* d_t_result; hipMalloc((void**)(&d_t_left), t_left_bytes); hipMalloc((void**)(&d_t_right), t_right_bytes); hipMalloc((void**)(&d_t_result), t_result_bytes); hipMemcpy(d_t_left, t_left.data(), t_left_bytes, hipMemcpyHostToDevice); hipMemcpy(d_t_right, t_right.data(), t_right_bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 4, DataLayout> > gpu_t_left(d_t_left, 6, 50, 3, 31); Eigen::TensorMap<Eigen::Tensor<float, 5, DataLayout> > gpu_t_right(d_t_right, 3, 31, 7, 20, 1); Eigen::TensorMap<Eigen::Tensor<float, 5, DataLayout> > gpu_t_result(d_t_result, 6, 50, 7, 20, 1); typedef Eigen::Map<Eigen::Matrix<float, Dynamic, Dynamic, DataLayout> > MapXf; MapXf m_left(t_left.data(), 300, 93); MapXf m_right(t_right.data(), 93, 140); Eigen::Matrix<float, Dynamic, Dynamic, DataLayout> m_result(300, 140); typedef Tensor<float, 1>::DimensionPair DimPair; Eigen::array<DimPair, 2> dims; dims[0] = DimPair(2, 0); dims[1] = DimPair(3, 1); m_result = m_left * m_right; gpu_t_result.device(gpu_device) = gpu_t_left.contract(gpu_t_right, dims); hipMemcpy(t_result.data(), d_t_result, t_result_bytes, hipMemcpyDeviceToHost); for (DenseIndex i = 0; i < t_result.size(); i++) { if (fabs(t_result.data()[i] - m_result.data()[i]) >= 1e-4f) { std::cout << "mismatch detected at index " << i << ": " << t_result.data()[i] << " vs " << m_result.data()[i] << std::endl; assert(false); } } hipFree(d_t_left); hipFree(d_t_right); hipFree(d_t_result); } template<int DataLayout> void test_cuda_convolution_1d() { Tensor<float, 4, DataLayout> input(74,37,11,137); Tensor<float, 1, DataLayout> kernel(4); Tensor<float, 4, DataLayout> out(74,34,11,137); input = input.constant(10.0f) + input.random(); kernel = kernel.constant(7.0f) + kernel.random(); std::size_t input_bytes = input.size() * sizeof(float); std::size_t kernel_bytes = kernel.size() * sizeof(float); std::size_t out_bytes = out.size() * sizeof(float); float* d_input; float* d_kernel; float* d_out; hipMalloc((void**)(&d_input), input_bytes); hipMalloc((void**)(&d_kernel), kernel_bytes); hipMalloc((void**)(&d_out), out_bytes); 
hipMemcpy(d_input, input.data(), input_bytes, hipMemcpyHostToDevice); hipMemcpy(d_kernel, kernel.data(), kernel_bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 4, DataLayout> > gpu_input(d_input, 74,37,11,137); Eigen::TensorMap<Eigen::Tensor<float, 1, DataLayout> > gpu_kernel(d_kernel, 4); Eigen::TensorMap<Eigen::Tensor<float, 4, DataLayout> > gpu_out(d_out, 74,34,11,137); Eigen::array<Eigen::DenseIndex, 1> dims(1); gpu_out.device(gpu_device) = gpu_input.convolve(gpu_kernel, dims); assert(hipMemcpyAsync(out.data(), d_out, out_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); for (int i = 0; i < 74; ++i) { for (int j = 0; j < 34; ++j) { for (int k = 0; k < 11; ++k) { for (int l = 0; l < 137; ++l) { const float result = out(i,j,k,l); const float expected = input(i,j+0,k,l) * kernel(0) + input(i,j+1,k,l) * kernel(1) + input(i,j+2,k,l) * kernel(2) + input(i,j+3,k,l) * kernel(3); VERIFY_IS_APPROX(result, expected); } } } } hipFree(d_input); hipFree(d_kernel); hipFree(d_out); } void test_cuda_convolution_inner_dim_col_major_1d() { Tensor<float, 4, ColMajor> input(74,9,11,7); Tensor<float, 1, ColMajor> kernel(4); Tensor<float, 4, ColMajor> out(71,9,11,7); input = input.constant(10.0f) + input.random(); kernel = kernel.constant(7.0f) + kernel.random(); std::size_t input_bytes = input.size() * sizeof(float); std::size_t kernel_bytes = kernel.size() * sizeof(float); std::size_t out_bytes = out.size() * sizeof(float); float* d_input; float* d_kernel; float* d_out; hipMalloc((void**)(&d_input), input_bytes); hipMalloc((void**)(&d_kernel), kernel_bytes); hipMalloc((void**)(&d_out), out_bytes); hipMemcpy(d_input, input.data(), input_bytes, hipMemcpyHostToDevice); hipMemcpy(d_kernel, kernel.data(), kernel_bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 4, ColMajor> > gpu_input(d_input,74,9,11,7); Eigen::TensorMap<Eigen::Tensor<float, 1, ColMajor> > gpu_kernel(d_kernel,4); Eigen::TensorMap<Eigen::Tensor<float, 4, ColMajor> > gpu_out(d_out,71,9,11,7); Eigen::array<Eigen::DenseIndex, 1> dims(0); gpu_out.device(gpu_device) = gpu_input.convolve(gpu_kernel, dims); assert(hipMemcpyAsync(out.data(), d_out, out_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); for (int i = 0; i < 71; ++i) { for (int j = 0; j < 9; ++j) { for (int k = 0; k < 11; ++k) { for (int l = 0; l < 7; ++l) { const float result = out(i,j,k,l); const float expected = input(i+0,j,k,l) * kernel(0) + input(i+1,j,k,l) * kernel(1) + input(i+2,j,k,l) * kernel(2) + input(i+3,j,k,l) * kernel(3); VERIFY_IS_APPROX(result, expected); } } } } hipFree(d_input); hipFree(d_kernel); hipFree(d_out); } void test_cuda_convolution_inner_dim_row_major_1d() { Tensor<float, 4, RowMajor> input(7,9,11,74); Tensor<float, 1, RowMajor> kernel(4); Tensor<float, 4, RowMajor> out(7,9,11,71); input = input.constant(10.0f) + input.random(); kernel = kernel.constant(7.0f) + kernel.random(); std::size_t input_bytes = input.size() * sizeof(float); std::size_t kernel_bytes = kernel.size() * sizeof(float); std::size_t out_bytes = out.size() * sizeof(float); float* d_input; float* d_kernel; float* d_out; hipMalloc((void**)(&d_input), input_bytes); hipMalloc((void**)(&d_kernel), kernel_bytes); hipMalloc((void**)(&d_out), out_bytes); 
hipMemcpy(d_input, input.data(), input_bytes, hipMemcpyHostToDevice); hipMemcpy(d_kernel, kernel.data(), kernel_bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 4, RowMajor> > gpu_input(d_input, 7,9,11,74); Eigen::TensorMap<Eigen::Tensor<float, 1, RowMajor> > gpu_kernel(d_kernel, 4); Eigen::TensorMap<Eigen::Tensor<float, 4, RowMajor> > gpu_out(d_out, 7,9,11,71); Eigen::array<Eigen::DenseIndex, 1> dims(3); gpu_out.device(gpu_device) = gpu_input.convolve(gpu_kernel, dims); assert(hipMemcpyAsync(out.data(), d_out, out_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); for (int i = 0; i < 7; ++i) { for (int j = 0; j < 9; ++j) { for (int k = 0; k < 11; ++k) { for (int l = 0; l < 71; ++l) { const float result = out(i,j,k,l); const float expected = input(i,j,k,l+0) * kernel(0) + input(i,j,k,l+1) * kernel(1) + input(i,j,k,l+2) * kernel(2) + input(i,j,k,l+3) * kernel(3); VERIFY_IS_APPROX(result, expected); } } } } hipFree(d_input); hipFree(d_kernel); hipFree(d_out); } template<int DataLayout> void test_cuda_convolution_2d() { Tensor<float, 4, DataLayout> input(74,37,11,137); Tensor<float, 2, DataLayout> kernel(3,4); Tensor<float, 4, DataLayout> out(74,35,8,137); input = input.constant(10.0f) + input.random(); kernel = kernel.constant(7.0f) + kernel.random(); std::size_t input_bytes = input.size() * sizeof(float); std::size_t kernel_bytes = kernel.size() * sizeof(float); std::size_t out_bytes = out.size() * sizeof(float); float* d_input; float* d_kernel; float* d_out; hipMalloc((void**)(&d_input), input_bytes); hipMalloc((void**)(&d_kernel), kernel_bytes); hipMalloc((void**)(&d_out), out_bytes); hipMemcpy(d_input, input.data(), input_bytes, hipMemcpyHostToDevice); hipMemcpy(d_kernel, kernel.data(), kernel_bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 4, DataLayout> > gpu_input(d_input,74,37,11,137); Eigen::TensorMap<Eigen::Tensor<float, 2, DataLayout> > gpu_kernel(d_kernel,3,4); Eigen::TensorMap<Eigen::Tensor<float, 4, DataLayout> > gpu_out(d_out,74,35,8,137); Eigen::array<Eigen::DenseIndex, 2> dims(1,2); gpu_out.device(gpu_device) = gpu_input.convolve(gpu_kernel, dims); assert(hipMemcpyAsync(out.data(), d_out, out_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); for (int i = 0; i < 74; ++i) { for (int j = 0; j < 35; ++j) { for (int k = 0; k < 8; ++k) { for (int l = 0; l < 137; ++l) { const float result = out(i,j,k,l); const float expected = input(i,j+0,k+0,l) * kernel(0,0) + input(i,j+1,k+0,l) * kernel(1,0) + input(i,j+2,k+0,l) * kernel(2,0) + input(i,j+0,k+1,l) * kernel(0,1) + input(i,j+1,k+1,l) * kernel(1,1) + input(i,j+2,k+1,l) * kernel(2,1) + input(i,j+0,k+2,l) * kernel(0,2) + input(i,j+1,k+2,l) * kernel(1,2) + input(i,j+2,k+2,l) * kernel(2,2) + input(i,j+0,k+3,l) * kernel(0,3) + input(i,j+1,k+3,l) * kernel(1,3) + input(i,j+2,k+3,l) * kernel(2,3); VERIFY_IS_APPROX(result, expected); } } } } hipFree(d_input); hipFree(d_kernel); hipFree(d_out); } template<int DataLayout> void test_cuda_convolution_3d() { Tensor<float, 5, DataLayout> input(Eigen::array<Eigen::DenseIndex, 5>(74,37,11,137,17)); Tensor<float, 3, DataLayout> kernel(3,4,2); Tensor<float, 5, DataLayout> out(Eigen::array<Eigen::DenseIndex, 5>(74,35,8,136,17)); input = input.constant(10.0f) + 
input.random(); kernel = kernel.constant(7.0f) + kernel.random(); std::size_t input_bytes = input.size() * sizeof(float); std::size_t kernel_bytes = kernel.size() * sizeof(float); std::size_t out_bytes = out.size() * sizeof(float); float* d_input; float* d_kernel; float* d_out; hipMalloc((void**)(&d_input), input_bytes); hipMalloc((void**)(&d_kernel), kernel_bytes); hipMalloc((void**)(&d_out), out_bytes); hipMemcpy(d_input, input.data(), input_bytes, hipMemcpyHostToDevice); hipMemcpy(d_kernel, kernel.data(), kernel_bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 5, DataLayout> > gpu_input(d_input,74,37,11,137,17); Eigen::TensorMap<Eigen::Tensor<float, 3, DataLayout> > gpu_kernel(d_kernel,3,4,2); Eigen::TensorMap<Eigen::Tensor<float, 5, DataLayout> > gpu_out(d_out,74,35,8,136,17); Eigen::array<Eigen::DenseIndex, 3> dims(1,2,3); gpu_out.device(gpu_device) = gpu_input.convolve(gpu_kernel, dims); assert(hipMemcpyAsync(out.data(), d_out, out_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); for (int i = 0; i < 74; ++i) { for (int j = 0; j < 35; ++j) { for (int k = 0; k < 8; ++k) { for (int l = 0; l < 136; ++l) { for (int m = 0; m < 17; ++m) { const float result = out(i,j,k,l,m); const float expected = input(i,j+0,k+0,l+0,m) * kernel(0,0,0) + input(i,j+1,k+0,l+0,m) * kernel(1,0,0) + input(i,j+2,k+0,l+0,m) * kernel(2,0,0) + input(i,j+0,k+1,l+0,m) * kernel(0,1,0) + input(i,j+1,k+1,l+0,m) * kernel(1,1,0) + input(i,j+2,k+1,l+0,m) * kernel(2,1,0) + input(i,j+0,k+2,l+0,m) * kernel(0,2,0) + input(i,j+1,k+2,l+0,m) * kernel(1,2,0) + input(i,j+2,k+2,l+0,m) * kernel(2,2,0) + input(i,j+0,k+3,l+0,m) * kernel(0,3,0) + input(i,j+1,k+3,l+0,m) * kernel(1,3,0) + input(i,j+2,k+3,l+0,m) * kernel(2,3,0) + input(i,j+0,k+0,l+1,m) * kernel(0,0,1) + input(i,j+1,k+0,l+1,m) * kernel(1,0,1) + input(i,j+2,k+0,l+1,m) * kernel(2,0,1) + input(i,j+0,k+1,l+1,m) * kernel(0,1,1) + input(i,j+1,k+1,l+1,m) * kernel(1,1,1) + input(i,j+2,k+1,l+1,m) * kernel(2,1,1) + input(i,j+0,k+2,l+1,m) * kernel(0,2,1) + input(i,j+1,k+2,l+1,m) * kernel(1,2,1) + input(i,j+2,k+2,l+1,m) * kernel(2,2,1) + input(i,j+0,k+3,l+1,m) * kernel(0,3,1) + input(i,j+1,k+3,l+1,m) * kernel(1,3,1) + input(i,j+2,k+3,l+1,m) * kernel(2,3,1); VERIFY_IS_APPROX(result, expected); } } } } } hipFree(d_input); hipFree(d_kernel); hipFree(d_out); } template <typename Scalar> void test_cuda_lgamma(const Scalar stddev) { Tensor<Scalar, 2> in(72,97); in.setRandom(); in *= in.constant(stddev); Tensor<Scalar, 2> out(72,97); out.setZero(); std::size_t bytes = in.size() * sizeof(Scalar); Scalar* d_in; Scalar* d_out; hipMalloc((void**)(&d_in), bytes); hipMalloc((void**)(&d_out), bytes); hipMemcpy(d_in, in.data(), bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_in(d_in, 72, 97); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_out(d_out, 72, 97); gpu_out.device(gpu_device) = gpu_in.lgamma(); assert(hipMemcpyAsync(out.data(), d_out, bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); for (int i = 0; i < 72; ++i) { for (int j = 0; j < 97; ++j) { VERIFY_IS_APPROX(out(i,j), (std::lgamma)(in(i,j))); } } hipFree(d_in); hipFree(d_out); } template <typename Scalar> void test_cuda_digamma() { Tensor<Scalar, 1> in(7); Tensor<Scalar, 1> out(7); 
Tensor<Scalar, 1> expected_out(7); out.setZero(); in(0) = Scalar(1); in(1) = Scalar(1.5); in(2) = Scalar(4); in(3) = Scalar(-10.5); in(4) = Scalar(10000.5); in(5) = Scalar(0); in(6) = Scalar(-1); expected_out(0) = Scalar(-0.5772156649015329); expected_out(1) = Scalar(0.03648997397857645); expected_out(2) = Scalar(1.2561176684318); expected_out(3) = Scalar(2.398239129535781); expected_out(4) = Scalar(9.210340372392849); expected_out(5) = std::numeric_limits<Scalar>::infinity(); expected_out(6) = std::numeric_limits<Scalar>::infinity(); std::size_t bytes = in.size() * sizeof(Scalar); Scalar* d_in; Scalar* d_out; hipMalloc((void**)(&d_in), bytes); hipMalloc((void**)(&d_out), bytes); hipMemcpy(d_in, in.data(), bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in(d_in, 7); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_out(d_out, 7); gpu_out.device(gpu_device) = gpu_in.digamma(); assert(hipMemcpyAsync(out.data(), d_out, bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); for (int i = 0; i < 5; ++i) { VERIFY_IS_APPROX(out(i), expected_out(i)); } for (int i = 5; i < 7; ++i) { VERIFY_IS_EQUAL(out(i), expected_out(i)); } hipFree(d_in); hipFree(d_out); } template <typename Scalar> void test_cuda_zeta() { Tensor<Scalar, 1> in_x(6); Tensor<Scalar, 1> in_q(6); Tensor<Scalar, 1> out(6); Tensor<Scalar, 1> expected_out(6); out.setZero(); in_x(0) = Scalar(1); in_x(1) = Scalar(1.5); in_x(2) = Scalar(4); in_x(3) = Scalar(-10.5); in_x(4) = Scalar(10000.5); in_x(5) = Scalar(3); in_q(0) = Scalar(1.2345); in_q(1) = Scalar(2); in_q(2) = Scalar(1.5); in_q(3) = Scalar(3); in_q(4) = Scalar(1.0001); in_q(5) = Scalar(-2.5); expected_out(0) = std::numeric_limits<Scalar>::infinity(); expected_out(1) = Scalar(1.61237534869); expected_out(2) = Scalar(0.234848505667); expected_out(3) = Scalar(1.03086757337e-5); expected_out(4) = Scalar(0.367879440865); expected_out(5) = Scalar(0.054102025820864097); std::size_t bytes = in_x.size() * sizeof(Scalar); Scalar* d_in_x; Scalar* d_in_q; Scalar* d_out; hipMalloc((void**)(&d_in_x), bytes); hipMalloc((void**)(&d_in_q), bytes); hipMalloc((void**)(&d_out), bytes); hipMemcpy(d_in_x, in_x.data(), bytes, hipMemcpyHostToDevice); hipMemcpy(d_in_q, in_q.data(), bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in_x(d_in_x, 6); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in_q(d_in_q, 6); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_out(d_out, 6); gpu_out.device(gpu_device) = gpu_in_x.zeta(gpu_in_q); assert(hipMemcpyAsync(out.data(), d_out, bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); VERIFY_IS_EQUAL(out(0), expected_out(0)); VERIFY((std::isnan)(out(3))); for (int i = 1; i < 6; ++i) { if (i != 3) { VERIFY_IS_APPROX(out(i), expected_out(i)); } } hipFree(d_in_x); hipFree(d_in_q); hipFree(d_out); } template <typename Scalar> void test_cuda_polygamma() { Tensor<Scalar, 1> in_x(7); Tensor<Scalar, 1> in_n(7); Tensor<Scalar, 1> out(7); Tensor<Scalar, 1> expected_out(7); out.setZero(); in_n(0) = Scalar(1); in_n(1) = Scalar(1); in_n(2) = Scalar(1); in_n(3) = Scalar(17); in_n(4) = Scalar(31); in_n(5) = Scalar(28); in_n(6) = Scalar(8); in_x(0) = Scalar(2); in_x(1) = Scalar(3); in_x(2) = Scalar(25.5); in_x(3) = Scalar(4.7); in_x(4) = 
Scalar(11.8); in_x(5) = Scalar(17.7); in_x(6) = Scalar(30.2); expected_out(0) = Scalar(0.644934066848); expected_out(1) = Scalar(0.394934066848); expected_out(2) = Scalar(0.0399946696496); expected_out(3) = Scalar(293.334565435); expected_out(4) = Scalar(0.445487887616); expected_out(5) = Scalar(-2.47810300902e-07); expected_out(6) = Scalar(-8.29668781082e-09); std::size_t bytes = in_x.size() * sizeof(Scalar); Scalar* d_in_x; Scalar* d_in_n; Scalar* d_out; hipMalloc((void**)(&d_in_x), bytes); hipMalloc((void**)(&d_in_n), bytes); hipMalloc((void**)(&d_out), bytes); hipMemcpy(d_in_x, in_x.data(), bytes, hipMemcpyHostToDevice); hipMemcpy(d_in_n, in_n.data(), bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in_x(d_in_x, 7); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in_n(d_in_n, 7); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_out(d_out, 7); gpu_out.device(gpu_device) = gpu_in_n.polygamma(gpu_in_x); assert(hipMemcpyAsync(out.data(), d_out, bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); for (int i = 0; i < 7; ++i) { VERIFY_IS_APPROX(out(i), expected_out(i)); } hipFree(d_in_x); hipFree(d_in_n); hipFree(d_out); } template <typename Scalar> void test_cuda_igamma() { Tensor<Scalar, 2> a(6, 6); Tensor<Scalar, 2> x(6, 6); Tensor<Scalar, 2> out(6, 6); out.setZero(); Scalar a_s[] = {Scalar(0), Scalar(1), Scalar(1.5), Scalar(4), Scalar(0.0001), Scalar(1000.5)}; Scalar x_s[] = {Scalar(0), Scalar(1), Scalar(1.5), Scalar(4), Scalar(0.0001), Scalar(1000.5)}; for (int i = 0; i < 6; ++i) { for (int j = 0; j < 6; ++j) { a(i, j) = a_s[i]; x(i, j) = x_s[j]; } } Scalar nan = std::numeric_limits<Scalar>::quiet_NaN(); Scalar igamma_s[][6] = {{0.0, nan, nan, nan, nan, nan}, {0.0, 0.6321205588285578, 0.7768698398515702, 0.9816843611112658, 9.999500016666262e-05, 1.0}, {0.0, 0.4275932955291202, 0.608374823728911, 0.9539882943107686, 7.522076445089201e-07, 1.0}, {0.0, 0.01898815687615381, 0.06564245437845008, 0.5665298796332909, 4.166333347221828e-18, 1.0}, {0.0, 0.9999780593618628, 0.9999899967080838, 0.9999996219837988, 0.9991370418689945, 1.0}, {0.0, 0.0, 0.0, 0.0, 0.0, 0.5042041932513908}}; std::size_t bytes = a.size() * sizeof(Scalar); Scalar* d_a; Scalar* d_x; Scalar* d_out; assert(hipMalloc((void**)(&d_a), bytes) == hipSuccess); assert(hipMalloc((void**)(&d_x), bytes) == hipSuccess); assert(hipMalloc((void**)(&d_out), bytes) == hipSuccess); hipMemcpy(d_a, a.data(), bytes, hipMemcpyHostToDevice); hipMemcpy(d_x, x.data(), bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_a(d_a, 6, 6); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_x(d_x, 6, 6); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_out(d_out, 6, 6); gpu_out.device(gpu_device) = gpu_a.igamma(gpu_x); assert(hipMemcpyAsync(out.data(), d_out, bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); for (int i = 0; i < 6; ++i) { for (int j = 0; j < 6; ++j) { if ((std::isnan)(igamma_s[i][j])) { VERIFY((std::isnan)(out(i, j))); } else { VERIFY_IS_APPROX(out(i, j), igamma_s[i][j]); } } } hipFree(d_a); hipFree(d_x); hipFree(d_out); } template <typename Scalar> void test_cuda_igammac() { Tensor<Scalar, 2> a(6, 6); Tensor<Scalar, 2> x(6, 6); Tensor<Scalar, 2> out(6, 6); out.setZero(); Scalar a_s[] = 
{Scalar(0), Scalar(1), Scalar(1.5), Scalar(4), Scalar(0.0001), Scalar(1000.5)}; Scalar x_s[] = {Scalar(0), Scalar(1), Scalar(1.5), Scalar(4), Scalar(0.0001), Scalar(1000.5)}; for (int i = 0; i < 6; ++i) { for (int j = 0; j < 6; ++j) { a(i, j) = a_s[i]; x(i, j) = x_s[j]; } } Scalar nan = std::numeric_limits<Scalar>::quiet_NaN(); Scalar igammac_s[][6] = {{nan, nan, nan, nan, nan, nan}, {1.0, 0.36787944117144233, 0.22313016014842982, 0.018315638888734182, 0.9999000049998333, 0.0}, {1.0, 0.5724067044708798, 0.3916251762710878, 0.04601170568923136, 0.9999992477923555, 0.0}, {1.0, 0.9810118431238462, 0.9343575456215499, 0.4334701203667089, 1.0, 0.0}, {1.0, 2.1940638138146658e-05, 1.0003291916285e-05, 3.7801620118431334e-07, 0.0008629581310054535, 0.0}, {1.0, 1.0, 1.0, 1.0, 1.0, 0.49579580674813944}}; std::size_t bytes = a.size() * sizeof(Scalar); Scalar* d_a; Scalar* d_x; Scalar* d_out; hipMalloc((void**)(&d_a), bytes); hipMalloc((void**)(&d_x), bytes); hipMalloc((void**)(&d_out), bytes); hipMemcpy(d_a, a.data(), bytes, hipMemcpyHostToDevice); hipMemcpy(d_x, x.data(), bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_a(d_a, 6, 6); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_x(d_x, 6, 6); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_out(d_out, 6, 6); gpu_out.device(gpu_device) = gpu_a.igammac(gpu_x); assert(hipMemcpyAsync(out.data(), d_out, bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); for (int i = 0; i < 6; ++i) { for (int j = 0; j < 6; ++j) { if ((std::isnan)(igammac_s[i][j])) { VERIFY((std::isnan)(out(i, j))); } else { VERIFY_IS_APPROX(out(i, j), igammac_s[i][j]); } } } hipFree(d_a); hipFree(d_x); hipFree(d_out); } template <typename Scalar> void test_cuda_erf(const Scalar stddev) { Tensor<Scalar, 2> in(72,97); in.setRandom(); in *= in.constant(stddev); Tensor<Scalar, 2> out(72,97); out.setZero(); std::size_t bytes = in.size() * sizeof(Scalar); Scalar* d_in; Scalar* d_out; assert(hipMalloc((void**)(&d_in), bytes) == hipSuccess); assert(hipMalloc((void**)(&d_out), bytes) == hipSuccess); hipMemcpy(d_in, in.data(), bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_in(d_in, 72, 97); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_out(d_out, 72, 97); gpu_out.device(gpu_device) = gpu_in.erf(); assert(hipMemcpyAsync(out.data(), d_out, bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); for (int i = 0; i < 72; ++i) { for (int j = 0; j < 97; ++j) { VERIFY_IS_APPROX(out(i,j), (std::erf)(in(i,j))); } } hipFree(d_in); hipFree(d_out); } template <typename Scalar> void test_cuda_erfc(const Scalar stddev) { Tensor<Scalar, 2> in(72,97); in.setRandom(); in *= in.constant(stddev); Tensor<Scalar, 2> out(72,97); out.setZero(); std::size_t bytes = in.size() * sizeof(Scalar); Scalar* d_in; Scalar* d_out; hipMalloc((void**)(&d_in), bytes); hipMalloc((void**)(&d_out), bytes); hipMemcpy(d_in, in.data(), bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_in(d_in, 72, 97); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_out(d_out, 72, 97); gpu_out.device(gpu_device) = gpu_in.erfc(); assert(hipMemcpyAsync(out.data(), d_out, bytes, hipMemcpyDeviceToHost, 
gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); for (int i = 0; i < 72; ++i) { for (int j = 0; j < 97; ++j) { VERIFY_IS_APPROX(out(i,j), (std::erfc)(in(i,j))); } } hipFree(d_in); hipFree(d_out); } template <typename Scalar> void test_cuda_betainc() { Tensor<Scalar, 1> in_x(125); Tensor<Scalar, 1> in_a(125); Tensor<Scalar, 1> in_b(125); Tensor<Scalar, 1> out(125); Tensor<Scalar, 1> expected_out(125); out.setZero(); Scalar nan = std::numeric_limits<Scalar>::quiet_NaN(); Array<Scalar, 1, Dynamic> x(125); Array<Scalar, 1, Dynamic> a(125); Array<Scalar, 1, Dynamic> b(125); Array<Scalar, 1, Dynamic> v(125); a << 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999; b << 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 999.999, 999.999, 999.999, 999.999, 999.999, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 999.999, 999.999, 999.999, 999.999, 999.999, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 999.999, 999.999, 999.999, 999.999, 999.999, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 999.999, 999.999, 999.999, 999.999, 999.999, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.999, 
0.999, 0.999, 0.999, 0.999, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 999.999, 999.999, 999.999, 999.999, 999.999; x << -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1; v << nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.47972119876364683, 0.5, 0.5202788012363533, nan, nan, 0.9518683957740043, 0.9789663010413743, 0.9931729188073435, nan, nan, 0.999995949033062, 0.9999999999993698, 0.9999999999999999, nan, nan, 0.9999999999999999, 0.9999999999999999, 0.9999999999999999, nan, nan, nan, nan, nan, nan, nan, 0.006827081192655869, 0.0210336989586256, 0.04813160422599567, nan, nan, 0.20014344256217678, 0.5000000000000001, 0.7998565574378232, nan, nan, 0.9991401428435834, 0.999999999698403, 0.9999999999999999, nan, nan, 0.9999999999999999, 0.9999999999999999, 0.9999999999999999, nan, nan, nan, nan, nan, nan, nan, 1.0646600232370887e-25, 6.301722877826246e-13, 4.050966937974938e-06, nan, nan, 7.864342668429763e-23, 3.015969667594166e-10, 0.0008598571564165444, nan, nan, 6.031987710123844e-08, 0.5000000000000007, 0.9999999396801229, nan, nan, 0.9999999999999999, 0.9999999999999999, 0.9999999999999999, nan, nan, nan, nan, nan, nan, nan, 0.0, 7.029920380986636e-306, 2.2450728208591345e-101, nan, nan, 0.0, 9.275871147869727e-302, 1.2232913026152827e-97, nan, nan, 0.0, 3.0891393081932924e-252, 2.9303043666183996e-60, nan, nan, 2.248913486879199e-196, 0.5000000000004947, 0.9999999999999999, nan; for (int i = 0; i < 125; ++i) { in_x(i) = x(i); in_a(i) = a(i); in_b(i) = b(i); expected_out(i) = v(i); } std::size_t bytes = in_x.size() * sizeof(Scalar); Scalar* d_in_x; Scalar* d_in_a; Scalar* d_in_b; Scalar* d_out; hipMalloc((void**)(&d_in_x), bytes); hipMalloc((void**)(&d_in_a), bytes); hipMalloc((void**)(&d_in_b), bytes); hipMalloc((void**)(&d_out), bytes); hipMemcpy(d_in_x, in_x.data(), bytes, hipMemcpyHostToDevice); hipMemcpy(d_in_a, in_a.data(), bytes, hipMemcpyHostToDevice); hipMemcpy(d_in_b, in_b.data(), bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in_x(d_in_x, 125); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in_a(d_in_a, 125); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in_b(d_in_b, 125); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_out(d_out, 125); gpu_out.device(gpu_device) = betainc(gpu_in_a, gpu_in_b, gpu_in_x); assert(hipMemcpyAsync(out.data(), d_out, bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); for (int i = 1; i < 125; ++i) { if ((std::isnan)(expected_out(i))) { VERIFY((std::isnan)(out(i))); } else { VERIFY_IS_APPROX(out(i), expected_out(i)); } } hipFree(d_in_x); hipFree(d_in_a); hipFree(d_in_b); hipFree(d_out); } void test_cxx11_tensor_cuda() { 
  CALL_SUBTEST_1(test_cuda_nullary());
  CALL_SUBTEST_1(test_cuda_elementwise_small());
  CALL_SUBTEST_1(test_cuda_elementwise());
  CALL_SUBTEST_1(test_cuda_props());
  CALL_SUBTEST_1(test_cuda_reduction());
  CALL_SUBTEST_2(test_cuda_contraction<ColMajor>());
  CALL_SUBTEST_2(test_cuda_contraction<RowMajor>());
  CALL_SUBTEST_3(test_cuda_convolution_1d<ColMajor>());
  CALL_SUBTEST_3(test_cuda_convolution_1d<RowMajor>());
  CALL_SUBTEST_3(test_cuda_convolution_inner_dim_col_major_1d());
  CALL_SUBTEST_3(test_cuda_convolution_inner_dim_row_major_1d());
  CALL_SUBTEST_3(test_cuda_convolution_2d<ColMajor>());
  CALL_SUBTEST_3(test_cuda_convolution_2d<RowMajor>());
  CALL_SUBTEST_3(test_cuda_convolution_3d<ColMajor>());
  CALL_SUBTEST_3(test_cuda_convolution_3d<RowMajor>());

#if __cplusplus > 199711L
  // std::erf, std::erfc, and so on where only added in c++11. We use them
  // as a golden reference to validate the results produced by Eigen. Therefore
  // we can only run these tests if we use a c++11 compiler.
  CALL_SUBTEST_4(test_cuda_lgamma<float>(1.0f));
  CALL_SUBTEST_4(test_cuda_lgamma<float>(100.0f));
  CALL_SUBTEST_4(test_cuda_lgamma<float>(0.01f));
  CALL_SUBTEST_4(test_cuda_lgamma<float>(0.001f));

  CALL_SUBTEST_4(test_cuda_lgamma<double>(1.0));
  CALL_SUBTEST_4(test_cuda_lgamma<double>(100.0));
  CALL_SUBTEST_4(test_cuda_lgamma<double>(0.01));
  CALL_SUBTEST_4(test_cuda_lgamma<double>(0.001));

  CALL_SUBTEST_4(test_cuda_erf<float>(1.0f));
  CALL_SUBTEST_4(test_cuda_erf<float>(100.0f));
  CALL_SUBTEST_4(test_cuda_erf<float>(0.01f));
  CALL_SUBTEST_4(test_cuda_erf<float>(0.001f));

  CALL_SUBTEST_4(test_cuda_erfc<float>(1.0f));
  // CALL_SUBTEST(test_cuda_erfc<float>(100.0f));
  CALL_SUBTEST_4(test_cuda_erfc<float>(5.0f));  // CUDA erfc lacks precision for large inputs
  CALL_SUBTEST_4(test_cuda_erfc<float>(0.01f));
  CALL_SUBTEST_4(test_cuda_erfc<float>(0.001f));

  CALL_SUBTEST_4(test_cuda_erf<double>(1.0));
  CALL_SUBTEST_4(test_cuda_erf<double>(100.0));
  CALL_SUBTEST_4(test_cuda_erf<double>(0.01));
  CALL_SUBTEST_4(test_cuda_erf<double>(0.001));

  CALL_SUBTEST_4(test_cuda_erfc<double>(1.0));
  // CALL_SUBTEST(test_cuda_erfc<double>(100.0));
  CALL_SUBTEST_4(test_cuda_erfc<double>(5.0));  // CUDA erfc lacks precision for large inputs
  CALL_SUBTEST_4(test_cuda_erfc<double>(0.01));
  CALL_SUBTEST_4(test_cuda_erfc<double>(0.001));

  CALL_SUBTEST_5(test_cuda_digamma<float>());
  CALL_SUBTEST_5(test_cuda_digamma<double>());

  CALL_SUBTEST_5(test_cuda_polygamma<float>());
  CALL_SUBTEST_5(test_cuda_polygamma<double>());

  CALL_SUBTEST_5(test_cuda_zeta<float>());
  CALL_SUBTEST_5(test_cuda_zeta<double>());

  CALL_SUBTEST_5(test_cuda_igamma<float>());
  CALL_SUBTEST_5(test_cuda_igammac<float>());

  CALL_SUBTEST_5(test_cuda_igamma<double>());
  CALL_SUBTEST_5(test_cuda_igammac<double>());

  CALL_SUBTEST_6(test_cuda_betainc<float>());
  CALL_SUBTEST_6(test_cuda_betainc<double>());
#endif
}
7061e7788597f7fc618280b93add6671c1621d7d.cu
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #define EIGEN_TEST_NO_LONGDOUBLE #define EIGEN_TEST_NO_COMPLEX #define EIGEN_TEST_FUNC cxx11_tensor_cuda #define EIGEN_USE_GPU #if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500 #include <cuda_fp16.h> #endif #include "main.h" #include <unsupported/Eigen/CXX11/Tensor> using Eigen::Tensor; void test_cuda_nullary() { Tensor<float, 1, 0, int> in1(2); Tensor<float, 1, 0, int> in2(2); in1.setRandom(); in2.setRandom(); std::size_t tensor_bytes = in1.size() * sizeof(float); float* d_in1; float* d_in2; cudaMalloc((void**)(&d_in1), tensor_bytes); cudaMalloc((void**)(&d_in2), tensor_bytes); cudaMemcpy(d_in1, in1.data(), tensor_bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_in2, in2.data(), tensor_bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 1, 0, int>, Eigen::Aligned> gpu_in1( d_in1, 2); Eigen::TensorMap<Eigen::Tensor<float, 1, 0, int>, Eigen::Aligned> gpu_in2( d_in2, 2); gpu_in1.device(gpu_device) = gpu_in1.constant(3.14f); gpu_in2.device(gpu_device) = gpu_in2.random(); Tensor<float, 1, 0, int> new1(2); Tensor<float, 1, 0, int> new2(2); assert(cudaMemcpyAsync(new1.data(), d_in1, tensor_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaMemcpyAsync(new2.data(), d_in2, tensor_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); for (int i = 0; i < 2; ++i) { VERIFY_IS_APPROX(new1(i), 3.14f); VERIFY_IS_NOT_EQUAL(new2(i), in2(i)); } cudaFree(d_in1); cudaFree(d_in2); } void test_cuda_elementwise_small() { Tensor<float, 1> in1(Eigen::array<Eigen::DenseIndex, 1>(2)); Tensor<float, 1> in2(Eigen::array<Eigen::DenseIndex, 1>(2)); Tensor<float, 1> out(Eigen::array<Eigen::DenseIndex, 1>(2)); in1.setRandom(); in2.setRandom(); std::size_t in1_bytes = in1.size() * sizeof(float); std::size_t in2_bytes = in2.size() * sizeof(float); std::size_t out_bytes = out.size() * sizeof(float); float* d_in1; float* d_in2; float* d_out; cudaMalloc((void**)(&d_in1), in1_bytes); cudaMalloc((void**)(&d_in2), in2_bytes); cudaMalloc((void**)(&d_out), out_bytes); cudaMemcpy(d_in1, in1.data(), in1_bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_in2, in2.data(), in2_bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_in1( d_in1, Eigen::array<Eigen::DenseIndex, 1>(2)); Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_in2( d_in2, Eigen::array<Eigen::DenseIndex, 1>(2)); Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_out( d_out, Eigen::array<Eigen::DenseIndex, 1>(2)); gpu_out.device(gpu_device) = gpu_in1 + gpu_in2; assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); for (int i = 0; i < 2; ++i) { VERIFY_IS_APPROX( out(Eigen::array<Eigen::DenseIndex, 1>(i)), in1(Eigen::array<Eigen::DenseIndex, 1>(i)) + in2(Eigen::array<Eigen::DenseIndex, 1>(i))); } cudaFree(d_in1); cudaFree(d_in2); cudaFree(d_out); } void test_cuda_elementwise() { 
Tensor<float, 3> in1(Eigen::array<Eigen::DenseIndex, 3>(72,53,97)); Tensor<float, 3> in2(Eigen::array<Eigen::DenseIndex, 3>(72,53,97)); Tensor<float, 3> in3(Eigen::array<Eigen::DenseIndex, 3>(72,53,97)); Tensor<float, 3> out(Eigen::array<Eigen::DenseIndex, 3>(72,53,97)); in1.setRandom(); in2.setRandom(); in3.setRandom(); std::size_t in1_bytes = in1.size() * sizeof(float); std::size_t in2_bytes = in2.size() * sizeof(float); std::size_t in3_bytes = in3.size() * sizeof(float); std::size_t out_bytes = out.size() * sizeof(float); float* d_in1; float* d_in2; float* d_in3; float* d_out; cudaMalloc((void**)(&d_in1), in1_bytes); cudaMalloc((void**)(&d_in2), in2_bytes); cudaMalloc((void**)(&d_in3), in3_bytes); cudaMalloc((void**)(&d_out), out_bytes); cudaMemcpy(d_in1, in1.data(), in1_bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_in2, in2.data(), in2_bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_in3, in3.data(), in3_bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_in1(d_in1, Eigen::array<Eigen::DenseIndex, 3>(72,53,97)); Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_in2(d_in2, Eigen::array<Eigen::DenseIndex, 3>(72,53,97)); Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_in3(d_in3, Eigen::array<Eigen::DenseIndex, 3>(72,53,97)); Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_out(d_out, Eigen::array<Eigen::DenseIndex, 3>(72,53,97)); gpu_out.device(gpu_device) = gpu_in1 + gpu_in2 * gpu_in3; assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); for (int i = 0; i < 72; ++i) { for (int j = 0; j < 53; ++j) { for (int k = 0; k < 97; ++k) { VERIFY_IS_APPROX(out(Eigen::array<Eigen::DenseIndex, 3>(i,j,k)), in1(Eigen::array<Eigen::DenseIndex, 3>(i,j,k)) + in2(Eigen::array<Eigen::DenseIndex, 3>(i,j,k)) * in3(Eigen::array<Eigen::DenseIndex, 3>(i,j,k))); } } } cudaFree(d_in1); cudaFree(d_in2); cudaFree(d_in3); cudaFree(d_out); } void test_cuda_props() { Tensor<float, 1> in1(200); Tensor<bool, 1> out(200); in1.setRandom(); std::size_t in1_bytes = in1.size() * sizeof(float); std::size_t out_bytes = out.size() * sizeof(bool); float* d_in1; bool* d_out; cudaMalloc((void**)(&d_in1), in1_bytes); cudaMalloc((void**)(&d_out), out_bytes); cudaMemcpy(d_in1, in1.data(), in1_bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_in1( d_in1, 200); Eigen::TensorMap<Eigen::Tensor<bool, 1>, Eigen::Aligned> gpu_out( d_out, 200); gpu_out.device(gpu_device) = (gpu_in1.isnan)(); assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); for (int i = 0; i < 200; ++i) { VERIFY_IS_EQUAL(out(i), (std::isnan)(in1(i))); } cudaFree(d_in1); cudaFree(d_out); } void test_cuda_reduction() { Tensor<float, 4> in1(72,53,97,113); Tensor<float, 2> out(72,97); in1.setRandom(); std::size_t in1_bytes = in1.size() * sizeof(float); std::size_t out_bytes = out.size() * sizeof(float); float* d_in1; float* d_out; cudaMalloc((void**)(&d_in1), in1_bytes); cudaMalloc((void**)(&d_out), out_bytes); cudaMemcpy(d_in1, in1.data(), in1_bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 4> > gpu_in1(d_in1, 72,53,97,113); 
Eigen::TensorMap<Eigen::Tensor<float, 2> > gpu_out(d_out, 72,97); array<Eigen::DenseIndex, 2> reduction_axis; reduction_axis[0] = 1; reduction_axis[1] = 3; gpu_out.device(gpu_device) = gpu_in1.maximum(reduction_axis); assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); for (int i = 0; i < 72; ++i) { for (int j = 0; j < 97; ++j) { float expected = 0; for (int k = 0; k < 53; ++k) { for (int l = 0; l < 113; ++l) { expected = std::max<float>(expected, in1(i, k, j, l)); } } VERIFY_IS_APPROX(out(i,j), expected); } } cudaFree(d_in1); cudaFree(d_out); } template<int DataLayout> void test_cuda_contraction() { // with these dimensions, the output has 300 * 140 elements, which is // more than 30 * 1024, which is the number of threads in blocks on // a 15 SM GK110 GPU Tensor<float, 4, DataLayout> t_left(6, 50, 3, 31); Tensor<float, 5, DataLayout> t_right(Eigen::array<Eigen::DenseIndex, 5>(3, 31, 7, 20, 1)); Tensor<float, 5, DataLayout> t_result(Eigen::array<Eigen::DenseIndex, 5>(6, 50, 7, 20, 1)); t_left.setRandom(); t_right.setRandom(); std::size_t t_left_bytes = t_left.size() * sizeof(float); std::size_t t_right_bytes = t_right.size() * sizeof(float); std::size_t t_result_bytes = t_result.size() * sizeof(float); float* d_t_left; float* d_t_right; float* d_t_result; cudaMalloc((void**)(&d_t_left), t_left_bytes); cudaMalloc((void**)(&d_t_right), t_right_bytes); cudaMalloc((void**)(&d_t_result), t_result_bytes); cudaMemcpy(d_t_left, t_left.data(), t_left_bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_t_right, t_right.data(), t_right_bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 4, DataLayout> > gpu_t_left(d_t_left, 6, 50, 3, 31); Eigen::TensorMap<Eigen::Tensor<float, 5, DataLayout> > gpu_t_right(d_t_right, 3, 31, 7, 20, 1); Eigen::TensorMap<Eigen::Tensor<float, 5, DataLayout> > gpu_t_result(d_t_result, 6, 50, 7, 20, 1); typedef Eigen::Map<Eigen::Matrix<float, Dynamic, Dynamic, DataLayout> > MapXf; MapXf m_left(t_left.data(), 300, 93); MapXf m_right(t_right.data(), 93, 140); Eigen::Matrix<float, Dynamic, Dynamic, DataLayout> m_result(300, 140); typedef Tensor<float, 1>::DimensionPair DimPair; Eigen::array<DimPair, 2> dims; dims[0] = DimPair(2, 0); dims[1] = DimPair(3, 1); m_result = m_left * m_right; gpu_t_result.device(gpu_device) = gpu_t_left.contract(gpu_t_right, dims); cudaMemcpy(t_result.data(), d_t_result, t_result_bytes, cudaMemcpyDeviceToHost); for (DenseIndex i = 0; i < t_result.size(); i++) { if (fabs(t_result.data()[i] - m_result.data()[i]) >= 1e-4f) { std::cout << "mismatch detected at index " << i << ": " << t_result.data()[i] << " vs " << m_result.data()[i] << std::endl; assert(false); } } cudaFree(d_t_left); cudaFree(d_t_right); cudaFree(d_t_result); } template<int DataLayout> void test_cuda_convolution_1d() { Tensor<float, 4, DataLayout> input(74,37,11,137); Tensor<float, 1, DataLayout> kernel(4); Tensor<float, 4, DataLayout> out(74,34,11,137); input = input.constant(10.0f) + input.random(); kernel = kernel.constant(7.0f) + kernel.random(); std::size_t input_bytes = input.size() * sizeof(float); std::size_t kernel_bytes = kernel.size() * sizeof(float); std::size_t out_bytes = out.size() * sizeof(float); float* d_input; float* d_kernel; float* d_out; cudaMalloc((void**)(&d_input), input_bytes); cudaMalloc((void**)(&d_kernel), kernel_bytes); cudaMalloc((void**)(&d_out), 
out_bytes); cudaMemcpy(d_input, input.data(), input_bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_kernel, kernel.data(), kernel_bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 4, DataLayout> > gpu_input(d_input, 74,37,11,137); Eigen::TensorMap<Eigen::Tensor<float, 1, DataLayout> > gpu_kernel(d_kernel, 4); Eigen::TensorMap<Eigen::Tensor<float, 4, DataLayout> > gpu_out(d_out, 74,34,11,137); Eigen::array<Eigen::DenseIndex, 1> dims(1); gpu_out.device(gpu_device) = gpu_input.convolve(gpu_kernel, dims); assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); for (int i = 0; i < 74; ++i) { for (int j = 0; j < 34; ++j) { for (int k = 0; k < 11; ++k) { for (int l = 0; l < 137; ++l) { const float result = out(i,j,k,l); const float expected = input(i,j+0,k,l) * kernel(0) + input(i,j+1,k,l) * kernel(1) + input(i,j+2,k,l) * kernel(2) + input(i,j+3,k,l) * kernel(3); VERIFY_IS_APPROX(result, expected); } } } } cudaFree(d_input); cudaFree(d_kernel); cudaFree(d_out); } void test_cuda_convolution_inner_dim_col_major_1d() { Tensor<float, 4, ColMajor> input(74,9,11,7); Tensor<float, 1, ColMajor> kernel(4); Tensor<float, 4, ColMajor> out(71,9,11,7); input = input.constant(10.0f) + input.random(); kernel = kernel.constant(7.0f) + kernel.random(); std::size_t input_bytes = input.size() * sizeof(float); std::size_t kernel_bytes = kernel.size() * sizeof(float); std::size_t out_bytes = out.size() * sizeof(float); float* d_input; float* d_kernel; float* d_out; cudaMalloc((void**)(&d_input), input_bytes); cudaMalloc((void**)(&d_kernel), kernel_bytes); cudaMalloc((void**)(&d_out), out_bytes); cudaMemcpy(d_input, input.data(), input_bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_kernel, kernel.data(), kernel_bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 4, ColMajor> > gpu_input(d_input,74,9,11,7); Eigen::TensorMap<Eigen::Tensor<float, 1, ColMajor> > gpu_kernel(d_kernel,4); Eigen::TensorMap<Eigen::Tensor<float, 4, ColMajor> > gpu_out(d_out,71,9,11,7); Eigen::array<Eigen::DenseIndex, 1> dims(0); gpu_out.device(gpu_device) = gpu_input.convolve(gpu_kernel, dims); assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); for (int i = 0; i < 71; ++i) { for (int j = 0; j < 9; ++j) { for (int k = 0; k < 11; ++k) { for (int l = 0; l < 7; ++l) { const float result = out(i,j,k,l); const float expected = input(i+0,j,k,l) * kernel(0) + input(i+1,j,k,l) * kernel(1) + input(i+2,j,k,l) * kernel(2) + input(i+3,j,k,l) * kernel(3); VERIFY_IS_APPROX(result, expected); } } } } cudaFree(d_input); cudaFree(d_kernel); cudaFree(d_out); } void test_cuda_convolution_inner_dim_row_major_1d() { Tensor<float, 4, RowMajor> input(7,9,11,74); Tensor<float, 1, RowMajor> kernel(4); Tensor<float, 4, RowMajor> out(7,9,11,71); input = input.constant(10.0f) + input.random(); kernel = kernel.constant(7.0f) + kernel.random(); std::size_t input_bytes = input.size() * sizeof(float); std::size_t kernel_bytes = kernel.size() * sizeof(float); std::size_t out_bytes = out.size() * sizeof(float); float* d_input; float* d_kernel; float* d_out; cudaMalloc((void**)(&d_input), input_bytes); cudaMalloc((void**)(&d_kernel), kernel_bytes); 
cudaMalloc((void**)(&d_out), out_bytes); cudaMemcpy(d_input, input.data(), input_bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_kernel, kernel.data(), kernel_bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 4, RowMajor> > gpu_input(d_input, 7,9,11,74); Eigen::TensorMap<Eigen::Tensor<float, 1, RowMajor> > gpu_kernel(d_kernel, 4); Eigen::TensorMap<Eigen::Tensor<float, 4, RowMajor> > gpu_out(d_out, 7,9,11,71); Eigen::array<Eigen::DenseIndex, 1> dims(3); gpu_out.device(gpu_device) = gpu_input.convolve(gpu_kernel, dims); assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); for (int i = 0; i < 7; ++i) { for (int j = 0; j < 9; ++j) { for (int k = 0; k < 11; ++k) { for (int l = 0; l < 71; ++l) { const float result = out(i,j,k,l); const float expected = input(i,j,k,l+0) * kernel(0) + input(i,j,k,l+1) * kernel(1) + input(i,j,k,l+2) * kernel(2) + input(i,j,k,l+3) * kernel(3); VERIFY_IS_APPROX(result, expected); } } } } cudaFree(d_input); cudaFree(d_kernel); cudaFree(d_out); } template<int DataLayout> void test_cuda_convolution_2d() { Tensor<float, 4, DataLayout> input(74,37,11,137); Tensor<float, 2, DataLayout> kernel(3,4); Tensor<float, 4, DataLayout> out(74,35,8,137); input = input.constant(10.0f) + input.random(); kernel = kernel.constant(7.0f) + kernel.random(); std::size_t input_bytes = input.size() * sizeof(float); std::size_t kernel_bytes = kernel.size() * sizeof(float); std::size_t out_bytes = out.size() * sizeof(float); float* d_input; float* d_kernel; float* d_out; cudaMalloc((void**)(&d_input), input_bytes); cudaMalloc((void**)(&d_kernel), kernel_bytes); cudaMalloc((void**)(&d_out), out_bytes); cudaMemcpy(d_input, input.data(), input_bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_kernel, kernel.data(), kernel_bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 4, DataLayout> > gpu_input(d_input,74,37,11,137); Eigen::TensorMap<Eigen::Tensor<float, 2, DataLayout> > gpu_kernel(d_kernel,3,4); Eigen::TensorMap<Eigen::Tensor<float, 4, DataLayout> > gpu_out(d_out,74,35,8,137); Eigen::array<Eigen::DenseIndex, 2> dims(1,2); gpu_out.device(gpu_device) = gpu_input.convolve(gpu_kernel, dims); assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); for (int i = 0; i < 74; ++i) { for (int j = 0; j < 35; ++j) { for (int k = 0; k < 8; ++k) { for (int l = 0; l < 137; ++l) { const float result = out(i,j,k,l); const float expected = input(i,j+0,k+0,l) * kernel(0,0) + input(i,j+1,k+0,l) * kernel(1,0) + input(i,j+2,k+0,l) * kernel(2,0) + input(i,j+0,k+1,l) * kernel(0,1) + input(i,j+1,k+1,l) * kernel(1,1) + input(i,j+2,k+1,l) * kernel(2,1) + input(i,j+0,k+2,l) * kernel(0,2) + input(i,j+1,k+2,l) * kernel(1,2) + input(i,j+2,k+2,l) * kernel(2,2) + input(i,j+0,k+3,l) * kernel(0,3) + input(i,j+1,k+3,l) * kernel(1,3) + input(i,j+2,k+3,l) * kernel(2,3); VERIFY_IS_APPROX(result, expected); } } } } cudaFree(d_input); cudaFree(d_kernel); cudaFree(d_out); } template<int DataLayout> void test_cuda_convolution_3d() { Tensor<float, 5, DataLayout> input(Eigen::array<Eigen::DenseIndex, 5>(74,37,11,137,17)); Tensor<float, 3, DataLayout> kernel(3,4,2); Tensor<float, 5, DataLayout> 
out(Eigen::array<Eigen::DenseIndex, 5>(74,35,8,136,17)); input = input.constant(10.0f) + input.random(); kernel = kernel.constant(7.0f) + kernel.random(); std::size_t input_bytes = input.size() * sizeof(float); std::size_t kernel_bytes = kernel.size() * sizeof(float); std::size_t out_bytes = out.size() * sizeof(float); float* d_input; float* d_kernel; float* d_out; cudaMalloc((void**)(&d_input), input_bytes); cudaMalloc((void**)(&d_kernel), kernel_bytes); cudaMalloc((void**)(&d_out), out_bytes); cudaMemcpy(d_input, input.data(), input_bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_kernel, kernel.data(), kernel_bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 5, DataLayout> > gpu_input(d_input,74,37,11,137,17); Eigen::TensorMap<Eigen::Tensor<float, 3, DataLayout> > gpu_kernel(d_kernel,3,4,2); Eigen::TensorMap<Eigen::Tensor<float, 5, DataLayout> > gpu_out(d_out,74,35,8,136,17); Eigen::array<Eigen::DenseIndex, 3> dims(1,2,3); gpu_out.device(gpu_device) = gpu_input.convolve(gpu_kernel, dims); assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); for (int i = 0; i < 74; ++i) { for (int j = 0; j < 35; ++j) { for (int k = 0; k < 8; ++k) { for (int l = 0; l < 136; ++l) { for (int m = 0; m < 17; ++m) { const float result = out(i,j,k,l,m); const float expected = input(i,j+0,k+0,l+0,m) * kernel(0,0,0) + input(i,j+1,k+0,l+0,m) * kernel(1,0,0) + input(i,j+2,k+0,l+0,m) * kernel(2,0,0) + input(i,j+0,k+1,l+0,m) * kernel(0,1,0) + input(i,j+1,k+1,l+0,m) * kernel(1,1,0) + input(i,j+2,k+1,l+0,m) * kernel(2,1,0) + input(i,j+0,k+2,l+0,m) * kernel(0,2,0) + input(i,j+1,k+2,l+0,m) * kernel(1,2,0) + input(i,j+2,k+2,l+0,m) * kernel(2,2,0) + input(i,j+0,k+3,l+0,m) * kernel(0,3,0) + input(i,j+1,k+3,l+0,m) * kernel(1,3,0) + input(i,j+2,k+3,l+0,m) * kernel(2,3,0) + input(i,j+0,k+0,l+1,m) * kernel(0,0,1) + input(i,j+1,k+0,l+1,m) * kernel(1,0,1) + input(i,j+2,k+0,l+1,m) * kernel(2,0,1) + input(i,j+0,k+1,l+1,m) * kernel(0,1,1) + input(i,j+1,k+1,l+1,m) * kernel(1,1,1) + input(i,j+2,k+1,l+1,m) * kernel(2,1,1) + input(i,j+0,k+2,l+1,m) * kernel(0,2,1) + input(i,j+1,k+2,l+1,m) * kernel(1,2,1) + input(i,j+2,k+2,l+1,m) * kernel(2,2,1) + input(i,j+0,k+3,l+1,m) * kernel(0,3,1) + input(i,j+1,k+3,l+1,m) * kernel(1,3,1) + input(i,j+2,k+3,l+1,m) * kernel(2,3,1); VERIFY_IS_APPROX(result, expected); } } } } } cudaFree(d_input); cudaFree(d_kernel); cudaFree(d_out); } template <typename Scalar> void test_cuda_lgamma(const Scalar stddev) { Tensor<Scalar, 2> in(72,97); in.setRandom(); in *= in.constant(stddev); Tensor<Scalar, 2> out(72,97); out.setZero(); std::size_t bytes = in.size() * sizeof(Scalar); Scalar* d_in; Scalar* d_out; cudaMalloc((void**)(&d_in), bytes); cudaMalloc((void**)(&d_out), bytes); cudaMemcpy(d_in, in.data(), bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_in(d_in, 72, 97); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_out(d_out, 72, 97); gpu_out.device(gpu_device) = gpu_in.lgamma(); assert(cudaMemcpyAsync(out.data(), d_out, bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); for (int i = 0; i < 72; ++i) { for (int j = 0; j < 97; ++j) { VERIFY_IS_APPROX(out(i,j), (std::lgamma)(in(i,j))); } } cudaFree(d_in); cudaFree(d_out); 
} template <typename Scalar> void test_cuda_digamma() { Tensor<Scalar, 1> in(7); Tensor<Scalar, 1> out(7); Tensor<Scalar, 1> expected_out(7); out.setZero(); in(0) = Scalar(1); in(1) = Scalar(1.5); in(2) = Scalar(4); in(3) = Scalar(-10.5); in(4) = Scalar(10000.5); in(5) = Scalar(0); in(6) = Scalar(-1); expected_out(0) = Scalar(-0.5772156649015329); expected_out(1) = Scalar(0.03648997397857645); expected_out(2) = Scalar(1.2561176684318); expected_out(3) = Scalar(2.398239129535781); expected_out(4) = Scalar(9.210340372392849); expected_out(5) = std::numeric_limits<Scalar>::infinity(); expected_out(6) = std::numeric_limits<Scalar>::infinity(); std::size_t bytes = in.size() * sizeof(Scalar); Scalar* d_in; Scalar* d_out; cudaMalloc((void**)(&d_in), bytes); cudaMalloc((void**)(&d_out), bytes); cudaMemcpy(d_in, in.data(), bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in(d_in, 7); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_out(d_out, 7); gpu_out.device(gpu_device) = gpu_in.digamma(); assert(cudaMemcpyAsync(out.data(), d_out, bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); for (int i = 0; i < 5; ++i) { VERIFY_IS_APPROX(out(i), expected_out(i)); } for (int i = 5; i < 7; ++i) { VERIFY_IS_EQUAL(out(i), expected_out(i)); } cudaFree(d_in); cudaFree(d_out); } template <typename Scalar> void test_cuda_zeta() { Tensor<Scalar, 1> in_x(6); Tensor<Scalar, 1> in_q(6); Tensor<Scalar, 1> out(6); Tensor<Scalar, 1> expected_out(6); out.setZero(); in_x(0) = Scalar(1); in_x(1) = Scalar(1.5); in_x(2) = Scalar(4); in_x(3) = Scalar(-10.5); in_x(4) = Scalar(10000.5); in_x(5) = Scalar(3); in_q(0) = Scalar(1.2345); in_q(1) = Scalar(2); in_q(2) = Scalar(1.5); in_q(3) = Scalar(3); in_q(4) = Scalar(1.0001); in_q(5) = Scalar(-2.5); expected_out(0) = std::numeric_limits<Scalar>::infinity(); expected_out(1) = Scalar(1.61237534869); expected_out(2) = Scalar(0.234848505667); expected_out(3) = Scalar(1.03086757337e-5); expected_out(4) = Scalar(0.367879440865); expected_out(5) = Scalar(0.054102025820864097); std::size_t bytes = in_x.size() * sizeof(Scalar); Scalar* d_in_x; Scalar* d_in_q; Scalar* d_out; cudaMalloc((void**)(&d_in_x), bytes); cudaMalloc((void**)(&d_in_q), bytes); cudaMalloc((void**)(&d_out), bytes); cudaMemcpy(d_in_x, in_x.data(), bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_in_q, in_q.data(), bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in_x(d_in_x, 6); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in_q(d_in_q, 6); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_out(d_out, 6); gpu_out.device(gpu_device) = gpu_in_x.zeta(gpu_in_q); assert(cudaMemcpyAsync(out.data(), d_out, bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); VERIFY_IS_EQUAL(out(0), expected_out(0)); VERIFY((std::isnan)(out(3))); for (int i = 1; i < 6; ++i) { if (i != 3) { VERIFY_IS_APPROX(out(i), expected_out(i)); } } cudaFree(d_in_x); cudaFree(d_in_q); cudaFree(d_out); } template <typename Scalar> void test_cuda_polygamma() { Tensor<Scalar, 1> in_x(7); Tensor<Scalar, 1> in_n(7); Tensor<Scalar, 1> out(7); Tensor<Scalar, 1> expected_out(7); out.setZero(); in_n(0) = Scalar(1); in_n(1) = Scalar(1); in_n(2) = Scalar(1); in_n(3) = Scalar(17); in_n(4) = Scalar(31); in_n(5) = 
Scalar(28); in_n(6) = Scalar(8); in_x(0) = Scalar(2); in_x(1) = Scalar(3); in_x(2) = Scalar(25.5); in_x(3) = Scalar(4.7); in_x(4) = Scalar(11.8); in_x(5) = Scalar(17.7); in_x(6) = Scalar(30.2); expected_out(0) = Scalar(0.644934066848); expected_out(1) = Scalar(0.394934066848); expected_out(2) = Scalar(0.0399946696496); expected_out(3) = Scalar(293.334565435); expected_out(4) = Scalar(0.445487887616); expected_out(5) = Scalar(-2.47810300902e-07); expected_out(6) = Scalar(-8.29668781082e-09); std::size_t bytes = in_x.size() * sizeof(Scalar); Scalar* d_in_x; Scalar* d_in_n; Scalar* d_out; cudaMalloc((void**)(&d_in_x), bytes); cudaMalloc((void**)(&d_in_n), bytes); cudaMalloc((void**)(&d_out), bytes); cudaMemcpy(d_in_x, in_x.data(), bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_in_n, in_n.data(), bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in_x(d_in_x, 7); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in_n(d_in_n, 7); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_out(d_out, 7); gpu_out.device(gpu_device) = gpu_in_n.polygamma(gpu_in_x); assert(cudaMemcpyAsync(out.data(), d_out, bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); for (int i = 0; i < 7; ++i) { VERIFY_IS_APPROX(out(i), expected_out(i)); } cudaFree(d_in_x); cudaFree(d_in_n); cudaFree(d_out); } template <typename Scalar> void test_cuda_igamma() { Tensor<Scalar, 2> a(6, 6); Tensor<Scalar, 2> x(6, 6); Tensor<Scalar, 2> out(6, 6); out.setZero(); Scalar a_s[] = {Scalar(0), Scalar(1), Scalar(1.5), Scalar(4), Scalar(0.0001), Scalar(1000.5)}; Scalar x_s[] = {Scalar(0), Scalar(1), Scalar(1.5), Scalar(4), Scalar(0.0001), Scalar(1000.5)}; for (int i = 0; i < 6; ++i) { for (int j = 0; j < 6; ++j) { a(i, j) = a_s[i]; x(i, j) = x_s[j]; } } Scalar nan = std::numeric_limits<Scalar>::quiet_NaN(); Scalar igamma_s[][6] = {{0.0, nan, nan, nan, nan, nan}, {0.0, 0.6321205588285578, 0.7768698398515702, 0.9816843611112658, 9.999500016666262e-05, 1.0}, {0.0, 0.4275932955291202, 0.608374823728911, 0.9539882943107686, 7.522076445089201e-07, 1.0}, {0.0, 0.01898815687615381, 0.06564245437845008, 0.5665298796332909, 4.166333347221828e-18, 1.0}, {0.0, 0.9999780593618628, 0.9999899967080838, 0.9999996219837988, 0.9991370418689945, 1.0}, {0.0, 0.0, 0.0, 0.0, 0.0, 0.5042041932513908}}; std::size_t bytes = a.size() * sizeof(Scalar); Scalar* d_a; Scalar* d_x; Scalar* d_out; assert(cudaMalloc((void**)(&d_a), bytes) == cudaSuccess); assert(cudaMalloc((void**)(&d_x), bytes) == cudaSuccess); assert(cudaMalloc((void**)(&d_out), bytes) == cudaSuccess); cudaMemcpy(d_a, a.data(), bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_x, x.data(), bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_a(d_a, 6, 6); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_x(d_x, 6, 6); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_out(d_out, 6, 6); gpu_out.device(gpu_device) = gpu_a.igamma(gpu_x); assert(cudaMemcpyAsync(out.data(), d_out, bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); for (int i = 0; i < 6; ++i) { for (int j = 0; j < 6; ++j) { if ((std::isnan)(igamma_s[i][j])) { VERIFY((std::isnan)(out(i, j))); } else { VERIFY_IS_APPROX(out(i, j), igamma_s[i][j]); } } } cudaFree(d_a); cudaFree(d_x); cudaFree(d_out); } 
template <typename Scalar> void test_cuda_igammac() { Tensor<Scalar, 2> a(6, 6); Tensor<Scalar, 2> x(6, 6); Tensor<Scalar, 2> out(6, 6); out.setZero(); Scalar a_s[] = {Scalar(0), Scalar(1), Scalar(1.5), Scalar(4), Scalar(0.0001), Scalar(1000.5)}; Scalar x_s[] = {Scalar(0), Scalar(1), Scalar(1.5), Scalar(4), Scalar(0.0001), Scalar(1000.5)}; for (int i = 0; i < 6; ++i) { for (int j = 0; j < 6; ++j) { a(i, j) = a_s[i]; x(i, j) = x_s[j]; } } Scalar nan = std::numeric_limits<Scalar>::quiet_NaN(); Scalar igammac_s[][6] = {{nan, nan, nan, nan, nan, nan}, {1.0, 0.36787944117144233, 0.22313016014842982, 0.018315638888734182, 0.9999000049998333, 0.0}, {1.0, 0.5724067044708798, 0.3916251762710878, 0.04601170568923136, 0.9999992477923555, 0.0}, {1.0, 0.9810118431238462, 0.9343575456215499, 0.4334701203667089, 1.0, 0.0}, {1.0, 2.1940638138146658e-05, 1.0003291916285e-05, 3.7801620118431334e-07, 0.0008629581310054535, 0.0}, {1.0, 1.0, 1.0, 1.0, 1.0, 0.49579580674813944}}; std::size_t bytes = a.size() * sizeof(Scalar); Scalar* d_a; Scalar* d_x; Scalar* d_out; cudaMalloc((void**)(&d_a), bytes); cudaMalloc((void**)(&d_x), bytes); cudaMalloc((void**)(&d_out), bytes); cudaMemcpy(d_a, a.data(), bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_x, x.data(), bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_a(d_a, 6, 6); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_x(d_x, 6, 6); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_out(d_out, 6, 6); gpu_out.device(gpu_device) = gpu_a.igammac(gpu_x); assert(cudaMemcpyAsync(out.data(), d_out, bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); for (int i = 0; i < 6; ++i) { for (int j = 0; j < 6; ++j) { if ((std::isnan)(igammac_s[i][j])) { VERIFY((std::isnan)(out(i, j))); } else { VERIFY_IS_APPROX(out(i, j), igammac_s[i][j]); } } } cudaFree(d_a); cudaFree(d_x); cudaFree(d_out); } template <typename Scalar> void test_cuda_erf(const Scalar stddev) { Tensor<Scalar, 2> in(72,97); in.setRandom(); in *= in.constant(stddev); Tensor<Scalar, 2> out(72,97); out.setZero(); std::size_t bytes = in.size() * sizeof(Scalar); Scalar* d_in; Scalar* d_out; assert(cudaMalloc((void**)(&d_in), bytes) == cudaSuccess); assert(cudaMalloc((void**)(&d_out), bytes) == cudaSuccess); cudaMemcpy(d_in, in.data(), bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_in(d_in, 72, 97); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_out(d_out, 72, 97); gpu_out.device(gpu_device) = gpu_in.erf(); assert(cudaMemcpyAsync(out.data(), d_out, bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); for (int i = 0; i < 72; ++i) { for (int j = 0; j < 97; ++j) { VERIFY_IS_APPROX(out(i,j), (std::erf)(in(i,j))); } } cudaFree(d_in); cudaFree(d_out); } template <typename Scalar> void test_cuda_erfc(const Scalar stddev) { Tensor<Scalar, 2> in(72,97); in.setRandom(); in *= in.constant(stddev); Tensor<Scalar, 2> out(72,97); out.setZero(); std::size_t bytes = in.size() * sizeof(Scalar); Scalar* d_in; Scalar* d_out; cudaMalloc((void**)(&d_in), bytes); cudaMalloc((void**)(&d_out), bytes); cudaMemcpy(d_in, in.data(), bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_in(d_in, 
72, 97); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_out(d_out, 72, 97); gpu_out.device(gpu_device) = gpu_in.erfc(); assert(cudaMemcpyAsync(out.data(), d_out, bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); for (int i = 0; i < 72; ++i) { for (int j = 0; j < 97; ++j) { VERIFY_IS_APPROX(out(i,j), (std::erfc)(in(i,j))); } } cudaFree(d_in); cudaFree(d_out); } template <typename Scalar> void test_cuda_betainc() { Tensor<Scalar, 1> in_x(125); Tensor<Scalar, 1> in_a(125); Tensor<Scalar, 1> in_b(125); Tensor<Scalar, 1> out(125); Tensor<Scalar, 1> expected_out(125); out.setZero(); Scalar nan = std::numeric_limits<Scalar>::quiet_NaN(); Array<Scalar, 1, Dynamic> x(125); Array<Scalar, 1, Dynamic> a(125); Array<Scalar, 1, Dynamic> b(125); Array<Scalar, 1, Dynamic> v(125); a << 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999; b << 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 999.999, 999.999, 999.999, 999.999, 999.999, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 999.999, 999.999, 999.999, 999.999, 999.999, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 999.999, 999.999, 999.999, 999.999, 999.999, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 
31.62177660168379, 999.999, 999.999, 999.999, 999.999, 999.999, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 999.999, 999.999, 999.999, 999.999, 999.999; x << -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1; v << nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.47972119876364683, 0.5, 0.5202788012363533, nan, nan, 0.9518683957740043, 0.9789663010413743, 0.9931729188073435, nan, nan, 0.999995949033062, 0.9999999999993698, 0.9999999999999999, nan, nan, 0.9999999999999999, 0.9999999999999999, 0.9999999999999999, nan, nan, nan, nan, nan, nan, nan, 0.006827081192655869, 0.0210336989586256, 0.04813160422599567, nan, nan, 0.20014344256217678, 0.5000000000000001, 0.7998565574378232, nan, nan, 0.9991401428435834, 0.999999999698403, 0.9999999999999999, nan, nan, 0.9999999999999999, 0.9999999999999999, 0.9999999999999999, nan, nan, nan, nan, nan, nan, nan, 1.0646600232370887e-25, 6.301722877826246e-13, 4.050966937974938e-06, nan, nan, 7.864342668429763e-23, 3.015969667594166e-10, 0.0008598571564165444, nan, nan, 6.031987710123844e-08, 0.5000000000000007, 0.9999999396801229, nan, nan, 0.9999999999999999, 0.9999999999999999, 0.9999999999999999, nan, nan, nan, nan, nan, nan, nan, 0.0, 7.029920380986636e-306, 2.2450728208591345e-101, nan, nan, 0.0, 9.275871147869727e-302, 1.2232913026152827e-97, nan, nan, 0.0, 3.0891393081932924e-252, 2.9303043666183996e-60, nan, nan, 2.248913486879199e-196, 0.5000000000004947, 0.9999999999999999, nan; for (int i = 0; i < 125; ++i) { in_x(i) = x(i); in_a(i) = a(i); in_b(i) = b(i); expected_out(i) = v(i); } std::size_t bytes = in_x.size() * sizeof(Scalar); Scalar* d_in_x; Scalar* d_in_a; Scalar* d_in_b; Scalar* d_out; cudaMalloc((void**)(&d_in_x), bytes); cudaMalloc((void**)(&d_in_a), bytes); cudaMalloc((void**)(&d_in_b), bytes); cudaMalloc((void**)(&d_out), bytes); cudaMemcpy(d_in_x, in_x.data(), bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_in_a, in_a.data(), bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_in_b, in_b.data(), bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in_x(d_in_x, 125); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in_a(d_in_a, 125); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in_b(d_in_b, 125); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_out(d_out, 125); gpu_out.device(gpu_device) = betainc(gpu_in_a, gpu_in_b, gpu_in_x); assert(cudaMemcpyAsync(out.data(), d_out, bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); for (int i = 1; i < 125; ++i) { if 
((std::isnan)(expected_out(i))) { VERIFY((std::isnan)(out(i))); } else { VERIFY_IS_APPROX(out(i), expected_out(i)); } } cudaFree(d_in_x); cudaFree(d_in_a); cudaFree(d_in_b); cudaFree(d_out); } void test_cxx11_tensor_cuda() { CALL_SUBTEST_1(test_cuda_nullary()); CALL_SUBTEST_1(test_cuda_elementwise_small()); CALL_SUBTEST_1(test_cuda_elementwise()); CALL_SUBTEST_1(test_cuda_props()); CALL_SUBTEST_1(test_cuda_reduction()); CALL_SUBTEST_2(test_cuda_contraction<ColMajor>()); CALL_SUBTEST_2(test_cuda_contraction<RowMajor>()); CALL_SUBTEST_3(test_cuda_convolution_1d<ColMajor>()); CALL_SUBTEST_3(test_cuda_convolution_1d<RowMajor>()); CALL_SUBTEST_3(test_cuda_convolution_inner_dim_col_major_1d()); CALL_SUBTEST_3(test_cuda_convolution_inner_dim_row_major_1d()); CALL_SUBTEST_3(test_cuda_convolution_2d<ColMajor>()); CALL_SUBTEST_3(test_cuda_convolution_2d<RowMajor>()); CALL_SUBTEST_3(test_cuda_convolution_3d<ColMajor>()); CALL_SUBTEST_3(test_cuda_convolution_3d<RowMajor>()); #if __cplusplus > 199711L // std::erf, std::erfc, and so on where only added in c++11. We use them // as a golden reference to validate the results produced by Eigen. Therefore // we can only run these tests if we use a c++11 compiler. CALL_SUBTEST_4(test_cuda_lgamma<float>(1.0f)); CALL_SUBTEST_4(test_cuda_lgamma<float>(100.0f)); CALL_SUBTEST_4(test_cuda_lgamma<float>(0.01f)); CALL_SUBTEST_4(test_cuda_lgamma<float>(0.001f)); CALL_SUBTEST_4(test_cuda_lgamma<double>(1.0)); CALL_SUBTEST_4(test_cuda_lgamma<double>(100.0)); CALL_SUBTEST_4(test_cuda_lgamma<double>(0.01)); CALL_SUBTEST_4(test_cuda_lgamma<double>(0.001)); CALL_SUBTEST_4(test_cuda_erf<float>(1.0f)); CALL_SUBTEST_4(test_cuda_erf<float>(100.0f)); CALL_SUBTEST_4(test_cuda_erf<float>(0.01f)); CALL_SUBTEST_4(test_cuda_erf<float>(0.001f)); CALL_SUBTEST_4(test_cuda_erfc<float>(1.0f)); // CALL_SUBTEST(test_cuda_erfc<float>(100.0f)); CALL_SUBTEST_4(test_cuda_erfc<float>(5.0f)); // CUDA erfc lacks precision for large inputs CALL_SUBTEST_4(test_cuda_erfc<float>(0.01f)); CALL_SUBTEST_4(test_cuda_erfc<float>(0.001f)); CALL_SUBTEST_4(test_cuda_erf<double>(1.0)); CALL_SUBTEST_4(test_cuda_erf<double>(100.0)); CALL_SUBTEST_4(test_cuda_erf<double>(0.01)); CALL_SUBTEST_4(test_cuda_erf<double>(0.001)); CALL_SUBTEST_4(test_cuda_erfc<double>(1.0)); // CALL_SUBTEST(test_cuda_erfc<double>(100.0)); CALL_SUBTEST_4(test_cuda_erfc<double>(5.0)); // CUDA erfc lacks precision for large inputs CALL_SUBTEST_4(test_cuda_erfc<double>(0.01)); CALL_SUBTEST_4(test_cuda_erfc<double>(0.001)); CALL_SUBTEST_5(test_cuda_digamma<float>()); CALL_SUBTEST_5(test_cuda_digamma<double>()); CALL_SUBTEST_5(test_cuda_polygamma<float>()); CALL_SUBTEST_5(test_cuda_polygamma<double>()); CALL_SUBTEST_5(test_cuda_zeta<float>()); CALL_SUBTEST_5(test_cuda_zeta<double>()); CALL_SUBTEST_5(test_cuda_igamma<float>()); CALL_SUBTEST_5(test_cuda_igammac<float>()); CALL_SUBTEST_5(test_cuda_igamma<double>()); CALL_SUBTEST_5(test_cuda_igammac<double>()); CALL_SUBTEST_6(test_cuda_betainc<float>()); CALL_SUBTEST_6(test_cuda_betainc<double>()); #endif }
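// Note (editor sketch, not part of the test file above): every test in this file
// follows the same round-trip — cudaMalloc device buffers, wrap them in
// Eigen::TensorMap, evaluate a tensor expression on an Eigen::GpuDevice backed by
// a CudaStreamDevice, copy the result back on the device's stream, then compare
// against a host reference. The helper below condenses that pattern into one
// minimal, self-contained example; the name run_unary_op_on_gpu and the choice of
// exp() are illustrative assumptions, not symbols from the suite above.
#define EIGEN_USE_GPU
#include <cstddef>
#include <cuda_runtime.h>
#include <unsupported/Eigen/CXX11/Tensor>

static void run_unary_op_on_gpu() {
  Eigen::Tensor<float, 1> in(64);
  Eigen::Tensor<float, 1> out(64);
  in.setRandom();

  const std::size_t bytes = in.size() * sizeof(float);
  float* d_in;
  float* d_out;
  cudaMalloc((void**)(&d_in), bytes);
  cudaMalloc((void**)(&d_out), bytes);
  cudaMemcpy(d_in, in.data(), bytes, cudaMemcpyHostToDevice);

  Eigen::CudaStreamDevice stream;    // owns the CUDA stream used below
  Eigen::GpuDevice gpu_device(&stream);
  Eigen::TensorMap<Eigen::Tensor<float, 1> > gpu_in(d_in, 64);
  Eigen::TensorMap<Eigen::Tensor<float, 1> > gpu_out(d_out, 64);

  // .device(gpu_device) makes the expression evaluate on the GPU.
  gpu_out.device(gpu_device) = gpu_in.exp();

  cudaMemcpyAsync(out.data(), d_out, bytes, cudaMemcpyDeviceToHost,
                  gpu_device.stream());
  cudaStreamSynchronize(gpu_device.stream());

  // A host-side check against std::exp would go here, mirroring the VERIFY_IS_APPROX
  // loops used by the tests above.

  cudaFree(d_in);
  cudaFree(d_out);
}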
0100a6929ac87fbea0b14349c6fe6e876e56701f.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <tuple> __global__ void forward_face_index_map_cuda_kernel( const float* faces, const int batch_size, const int num_faces, const int image_height, const int image_width, const float near, const float far, int32_t* face_index_map, float* weight_map, float* depth_map, int32_t* lock_map) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size * num_faces) { return; } const int ih = image_height; const int iw = image_width; const int bn = i / num_faces; const int fn = i % num_faces; const float* face = &faces[i * 9]; /* pi[0], pi[1], pi[2] = leftmost, middle, rightmost points */ int pi[3]; if (face[0] < face[3]) { if (face[6] < face[0]) pi[0] = 2; else pi[0] = 0; if (face[3] < face[6]) pi[2] = 2; else pi[2] = 1; } else { if (face[6] < face[3]) pi[0] = 2; else pi[0] = 1; if (face[0] < face[6]) pi[2] = 2; else pi[2] = 0; } for (int k = 0; k < 3; k++) { if (pi[0] != k && pi[2] != k) { pi[1] = k; } } /* p[num][xyz]: x, y is normalized from [-1, 1] to [0, ih or iw - 1]. */ float p[3][3]; for (int num = 0; num < 3; num++) { for (int dim = 0; dim < 3; dim++) { if (dim == 0) { p[num][dim] = 0.5 * (face[3 * pi[num] + dim] * iw + iw - 1); } else if (dim == 1) { p[num][dim] = 0.5 * (face[3 * pi[num] + dim] * ih + ih - 1); } else { p[num][dim] = face[3 * pi[num] + dim]; } } } if (p[0][0] == p[2][0]) return; // line, not triangle /* compute face_inv */ float face_inv[9] = { p[1][1] - p[2][1], p[2][0] - p[1][0], p[1][0] * p[2][1] - p[2][0] * p[1][1], p[2][1] - p[0][1], p[0][0] - p[2][0], p[2][0] * p[0][1] - p[0][0] * p[2][1], p[0][1] - p[1][1], p[1][0] - p[0][0], p[0][0] * p[1][1] - p[1][0] * p[0][1]}; float face_inv_denominator = ( p[2][0] * (p[0][1] - p[1][1]) + p[0][0] * (p[1][1] - p[2][1]) + p[1][0] * (p[2][1] - p[0][1])); for (int k = 0; k < 9; k++) { face_inv[k] /= face_inv_denominator; } const int xi_min = max(ceil(p[0][0]), 0.); const int xi_max = min(p[2][0], iw - 1.0); for (int xi = xi_min; xi <= xi_max; xi++) { /* compute yi_min and yi_max */ float yi1, yi2; if (xi <= p[1][0]) { if (p[1][0] - p[0][0] != 0) { yi1 = (p[1][1] - p[0][1]) / (p[1][0] - p[0][0]) * (xi - p[0][0]) + p[0][1]; } else { yi1 = p[1][1]; } } else { if (p[2][0] - p[1][0] != 0) { yi1 = (p[2][1] - p[1][1]) / (p[2][0] - p[1][0]) * (xi - p[1][0]) + p[1][1]; } else { yi1 = p[1][1]; } } yi2 = (p[2][1] - p[0][1]) / (p[2][0] - p[0][0]) * (xi - p[0][0]) + p[0][1]; const int yi_min = max(0., ceil(min(yi1, yi2))); const int yi_max = min(max(yi1, yi2), ih - 1.0); for (int yi = yi_min; yi <= yi_max; yi++) { /* index in output buffers */ int index = bn * ih * iw + yi * iw + xi; /* compute w = face_inv * p */ float w[3]; for (int k = 0; k < 3; k++) { w[k] = face_inv[3 * k + 0] * xi + face_inv[3 * k + 1] * yi + face_inv[3 * k + 2]; } /* sum(w) -> 1, 0 < w < 1 */ float w_sum = 0; for (int k = 0; k < 3; k++) { w[k] = min(max(w[k], 0.0), 1.0); w_sum += w[k]; } for (int k = 0; k < 3; k++) w[k] /= w_sum; /* compute 1 / zp = sum(w / z) */ const float zp = 1.0 / (w[0] / p[0][2] + w[1] / p[1][2] + w[2] / p[2][2]); if (zp <= near || far <= zp) continue; /* lock and update */ bool locked = false; do { if (locked = atomicCAS(&lock_map[index], 0, 1) == 0) { if (zp < atomicAdd(&depth_map[index], 0)) { float record = 0; atomicExch(&depth_map[index], zp); atomicExch(&face_index_map[index], fn); for (int k = 0; k < 3; k++) { atomicExch(&weight_map[3 * index + pi[k]], w[k]); } record += 
atomicAdd(&depth_map[index], 0.); record += atomicAdd(&face_index_map[index], 0.); if (record > 0) atomicExch(&lock_map[index], 0); } else { atomicExch(&lock_map[index], 0); } } } while (!locked); } } } __global__ void forward_texture_sampling_cuda_kernel( const float* faces, const float* textures, const int32_t* face_index_map, const float* weight_map, const size_t batch_size, const int num_faces, const int image_height, const int image_width, const int texture_size, float* feature_map) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size * image_height * image_width) { return; } const int ts = texture_size; const int face_index = face_index_map[i]; float* pixel = &feature_map[i * (ts + 1)]; if (face_index >= 0) { /* from global variables: batch number, num of faces, image_size, face[v012][RGB], pixel[RGB], weight[v012], texture[ts][RGB]; */ const int bn = i / (image_height * image_width); const int nf = num_faces; const float* texture = &textures[(bn * nf + face_index) * ts * 3]; const float* weight = &weight_map[i * 3]; /* blend */ for (int k = 0; k < ts; k++) { for (int j = 0; j < 3; j++) { pixel[k] += weight[j] * texture[ts * j + k]; } } pixel[ts] = 1.0f; } } __global__ void backward_cuda_kernel( const int32_t* face_index_map, const float* weight_map, const float* grad_feature_map, float* grad_textures, size_t batch_size, size_t num_faces, int image_height, int image_width, size_t texture_size) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size * image_height * image_width) { return; } const int face_index = face_index_map[i]; if (face_index >= 0) { int bn = i / (image_width * image_height); // batch number [0 -> bs] int nf = num_faces; int ts = texture_size; const float* weight = &weight_map[i * 3]; float* grad_texture = &grad_textures[(bn * nf + face_index) * ts * 3]; for (int k = 0; k < ts; k++) { const float grad_feature = grad_feature_map[i * (ts + 1) + k]; for (int j = 0; j < 3; j++) { atomicAdd(&grad_texture[ts * j + k], weight[j] * grad_feature); } } } } std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> forward_cuda( at::Tensor feature_map, at::Tensor face_index_map, at::Tensor weight_map, at::Tensor depth_map, at::Tensor lock_map, const at::Tensor& faces, const at::Tensor& textures, const int image_height, const int image_width, const float near, const float far) { const int batch_size = faces.size(0); const int num_faces = faces.size(1); const int texture_size = textures.size(3); const int threads = 512; const dim3 blocks1 ((batch_size * num_faces - 1) / threads +1); hipLaunchKernelGGL(( forward_face_index_map_cuda_kernel), dim3(blocks1), dim3(threads), 0, 0, faces.data_ptr<float>(), batch_size, num_faces, image_height, image_width, near, far, face_index_map.data_ptr<int32_t>(), weight_map.data_ptr<float>(), depth_map.data_ptr<float>(), lock_map.data_ptr<int32_t>()); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("Error in forward_face_index_map: %s\n", hipGetErrorString(err)); } const dim3 blocks2 ((batch_size * image_height * image_width - 1) / threads + 1); hipLaunchKernelGGL(( forward_texture_sampling_cuda_kernel), dim3(blocks2), dim3(threads), 0, 0, faces.data_ptr<float>(), textures.data_ptr<float>(), face_index_map.data_ptr<int32_t>(), weight_map.data_ptr<float>(), batch_size, num_faces, image_height, image_width, texture_size, feature_map.data_ptr<float>()); err = hipGetLastError(); if (err != hipSuccess) { printf("Error in forward_texture_sampling: %s\n", hipGetErrorString(err)); } return 
std::make_tuple(face_index_map, weight_map, depth_map, feature_map); } at::Tensor backward_cuda( const at::Tensor& face_index_map, const at::Tensor& weight_map, at::Tensor& grad_feature_map, at::Tensor& grad_textures, int num_faces) { const int batch_size = face_index_map.size(0); const int image_height = face_index_map.size(1); const int image_width = face_index_map.size(2); const int texture_size = grad_textures.size(3); const int threads = 512; const dim3 blocks ((batch_size * image_height * image_width - 1) / threads + 1); hipLaunchKernelGGL(( backward_cuda_kernel), dim3(blocks), dim3(threads), 0, 0, face_index_map.data_ptr<int32_t>(), weight_map.data_ptr<float>(), grad_feature_map.data_ptr<float>(), grad_textures.data_ptr<float>(), batch_size, num_faces, image_height, image_width, texture_size); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("Error in backward: %s\n", hipGetErrorString(err)); } return grad_textures; }
0100a6929ac87fbea0b14349c6fe6e876e56701f.cu
#include <ATen/ATen.h> #include <cuda.h> #include <cuda_runtime.h> #include <tuple> __global__ void forward_face_index_map_cuda_kernel( const float* faces, const int batch_size, const int num_faces, const int image_height, const int image_width, const float near, const float far, int32_t* face_index_map, float* weight_map, float* depth_map, int32_t* lock_map) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size * num_faces) { return; } const int ih = image_height; const int iw = image_width; const int bn = i / num_faces; const int fn = i % num_faces; const float* face = &faces[i * 9]; /* pi[0], pi[1], pi[2] = leftmost, middle, rightmost points */ int pi[3]; if (face[0] < face[3]) { if (face[6] < face[0]) pi[0] = 2; else pi[0] = 0; if (face[3] < face[6]) pi[2] = 2; else pi[2] = 1; } else { if (face[6] < face[3]) pi[0] = 2; else pi[0] = 1; if (face[0] < face[6]) pi[2] = 2; else pi[2] = 0; } for (int k = 0; k < 3; k++) { if (pi[0] != k && pi[2] != k) { pi[1] = k; } } /* p[num][xyz]: x, y is normalized from [-1, 1] to [0, ih or iw - 1]. */ float p[3][3]; for (int num = 0; num < 3; num++) { for (int dim = 0; dim < 3; dim++) { if (dim == 0) { p[num][dim] = 0.5 * (face[3 * pi[num] + dim] * iw + iw - 1); } else if (dim == 1) { p[num][dim] = 0.5 * (face[3 * pi[num] + dim] * ih + ih - 1); } else { p[num][dim] = face[3 * pi[num] + dim]; } } } if (p[0][0] == p[2][0]) return; // line, not triangle /* compute face_inv */ float face_inv[9] = { p[1][1] - p[2][1], p[2][0] - p[1][0], p[1][0] * p[2][1] - p[2][0] * p[1][1], p[2][1] - p[0][1], p[0][0] - p[2][0], p[2][0] * p[0][1] - p[0][0] * p[2][1], p[0][1] - p[1][1], p[1][0] - p[0][0], p[0][0] * p[1][1] - p[1][0] * p[0][1]}; float face_inv_denominator = ( p[2][0] * (p[0][1] - p[1][1]) + p[0][0] * (p[1][1] - p[2][1]) + p[1][0] * (p[2][1] - p[0][1])); for (int k = 0; k < 9; k++) { face_inv[k] /= face_inv_denominator; } const int xi_min = max(ceil(p[0][0]), 0.); const int xi_max = min(p[2][0], iw - 1.0); for (int xi = xi_min; xi <= xi_max; xi++) { /* compute yi_min and yi_max */ float yi1, yi2; if (xi <= p[1][0]) { if (p[1][0] - p[0][0] != 0) { yi1 = (p[1][1] - p[0][1]) / (p[1][0] - p[0][0]) * (xi - p[0][0]) + p[0][1]; } else { yi1 = p[1][1]; } } else { if (p[2][0] - p[1][0] != 0) { yi1 = (p[2][1] - p[1][1]) / (p[2][0] - p[1][0]) * (xi - p[1][0]) + p[1][1]; } else { yi1 = p[1][1]; } } yi2 = (p[2][1] - p[0][1]) / (p[2][0] - p[0][0]) * (xi - p[0][0]) + p[0][1]; const int yi_min = max(0., ceil(min(yi1, yi2))); const int yi_max = min(max(yi1, yi2), ih - 1.0); for (int yi = yi_min; yi <= yi_max; yi++) { /* index in output buffers */ int index = bn * ih * iw + yi * iw + xi; /* compute w = face_inv * p */ float w[3]; for (int k = 0; k < 3; k++) { w[k] = face_inv[3 * k + 0] * xi + face_inv[3 * k + 1] * yi + face_inv[3 * k + 2]; } /* sum(w) -> 1, 0 < w < 1 */ float w_sum = 0; for (int k = 0; k < 3; k++) { w[k] = min(max(w[k], 0.0), 1.0); w_sum += w[k]; } for (int k = 0; k < 3; k++) w[k] /= w_sum; /* compute 1 / zp = sum(w / z) */ const float zp = 1.0 / (w[0] / p[0][2] + w[1] / p[1][2] + w[2] / p[2][2]); if (zp <= near || far <= zp) continue; /* lock and update */ bool locked = false; do { if (locked = atomicCAS(&lock_map[index], 0, 1) == 0) { if (zp < atomicAdd(&depth_map[index], 0)) { float record = 0; atomicExch(&depth_map[index], zp); atomicExch(&face_index_map[index], fn); for (int k = 0; k < 3; k++) { atomicExch(&weight_map[3 * index + pi[k]], w[k]); } record += atomicAdd(&depth_map[index], 0.); record += atomicAdd(&face_index_map[index], 0.); 
if (record > 0) atomicExch(&lock_map[index], 0); } else { atomicExch(&lock_map[index], 0); } } } while (!locked); } } } __global__ void forward_texture_sampling_cuda_kernel( const float* faces, const float* textures, const int32_t* face_index_map, const float* weight_map, const size_t batch_size, const int num_faces, const int image_height, const int image_width, const int texture_size, float* feature_map) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size * image_height * image_width) { return; } const int ts = texture_size; const int face_index = face_index_map[i]; float* pixel = &feature_map[i * (ts + 1)]; if (face_index >= 0) { /* from global variables: batch number, num of faces, image_size, face[v012][RGB], pixel[RGB], weight[v012], texture[ts][RGB]; */ const int bn = i / (image_height * image_width); const int nf = num_faces; const float* texture = &textures[(bn * nf + face_index) * ts * 3]; const float* weight = &weight_map[i * 3]; /* blend */ for (int k = 0; k < ts; k++) { for (int j = 0; j < 3; j++) { pixel[k] += weight[j] * texture[ts * j + k]; } } pixel[ts] = 1.0f; } } __global__ void backward_cuda_kernel( const int32_t* face_index_map, const float* weight_map, const float* grad_feature_map, float* grad_textures, size_t batch_size, size_t num_faces, int image_height, int image_width, size_t texture_size) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size * image_height * image_width) { return; } const int face_index = face_index_map[i]; if (face_index >= 0) { int bn = i / (image_width * image_height); // batch number [0 -> bs] int nf = num_faces; int ts = texture_size; const float* weight = &weight_map[i * 3]; float* grad_texture = &grad_textures[(bn * nf + face_index) * ts * 3]; for (int k = 0; k < ts; k++) { const float grad_feature = grad_feature_map[i * (ts + 1) + k]; for (int j = 0; j < 3; j++) { atomicAdd(&grad_texture[ts * j + k], weight[j] * grad_feature); } } } } std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> forward_cuda( at::Tensor feature_map, at::Tensor face_index_map, at::Tensor weight_map, at::Tensor depth_map, at::Tensor lock_map, const at::Tensor& faces, const at::Tensor& textures, const int image_height, const int image_width, const float near, const float far) { const int batch_size = faces.size(0); const int num_faces = faces.size(1); const int texture_size = textures.size(3); const int threads = 512; const dim3 blocks1 ((batch_size * num_faces - 1) / threads +1); forward_face_index_map_cuda_kernel<<<blocks1, threads>>>( faces.data_ptr<float>(), batch_size, num_faces, image_height, image_width, near, far, face_index_map.data_ptr<int32_t>(), weight_map.data_ptr<float>(), depth_map.data_ptr<float>(), lock_map.data_ptr<int32_t>()); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("Error in forward_face_index_map: %s\n", cudaGetErrorString(err)); } const dim3 blocks2 ((batch_size * image_height * image_width - 1) / threads + 1); forward_texture_sampling_cuda_kernel<<<blocks2, threads>>>( faces.data_ptr<float>(), textures.data_ptr<float>(), face_index_map.data_ptr<int32_t>(), weight_map.data_ptr<float>(), batch_size, num_faces, image_height, image_width, texture_size, feature_map.data_ptr<float>()); err = cudaGetLastError(); if (err != cudaSuccess) { printf("Error in forward_texture_sampling: %s\n", cudaGetErrorString(err)); } return std::make_tuple(face_index_map, weight_map, depth_map, feature_map); } at::Tensor backward_cuda( const at::Tensor& face_index_map, const at::Tensor& 
weight_map, at::Tensor& grad_feature_map, at::Tensor& grad_textures, int num_faces) { const int batch_size = face_index_map.size(0); const int image_height = face_index_map.size(1); const int image_width = face_index_map.size(2); const int texture_size = grad_textures.size(3); const int threads = 512; const dim3 blocks ((batch_size * image_height * image_width - 1) / threads + 1); backward_cuda_kernel<<<blocks, threads>>>( face_index_map.data_ptr<int32_t>(), weight_map.data_ptr<float>(), grad_feature_map.data_ptr<float>(), grad_textures.data_ptr<float>(), batch_size, num_faces, image_height, image_width, texture_size); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("Error in backward: %s\n", cudaGetErrorString(err)); } return grad_textures; }
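The forward_cuda / backward_cuda functions above are host-side entry points meant to be called from Python; the binding translation unit is not part of this pair. Below is a minimal binding sketch, assuming a standard PyTorch C++ extension build; the module name and the exported names are illustrative assumptions, not taken from the original project.

#include <torch/extension.h>
#include <tuple>

// Forward declarations matching the definitions in the .cu file above.
std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> forward_cuda(
    at::Tensor feature_map, at::Tensor face_index_map, at::Tensor weight_map,
    at::Tensor depth_map, at::Tensor lock_map,
    const at::Tensor& faces, const at::Tensor& textures,
    const int image_height, const int image_width,
    const float near, const float far);

at::Tensor backward_cuda(
    const at::Tensor& face_index_map, const at::Tensor& weight_map,
    at::Tensor& grad_feature_map, at::Tensor& grad_textures, int num_faces);

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    // Exported names are assumptions; use whatever the Python side expects.
    m.def("forward_cuda", &forward_cuda, "rasterization forward (CUDA)");
    m.def("backward_cuda", &backward_cuda, "rasterization backward (CUDA)");
}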
f0d8b17bd94a097aee66eca49f6ad726671951a9.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <sys/time.h>

#define CHECK(call)\
{\
    const hipError_t error = call;\
    if (error != hipSuccess)\
    {\
        printf("Error: %s:%d, ", __FILE__, __LINE__);\
        printf("code: %d, reason: %s\n", error, hipGetErrorString(error));\
        exit(1);\
    }\
}

double cpuSecond()
{
    struct timeval tp;
    gettimeofday(&tp, NULL);
    return ((double)tp.tv_sec + (double)tp.tv_usec*1.0e-6);
}

void checkResult(float *hostRef, float *gpuRef, const int N)
{
    double epsilon = 1.0E-8;
    bool match = 1;
    for (int i=0; i<N; i++)
    {
        if ( abs(hostRef[i] - gpuRef[i]) > epsilon )
        {
            match = 0;
            printf("Arrays do not match! \n");
            printf("host: %5.2f, gpu: %5.2f at current %d \n", hostRef[i], gpuRef[i], i);
            break;
        }
    }
    if (match) printf("Arrays match.\n\n");
}

void initialData(float *ip, int size)
{
    // Generate different seed for random number.
    time_t t;
    srand((unsigned int)time(&t));
    for (int i=0; i<size; i++)
    {
        ip[i] = (float)(rand() & 0xFF )/10.0f;
    }
}

void sumArrayOnHost(float *A, float *B, float *C, const int N)
{
    for (int idx=0; idx<N; idx++)
    {
        C[idx] = A[idx] + B[idx];
    }
}

__global__ void sumArraysOnGPU(float *A, float *B, float *C)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    C[i] = A[i] + B[i];
}

void sumMatrixOnHost(float *A, float *B, float *C, const int nx, const int ny)
{
    float *ia = A;
    float *ib = B;
    float *ic = C;
    for (int iy=0; iy<ny; iy++)
    {
        for (int ix=0; ix<nx; ix++)
        {
            ic[ix] = ia[ix] + ib[ix];
            //printf("CPU Add: %f + %f = %f.\n", ia[ix], ib[ix], ic[ix]);
        }
        ia += nx;
        ib += nx;
        ic += nx;
    }
}

__global__ void sumMatrixOnGPU(float *MatA, float *MatB, float *MatC, int nx, int ny)
{
    unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y;
    unsigned int idx = iy * nx + ix;
    //printf("nx: %d, ny: %d, ix: %d, iy: %d, idx: %d\n", nx, ny, ix, iy, idx);
    if (ix<nx && iy<ny)
    {
        MatC[idx] = MatA[idx] + MatB[idx];
        //printf("GPU Add: %f + %f = %f.\n", MatA[idx], MatB[idx], MatC[idx]);
    }
}

int main(int argc, char **argv)
{
    printf("%s Starting...\n", argv[0]);

    // set up device
    int dev = 0;
    hipSetDevice(dev);

    // set up data size of vectors
    int nx = 1<<14;
    int ny = 1<<14;
    int nxy = nx * ny;
    size_t nBytes = nxy * sizeof(float);
    printf("Vector size %d\n", nxy);

    // malloc host memory
    float *h_A, *h_B, *hostRef, *gpuRef;
    h_A = (float *)malloc(nBytes);
    h_B = (float *)malloc(nBytes);
    hostRef = (float *)malloc(nBytes);
    gpuRef = (float *)malloc(nBytes);

    // initialize data at host side
    initialData(h_A, nxy);
    initialData(h_B, nxy);
    memset(hostRef, 0, nBytes);
    memset(gpuRef, 0, nBytes);

    // malloc gpu global memory
    float *d_A, *d_B, *d_C;
    hipMalloc((float **)&d_A, nBytes);
    hipMalloc((float **)&d_B, nBytes);
    hipMalloc((float **)&d_C, nBytes);

    // transfer data from host to gpu
    hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice);
    hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice);

    // invoke kernel at host side
    int dimx = 32;
    int dimy = 32;
    if (argc > 2)
    {
        dimx = atoi(argv[1]);
        dimy = atoi(argv[2]);
        printf("Customized dimx: %d, dimy %d.\n", dimx, dimy);
    }
    dim3 block (dimx, dimy);
    dim3 grid ( (nx + block.x - 1)/block.x, (ny + block.y -1)/block.y );

    // start time
    double time_gpu_start = cpuSecond();
    hipLaunchKernelGGL(( sumMatrixOnGPU), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, nx, ny);
    hipDeviceSynchronize();
    // gpu finished time
    double time_gpu_finish = cpuSecond();

    // copy kernel result back to host side
    hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost);
    printf("Execution configuration <<<(%d, %d), (%d, %d)>>>\n", grid.x, grid.y, block.x, block.y);

    // reset start time for CPU.
    double time_cpu_start = cpuSecond();
    // add vector at host side for result check.
    sumMatrixOnHost(h_A, h_B, hostRef, nx, ny);
    // cpu finished time
    double time_cpu_finish = cpuSecond();

    // Check device results
    checkResult(hostRef, gpuRef, nxy);

    // free device global memory
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);

    // free host memory
    free(h_A);
    free(h_B);
    free(hostRef);
    free(gpuRef);

    double cpu_time = time_cpu_finish - time_cpu_start;
    double gpu_time = time_gpu_finish - time_gpu_start;
    printf("CPU job Done in %lf. \n", time_cpu_finish - time_cpu_start);
    printf("GPU job Done in %lf. \n", time_gpu_finish - time_gpu_start);
    printf("Accelerate ratio: %lf%%. \n", (cpu_time/gpu_time)*100.0);

    return(0);
}
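The GPU timing above wraps the launch and hipDeviceSynchronize in host wall-clock reads, and the file's CHECK macro is defined but never used. The following is a small sketch, not part of the original file, that times the same kernel with HIP events and routes the runtime calls through the existing CHECK macro.

// Sketch: device-side timing for sumMatrixOnGPU using HIP events.
// Assumes the CHECK macro and the kernel defined earlier in this file.
float sumMatrixOnGPUTimedMs(dim3 grid, dim3 block,
                            float *d_A, float *d_B, float *d_C, int nx, int ny)
{
    hipEvent_t start, stop;
    CHECK(hipEventCreate(&start));
    CHECK(hipEventCreate(&stop));

    CHECK(hipEventRecord(start, 0));
    hipLaunchKernelGGL(sumMatrixOnGPU, grid, block, 0, 0, d_A, d_B, d_C, nx, ny);
    CHECK(hipGetLastError());        // catch launch-configuration errors
    CHECK(hipEventRecord(stop, 0));
    CHECK(hipEventSynchronize(stop));

    float ms = 0.0f;
    CHECK(hipEventElapsedTime(&ms, start, stop));
    CHECK(hipEventDestroy(start));
    CHECK(hipEventDestroy(stop));
    return ms;
}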
f0d8b17bd94a097aee66eca49f6ad726671951a9.cu
#include <cuda_runtime.h>
#include <stdio.h>
#include <sys/time.h>

#define CHECK(call)\
{\
    const cudaError_t error = call;\
    if (error != cudaSuccess)\
    {\
        printf("Error: %s:%d, ", __FILE__, __LINE__);\
        printf("code: %d, reason: %s\n", error, cudaGetErrorString(error));\
        exit(1);\
    }\
}

double cpuSecond()
{
    struct timeval tp;
    gettimeofday(&tp, NULL);
    return ((double)tp.tv_sec + (double)tp.tv_usec*1.0e-6);
}

void checkResult(float *hostRef, float *gpuRef, const int N)
{
    double epsilon = 1.0E-8;
    bool match = 1;
    for (int i=0; i<N; i++)
    {
        if ( abs(hostRef[i] - gpuRef[i]) > epsilon )
        {
            match = 0;
            printf("Arrays do not match! \n");
            printf("host: %5.2f, gpu: %5.2f at current %d \n", hostRef[i], gpuRef[i], i);
            break;
        }
    }
    if (match) printf("Arrays match.\n\n");
}

void initialData(float *ip, int size)
{
    // Generate different seed for random number.
    time_t t;
    srand((unsigned int)time(&t));
    for (int i=0; i<size; i++)
    {
        ip[i] = (float)(rand() & 0xFF )/10.0f;
    }
}

void sumArrayOnHost(float *A, float *B, float *C, const int N)
{
    for (int idx=0; idx<N; idx++)
    {
        C[idx] = A[idx] + B[idx];
    }
}

__global__ void sumArraysOnGPU(float *A, float *B, float *C)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    C[i] = A[i] + B[i];
}

void sumMatrixOnHost(float *A, float *B, float *C, const int nx, const int ny)
{
    float *ia = A;
    float *ib = B;
    float *ic = C;
    for (int iy=0; iy<ny; iy++)
    {
        for (int ix=0; ix<nx; ix++)
        {
            ic[ix] = ia[ix] + ib[ix];
            //printf("CPU Add: %f + %f = %f.\n", ia[ix], ib[ix], ic[ix]);
        }
        ia += nx;
        ib += nx;
        ic += nx;
    }
}

__global__ void sumMatrixOnGPU(float *MatA, float *MatB, float *MatC, int nx, int ny)
{
    unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y;
    unsigned int idx = iy * nx + ix;
    //printf("nx: %d, ny: %d, ix: %d, iy: %d, idx: %d\n", nx, ny, ix, iy, idx);
    if (ix<nx && iy<ny)
    {
        MatC[idx] = MatA[idx] + MatB[idx];
        //printf("GPU Add: %f + %f = %f.\n", MatA[idx], MatB[idx], MatC[idx]);
    }
}

int main(int argc, char **argv)
{
    printf("%s Starting...\n", argv[0]);

    // set up device
    int dev = 0;
    cudaSetDevice(dev);

    // set up data size of vectors
    int nx = 1<<14;
    int ny = 1<<14;
    int nxy = nx * ny;
    size_t nBytes = nxy * sizeof(float);
    printf("Vector size %d\n", nxy);

    // malloc host memory
    float *h_A, *h_B, *hostRef, *gpuRef;
    h_A = (float *)malloc(nBytes);
    h_B = (float *)malloc(nBytes);
    hostRef = (float *)malloc(nBytes);
    gpuRef = (float *)malloc(nBytes);

    // initialize data at host side
    initialData(h_A, nxy);
    initialData(h_B, nxy);
    memset(hostRef, 0, nBytes);
    memset(gpuRef, 0, nBytes);

    // malloc gpu global memory
    float *d_A, *d_B, *d_C;
    cudaMalloc((float **)&d_A, nBytes);
    cudaMalloc((float **)&d_B, nBytes);
    cudaMalloc((float **)&d_C, nBytes);

    // transfer data from host to gpu
    cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice);

    // invoke kernel at host side
    int dimx = 32;
    int dimy = 32;
    if (argc > 2)
    {
        dimx = atoi(argv[1]);
        dimy = atoi(argv[2]);
        printf("Customized dimx: %d, dimy %d.\n", dimx, dimy);
    }
    dim3 block (dimx, dimy);
    dim3 grid ( (nx + block.x - 1)/block.x, (ny + block.y -1)/block.y );

    // start time
    double time_gpu_start = cpuSecond();
    sumMatrixOnGPU<<<grid, block>>>(d_A, d_B, d_C, nx, ny);
    cudaDeviceSynchronize();
    // gpu finished time
    double time_gpu_finish = cpuSecond();

    // copy kernel result back to host side
    cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost);
    printf("Execution configuration <<<(%d, %d), (%d, %d)>>>\n", grid.x, grid.y, block.x, block.y);

    // reset start time for CPU.
    double time_cpu_start = cpuSecond();
    // add vector at host side for result check.
    sumMatrixOnHost(h_A, h_B, hostRef, nx, ny);
    // cpu finished time
    double time_cpu_finish = cpuSecond();

    // Check device results
    checkResult(hostRef, gpuRef, nxy);

    // free device global memory
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);

    // free host memory
    free(h_A);
    free(h_B);
    free(hostRef);
    free(gpuRef);

    double cpu_time = time_cpu_finish - time_cpu_start;
    double gpu_time = time_gpu_finish - time_gpu_start;
    printf("CPU job Done in %lf. \n", time_cpu_finish - time_cpu_start);
    printf("GPU job Done in %lf. \n", time_gpu_finish - time_gpu_start);
    printf("Accelerate ratio: %lf%%. \n", (cpu_time/gpu_time)*100.0);

    return(0);
}
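Because dimx and dimy come from the command line, a request such as 64x64 exceeds the usual 1024-threads-per-block limit and the launch would fail without any diagnostic here, since no error check follows the <<<...>>> launch. The sketch below, not part of the original file, validates the requested block shape against the device limits, reusing the CHECK macro defined above; the suggested call site in main() is illustrative.

// Sketch: reject block shapes the device cannot launch before calling
// sumMatrixOnGPU<<<grid, block>>>. Relies on the CHECK macro defined above.
int blockFitsDevice(int dev, int dimx, int dimy)
{
    cudaDeviceProp prop;
    CHECK(cudaGetDeviceProperties(&prop, dev));
    if (dimx * dimy > prop.maxThreadsPerBlock) return 0;
    if (dimx > prop.maxThreadsDim[0] || dimy > prop.maxThreadsDim[1]) return 0;
    return 1;
}

/* Possible use in main(), right after parsing dimx/dimy (illustrative):
   if (!blockFitsDevice(dev, dimx, dimy)) {
       printf("Block %dx%d exceeds device limits, falling back to 32x32.\n", dimx, dimy);
       dimx = 32; dimy = 32;
   }
*/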
d95fdbf4a658cae644f5b1eb78e8ca078164be26.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hiprand/hiprand.h" #include "rocblas.h" #include <assert.h> extern "C" { #include "blas.h" #include "hip/hip_runtime.h" #include "utils.h" } __global__ void scale_bias_kernel(float *output, float *biases, int n, int size) { int offset = blockIdx.x * blockDim.x + threadIdx.x; int filter = blockIdx.y; int batch = blockIdx.z; if(offset < size) output[(batch*n+filter)*size + offset] *= biases[filter]; } void scale_bias_gpu(float *output, float *biases, int batch, int n, int size) { dim3 dimGrid((size-1)/BLOCK + 1, n, batch); dim3 dimBlock(BLOCK, 1, 1); hipLaunchKernelGGL(( scale_bias_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, output, biases, n, size); check_error(hipPeekAtLastError()); } __global__ void backward_scale_kernel(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates) { __shared__ float part[BLOCK]; int i,b; int filter = blockIdx.x; int p = threadIdx.x; float sum = 0; for(b = 0; b < batch; ++b){ for(i = 0; i < size; i += BLOCK){ int index = p + i + size*(filter + n*b); sum += (p+i < size) ? delta[index]*x_norm[index] : 0; } } part[p] = sum; __syncthreads(); if (p == 0) { for(i = 0; i < BLOCK; ++i) scale_updates[filter] += part[i]; } } void backward_scale_gpu(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates) { hipLaunchKernelGGL(( backward_scale_kernel), dim3(n), dim3(BLOCK), 0, 0, x_norm, delta, batch, n, size, scale_updates); check_error(hipPeekAtLastError()); } __global__ void backward_variance_outputs_kernel(float *variance_updates, float *delta, int n, float *rolling_variance, float *variance) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= n) return; variance_updates[i] += delta[i] * (-0.5f) * sqrtf(rolling_variance[i]+.00001f) * powf(variance[i]+.00001f, float(-3.0f / 2.0f)); } void backward_variance_outputs_gpu(float *variance_updates, float *delta, int n, float *rolling_variance, float *variance) { hipLaunchKernelGGL(( backward_variance_outputs_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, variance_updates, delta, n, rolling_variance, variance); check_error(hipPeekAtLastError()); } __global__ void backward_gamma_kernel(float *weights, float *delta, int c, int n, int size, float *rolling_variance, float *scale_updates) { int i,b; for(b = 0; b < n; ++b){ __shared__ float part[BLOCK]; int p = threadIdx.x; float sum = 0; for(i = 0; i < c*size; i += BLOCK){ int index = p + i + size*c*b; sum += (p+i < c*size) ? 
delta[index]*weights[index] : 0; } part[p] = sum; __syncthreads(); if (p == 0) { for(i = 0; i < BLOCK; ++i) scale_updates[b] += part[i]; } } } void backward_gamma_gpu(float *weights, float *delta, int c, int n, int size, float *rolling_variance, float *scale_updates) { hipLaunchKernelGGL(( backward_gamma_kernel), dim3(n), dim3(BLOCK), 0, 0, weights, delta, c, n, size, rolling_variance, scale_updates); check_error(hipPeekAtLastError()); } __global__ void backward_shift_gamma_kernel(float *scale_updates, int n, float *bias_updates, float *rolling_variance, float *mean, float *variance) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= n) return; scale_updates[i] = scale_updates[i] / sqrtf(rolling_variance[i]+.00001f) - bias_updates[i] * mean[i] / sqrtf(variance[i]+.00001f); } void backward_shift_gamma_gpu(float *scale_updates, int n, float *bias_updates, float *rolling_variance, float *mean, float *variance) { hipLaunchKernelGGL(( backward_shift_gamma_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, scale_updates, n, bias_updates, rolling_variance, mean, variance); check_error(hipPeekAtLastError()); } __global__ void backward_mean_kernel(float *mean_updates, float *bias_updates, int n, float *scales_gpu, float *variance) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= n) return; mean_updates[i] = bias_updates[i] * (-scales_gpu[i] / sqrtf(variance[i]+.00001f)); } void backward_mean_gpu(float *mean_updates, float *bias_updates, int n, float *scales_gpu, float *variance) { hipLaunchKernelGGL(( backward_mean_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, mean_updates, bias_updates, n, scales_gpu, variance); check_error(hipPeekAtLastError()); } __global__ void backward_variance_bias_kernel(float *variance_updates, float *bias_updates, int n, float *scales_gpu, float *mean, float *variance) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= n) return; variance_updates[i] += bias_updates[i] * (0.5f * scales_gpu[i] * mean[i] * powf(variance[i]+.00001f, (float)(-3.f/2.f))); } void backward_variance_bias_gpu(float *variance_updates, float *bias_updates, int n, float *scales_gpu, float *mean, float *variance) { hipLaunchKernelGGL(( backward_variance_bias_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, variance_updates, bias_updates, n, scales_gpu, mean, variance); check_error(hipPeekAtLastError()); } __global__ void sum_outputs_kernel(float *x_stat, float *mean, int batch, int filters, int spatial, float *x_stat_sum) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? 
x_stat[index] - mean[filter] : 0; } } __syncthreads(); if(id == 0){ x_stat_sum[filter] = 0; for(i = 0; i < threads; ++i){ x_stat_sum[filter] += local[i]; } x_stat_sum[filter] /= -(spatial * batch); } } void sum_outputs_gpu(float *x_stat, float *mean, int batch, int filters, int spatial, float *x_stat_sum) { hipLaunchKernelGGL(( sum_outputs_kernel), dim3(filters), dim3(BLOCK), 0, 0, x_stat, mean, batch, filters, spatial, x_stat_sum); check_error(hipPeekAtLastError()); } __global__ void backward_stat_mean_var_kernel(float *x_stat_delta, float *delta_mean, float *delta_variance, int batch, int n, int size, float *x_stat, float *mean, float *x_stat_sum) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= n*size*batch) return; int i = index % size; index /= size; int j = index % n; index /= n; int k = index; x_stat_delta[(k*n+j)*size + i] = delta_mean[j] / (batch * size) + delta_variance[j] * 2 / (batch * size) * (x_stat[(k*n+j)*size + i] - mean[j]+x_stat_sum[j]); } void backward_stat_mean_var_gpu(float *x_stat_delta, float *delta_mean, float *delta_variance, int batch, int n, int size, float *x_stat, float *mean, float *x_stat_sum) { int num = n*size*batch; hipLaunchKernelGGL(( backward_stat_mean_var_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, x_stat_delta, delta_mean, delta_variance, batch, n, size, x_stat, mean, x_stat_sum); check_error(hipPeekAtLastError()); } __global__ void add_bias_kernel(float *output, float *biases, int batch, int n, int size) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= n*size*batch) return; int i = index % size; index /= size; int j = index % n; index /= n; int k = index; output[(k*n+j)*size + i] += biases[j]; } void add_bias_gpu(float *output, float *biases, int batch, int n, int size) { int num = n*size*batch; hipLaunchKernelGGL(( add_bias_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, output, biases, batch, n, size); check_error(hipPeekAtLastError()); } __global__ void backward_bias_conn_kernel(float *bias_updates, float *delta, int batch, int n) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= n) return; int b; float sum = 0; for(b = 0; b < batch; ++b){ int i = b*n + index; sum += delta[i]; } bias_updates[index] += sum; } __global__ void backward_bias_kernel(float *bias_updates, float *delta, int batch, int n, int size) { __shared__ float part[BLOCK]; int i,b; int filter = blockIdx.x; int p = threadIdx.x; float sum = 0; for(b = 0; b < batch; ++b){ for(i = 0; i < size; i += BLOCK){ int index = p + i + size*(filter + n*b); sum += (p+i < size) ? 
delta[index] : 0; } } part[p] = sum; __syncthreads(); if (p == 0) { for(i = 0; i < BLOCK; ++i) bias_updates[filter] += part[i]; } } void backward_bias_gpu(float *bias_updates, float *delta, int batch, int n, int size) { if(size == 1){ hipLaunchKernelGGL(( backward_bias_conn_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, bias_updates, delta, batch, n); }else{ hipLaunchKernelGGL(( backward_bias_kernel), dim3(n), dim3(BLOCK), 0, 0, bias_updates, delta, batch, n, size); } check_error(hipPeekAtLastError()); } __global__ void backward_quantize_kernel(float *x_updates, float *x, int n, float bound, float shift) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i < n) { if (x[i] < -bound || x[i] > bound- (float)1.0 / shift) x_updates[i] = 0; } } void backward_quantize_gpu(float *x_updates, float *x, int n, int total_bitwidth, int fraction_bitwidth) { int integer_bitwidth = total_bitwidth - fraction_bitwidth; float bound = pow(2, integer_bitwidth - 1); float shift = pow(2, fraction_bitwidth); hipLaunchKernelGGL(( backward_quantize_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, x_updates, x, n, bound, shift); } /* __global__ void dot_kernel(float *output, float scale, int batch, int n, int size, float *delta) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; int f1 = index / n; int f2 = index % n; if (f2 <= f1) return; float sum = 0; float norm1 = 0; float norm2 = 0; int b, i; for(b = 0; b < batch; ++b){ for(i = 0; i < size; ++i){ int i1 = b * size * n + f1 * size + i; int i2 = b * size * n + f2 * size + i; sum += output[i1] * output[i2]; norm1 += output[i1] * output[i1]; norm2 += output[i2] * output[i2]; } } norm1 = sqrt(norm1); norm2 = sqrt(norm2); float norm = norm1 * norm2; sum = sum / norm; for(b = 0; b < batch; ++b){ for(i = 0; i < size; ++i){ int i1 = b * size * n + f1 * size + i; int i2 = b * size * n + f2 * size + i; delta[i1] += - scale * sum * output[i2] / norm; delta[i2] += - scale * sum * output[i1] / norm; } } } void dot_error_gpu(layer l) { dot_kernel<<<cuda_gridsize(l.n*l.n), BLOCK>>>(l.output_gpu, l.dot, l.batch, l.n, l.out_w * l.out_h, l.delta_gpu); check_error(hipPeekAtLastError()); } */ __global__ void adam_kernel(int N, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; float mhat = m[index] / (1.f - powf(B1, t)); float vhat = v[index] / (1.f - powf(B2, t)); x[index] = x[index] + rate * mhat / (sqrtf(vhat) + eps); } extern "C" void adam_gpu(int n, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t) { hipLaunchKernelGGL(( adam_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, x, m, v, B1, B2, rate, eps, t); check_error(hipPeekAtLastError()); } extern "C" void adam_update_gpu(float *w, float *d, float *m, float *v, float B1, float B2, float eps, float decay, float rate, int n, int batch, int t) { scal_gpu(n, B1, m, 1); scal_gpu(n, B2, v, 1); axpy_gpu(n, -decay*batch, w, 1, d, 1); axpy_gpu(n, (1-B1), d, 1, m, 1); mul_gpu(n, d, 1, d, 1); axpy_gpu(n, (1-B2), d, 1, v, 1); adam_gpu(n, w, m, v, B1, B2, rate, eps, t); fill_gpu(n, 0, d, 1); } __global__ void normalize_kernel(int N, float *x, float *mean, float *variance, int batch, int filters, int spatial) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; int f = (index/spatial)%filters; x[index] = (x[index] - mean[f])/(sqrtf(variance[f] + .00001f)); } 
__global__ void normalize_delta_kernel(int N, float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; int f = (index/spatial)%filters; delta[index] = delta[index] * 1.f/(sqrtf(variance[f] + .00001f)) + variance_delta[f] * 2.f * (x[index] - mean[f]) / (spatial * batch) + mean_delta[f]/(spatial*batch); } extern "C" void normalize_delta_gpu(float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta) { size_t N = batch*filters*spatial; hipLaunchKernelGGL(( normalize_delta_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, x, mean, variance, mean_delta, variance_delta, batch, filters, spatial, delta); check_error(hipPeekAtLastError()); } __global__ void variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; int j,k; variance_delta[i] = 0; for(j = 0; j < batch; ++j){ for(k = 0; k < spatial; ++k){ int index = j*filters*spatial + i*spatial + k; variance_delta[i] += delta[index]*(x[index] - mean[i]); } } variance_delta[i] *= -.5f * powf(variance[i] + .00001f, (float)(-3.f/2.f)); } __global__ void accumulate_kernel(float *x, int n, int groups, float *sum) { int k; int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= groups) return; sum[i] = 0; for(k = 0; k < n; ++k){ sum[i] += x[k*groups + i]; } } __global__ void fast_mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? delta[index] : 0; } } __syncthreads(); if(id == 0){ mean_delta[filter] = 0; for(i = 0; i < threads; ++i){ mean_delta[filter] += local[i]; } mean_delta[filter] *= (-1.f/sqrtf(variance[filter] + .00001f)); } } __global__ void fast_variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? 
delta[index]*(x[index] - mean[filter]) : 0; } } __syncthreads(); if(id == 0){ variance_delta[filter] = 0; for(i = 0; i < threads; ++i){ variance_delta[filter] += local[i]; } variance_delta[filter] *= -.5f * powf(variance[filter] + .00001f, (float)(-3.f/2.f)); } } __global__ void mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; int j,k; mean_delta[i] = 0; for (j = 0; j < batch; ++j) { for (k = 0; k < spatial; ++k) { int index = j*filters*spatial + i*spatial + k; mean_delta[i] += delta[index]; } } mean_delta[i] *= (-1.f/sqrtf(variance[i] + .00001f)); } extern "C" void mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { hipLaunchKernelGGL(( mean_delta_kernel), dim3(cuda_gridsize(filters)), dim3(BLOCK), 0, 0, delta, variance, batch, filters, spatial, mean_delta); check_error(hipPeekAtLastError()); } extern "C" void fast_mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { hipLaunchKernelGGL(( fast_mean_delta_kernel), dim3(filters), dim3(BLOCK), 0, 0, delta, variance, batch, filters, spatial, mean_delta); check_error(hipPeekAtLastError()); } extern "C" void fast_variance_delta_gpu(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta) { hipLaunchKernelGGL(( fast_variance_delta_kernel), dim3(filters), dim3(BLOCK), 0, 0, x, delta, mean, variance, batch, filters, spatial, variance_delta); check_error(hipPeekAtLastError()); } __global__ void mean_kernel(float *x, int batch, int filters, int spatial, float *mean) { float scale = 1.f/(batch * spatial); int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; int j,k; mean[i] = 0; for(j = 0; j < batch; ++j){ for(k = 0; k < spatial; ++k){ int index = j*filters*spatial + i*spatial + k; mean[i] += x[index]; } } mean[i] *= scale; } __global__ void variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance) { float scale = 1.f/(batch * spatial - 1); int j,k; int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; variance[i] = 0; for(j = 0; j < batch; ++j){ for(k = 0; k < spatial; ++k){ int index = j*filters*spatial + i*spatial + k; variance[i] += powf((x[index] - mean[i]), 2); } } variance[i] *= scale; } __global__ void reorg_kernel(int N, float *x, int w, int h, int c, int batch, int stride, int forward, float *out) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i >= N) return; int in_index = i; int in_w = i%w; i = i/w; int in_h = i%h; i = i/h; int in_c = i%c; i = i/c; int b = i%batch; int out_c = c/(stride*stride); int c2 = in_c % out_c; int offset = in_c / out_c; int w2 = in_w*stride + offset % stride; int h2 = in_h*stride + offset / stride; //printf("%d\n", offset); int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b)); // printf("%d %d %d\n", w2, h2, c2); //printf("%d %d\n", in_index, out_index); //if(out_index >= N || out_index < 0) printf("bad bad bad \n"); if(forward) out[out_index] = x[in_index]; else out[in_index] = x[out_index]; //if(forward) out[1] = x[1]; //else out[0] = x[0]; } __global__ void axpy_kernel(int N, float ALPHA, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) 
Y[OFFY+i*INCY] += ALPHA*X[OFFX+i*INCX]; } __global__ void pow_kernel(int N, float ALPHA, float *X, int INCX, float *Y, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[i*INCY] = pow(X[i*INCX], ALPHA); } __global__ void add_pow_kernel(int N, float ALPHA, float *X, int INCX, float *Y, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[i*INCY] += pow(X[i*INCX], ALPHA); } __global__ void const_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] = ALPHA; } __global__ void constrain_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] = fminf(ALPHA, fmaxf(-ALPHA, X[i*INCX])); } __global__ void clamp_kernel(int N, float *X, int INCX, float clamp_min, float clamp_max) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] = fminf(clamp_max, fmaxf(clamp_min, X[i*INCX])); } __global__ void fabsf_clamp_kernel(int N, float *X, int INCX, float clamp_min, float clamp_max) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) { if (X[i*INCX] >= 0) X[i*INCX] = fminf(clamp_max, fmaxf(clamp_min, X[i*INCX])); else X[i*INCX] = fminf(-clamp_min, fmaxf(-clamp_max, X[i*INCX])); } } __global__ void quantize_kernel(float *x, int n, float shift, float bound, int diff, float static_rate, int is_round) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i < n) { if (is_round) { x[i] = fminf(bound - (float)1.0 / shift, fmaxf(-bound, round(x[i] * shift) / shift)); } else { // make the quantization_aware_training same as post_training_quantization float quantize_x = ((int)(x[i] * static_rate) >> diff) / shift; x[i] = fminf(bound - (float)1.0 / shift, fmax(-bound, quantize_x)); } } } __global__ void shift_bias_kernel(float *x, int n, float *gamma, float * beta, float *mean, float * variance) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i < n) { x[i] = beta[i] - gamma[i] * mean[i] / sqrtf(variance[i] + .00001f); } } __global__ void supp_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) { if((X[i*INCX] * X[i*INCX]) < (ALPHA * ALPHA)) X[i*INCX] = 0; } } __global__ void add_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] += ALPHA; } __global__ void scal_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] *= ALPHA; } __global__ void scal_add_kernel(int N, float ALPHA, float BETA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] = ALPHA * X[i*INCX] + BETA; } __global__ void reweight_kernel(int N, float *scale, int scale_offset, float *X, int INCX, float *Y, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[i*INCY] = scale[scale_offset] * X[i*INCX]; } __global__ void fill_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] = ALPHA; } __global__ void copy_kernel(int N, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x 
+ threadIdx.x; if(i < N) Y[i*INCY + OFFY] = X[i*INCX + OFFX]; } __global__ void mul_kernel(int N, float *X, int INCX, float *Y, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[i*INCY] *= X[i*INCX]; } extern "C" void normalize_gpu(float *x, float *mean, float *variance, int batch, int filters, int spatial) { size_t N = batch*filters*spatial; hipLaunchKernelGGL(( normalize_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, x, mean, variance, batch, filters, spatial); check_error(hipPeekAtLastError()); } __global__ void l2norm_kernel(int N, float *x, float *dx, int batch, int filters, int spatial) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; int b = index / spatial; int i = index % spatial; int f; float sum = 0; for(f = 0; f < filters; ++f){ int index = b*filters*spatial + f*spatial + i; sum += powf(x[index], 2); } sum = sqrtf(sum); if(sum == 0) sum = 1; //printf("%f\n", sum); for(f = 0; f < filters; ++f){ int index = b*filters*spatial + f*spatial + i; x[index] /= sum; dx[index] = (1 - x[index]) / sum; } } extern "C" void l2normalize_gpu(float *x, float *dx, int batch, int filters, int spatial) { size_t N = batch*spatial; hipLaunchKernelGGL(( l2norm_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, x, dx, batch, filters, spatial); check_error(hipPeekAtLastError()); } __global__ void fast_mean_kernel(float *x, int batch, int filters, int spatial, float *mean) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? x[index] : 0; } } __syncthreads(); if(id == 0){ mean[filter] = 0; for(i = 0; i < threads; ++i){ mean[filter] += local[i]; } mean[filter] /= spatial * batch; } } __global__ void fast_variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? 
powf((x[index] - mean[filter]), 2) : 0; } } __syncthreads(); if(id == 0){ variance[filter] = 0; for(i = 0; i < threads; ++i){ variance[filter] += local[i]; } variance[filter] /= (spatial * batch); } } extern "C" void fast_mean_gpu(float *x, int batch, int filters, int spatial, float *mean) { hipLaunchKernelGGL(( fast_mean_kernel), dim3(filters), dim3(BLOCK), 0, 0, x, batch, filters, spatial, mean); check_error(hipPeekAtLastError()); } extern "C" void fast_variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance) { hipLaunchKernelGGL(( fast_variance_kernel), dim3(filters), dim3(BLOCK), 0, 0, x, mean, batch, filters, spatial, variance); check_error(hipPeekAtLastError()); } extern "C" void mean_gpu(float *x, int batch, int filters, int spatial, float *mean) { hipLaunchKernelGGL(( mean_kernel), dim3(cuda_gridsize(filters)), dim3(BLOCK), 0, 0, x, batch, filters, spatial, mean); check_error(hipPeekAtLastError()); } extern "C" void variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance) { hipLaunchKernelGGL(( variance_kernel), dim3(cuda_gridsize(filters)), dim3(BLOCK), 0, 0, x, mean, batch, filters, spatial, variance); check_error(hipPeekAtLastError()); } extern "C" void axpy_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY) { axpy_gpu_offset(N, ALPHA, X, 0, INCX, Y, 0, INCY); } extern "C" void pow_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY) { hipLaunchKernelGGL(( pow_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX, Y, INCY); check_error(hipPeekAtLastError()); } extern "C" void add_pow_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY) { hipLaunchKernelGGL(( add_pow_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX, Y, INCY); check_error(hipPeekAtLastError()); } extern "C" void axpy_gpu_offset(int N, float ALPHA, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY) { hipLaunchKernelGGL(( axpy_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, OFFX, INCX, Y, OFFY, INCY); check_error(hipPeekAtLastError()); } extern "C" void copy_gpu(int N, float * X, int INCX, float * Y, int INCY) { copy_gpu_offset(N, X, 0, INCX, Y, 0, INCY); } extern "C" void mul_gpu(int N, float * X, int INCX, float * Y, int INCY) { hipLaunchKernelGGL(( mul_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, INCX, Y, INCY); check_error(hipPeekAtLastError()); } extern "C" void copy_gpu_offset(int N, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY) { hipLaunchKernelGGL(( copy_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, OFFX, INCX, Y, OFFY, INCY); check_error(hipPeekAtLastError()); } __global__ void flatten_kernel(int N, float *x, int spatial, int layers, int batch, int forward, float *out) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i >= N) return; int in_s = i%spatial; i = i/spatial; int in_c = i%layers; i = i/layers; int b = i; int i1 = b*layers*spatial + in_c*spatial + in_s; int i2 = b*layers*spatial + in_s*layers + in_c; if (forward) out[i2] = x[i1]; else out[i1] = x[i2]; } extern "C" void flatten_gpu(float *x, int spatial, int layers, int batch, int forward, float *out) { int size = spatial*batch*layers; hipLaunchKernelGGL(( flatten_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, size, x, spatial, layers, batch, forward, out); check_error(hipPeekAtLastError()); } extern "C" void reorg_gpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out) { int 
size = w*h*c*batch; hipLaunchKernelGGL(( reorg_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, size, x, w, h, c, batch, stride, forward, out); check_error(hipPeekAtLastError()); } __global__ void mask_kernel(int n, float *x, float mask_num, float *mask, float val) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n && mask[i] == mask_num) x[i] = val; } extern "C" void mask_gpu(int N, float * X, float mask_num, float * mask, float val) { hipLaunchKernelGGL(( mask_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, mask_num, mask, val); check_error(hipPeekAtLastError()); } __global__ void scale_mask_kernel(int n, float *x, float mask_num, float *mask, float scale) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n && mask[i] == mask_num) x[i] *= scale; } extern "C" void scale_mask_gpu(int N, float * X, float mask_num, float * mask, float scale) { hipLaunchKernelGGL(( scale_mask_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, mask_num, mask, scale); check_error(hipPeekAtLastError()); } extern "C" void const_gpu(int N, float ALPHA, float * X, int INCX) { hipLaunchKernelGGL(( const_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX); check_error(hipPeekAtLastError()); } extern "C" void constrain_gpu(int N, float ALPHA, float * X, int INCX) { hipLaunchKernelGGL(( constrain_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX); check_error(hipPeekAtLastError()); } extern "C" void clamp_gpu(int N, float *X, int INCX, float clamp_min, float clamp_max) { hipLaunchKernelGGL(( clamp_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, INCX, clamp_min, clamp_max); check_error(hipPeekAtLastError()); } extern "C" void fabsf_clamp_gpu(int N, float *X, int INCX, float clamp_min, float clamp_max) { hipLaunchKernelGGL(( fabsf_clamp_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, INCX, clamp_min, clamp_max); check_error(hipPeekAtLastError()); } extern "C" void quantize_gpu(float * x, int n, int total_bitwidth, int fraction_bitwidth, int net_fraction_bitwidth, int is_round) { int integer_bitwidth = total_bitwidth - fraction_bitwidth; float bound = pow(2, integer_bitwidth - 1); float shift = pow(2, fraction_bitwidth); int diff = net_fraction_bitwidth - fraction_bitwidth; float static_rate = pow(2, net_fraction_bitwidth); hipLaunchKernelGGL(( quantize_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, x, n, shift, bound, diff, static_rate, is_round); check_error(hipPeekAtLastError()); } __global__ void scale_weights_kernel(float *weights, int nweights, int n, float * gamma, float * variance) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index < nweights) { int i = index / (nweights / n); weights[index] *= gamma[i] / sqrtf(variance[i] + .00001f); } } extern "C" void scale_weights_gpu(float * weights, int nweights, int n, float * gamma, float * variance) { hipLaunchKernelGGL(( scale_weights_kernel), dim3(cuda_gridsize(nweights)), dim3(BLOCK), 0, 0, weights, nweights, n, gamma, variance); check_error(hipPeekAtLastError()); } __global__ void scale_outputs_kernel(float *outputs, float *variance, float *rolling_variance, int n, int size) { int offset = blockIdx.x * blockDim.x + threadIdx.x; int filter = blockIdx.y; int batch = blockIdx.z; if(offset < size) outputs[(batch*n+filter)*size + offset] *= (sqrtf(rolling_variance[filter]+.00001f) / sqrtf(variance[filter]+.00001f)); } extern "C" void scale_outputs_gpu(float * outputs, float * variance, 
float * rolling_variance, int batch, int n, int size) { dim3 dimGrid((size-1)/BLOCK + 1, n, batch); dim3 dimBlock(BLOCK, 1, 1); hipLaunchKernelGGL(( scale_outputs_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, outputs, variance, rolling_variance, n, size); check_error(hipPeekAtLastError()); } extern "C" void shift_bias_gpu(float * new_bias, int n, float * scales, float * bias, float * mean, float * variance) { hipLaunchKernelGGL(( shift_bias_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, new_bias, n, scales, bias, mean, variance); check_error(hipPeekAtLastError()); } extern "C" void add_gpu(int N, float ALPHA, float * X, int INCX) { hipLaunchKernelGGL(( add_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX); check_error(hipPeekAtLastError()); } extern "C" void scal_gpu(int N, float ALPHA, float * X, int INCX) { hipLaunchKernelGGL(( scal_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX); check_error(hipPeekAtLastError()); } extern "C" void scal_add_gpu(int N, float ALPHA, float BETA, float * X, int INCX) { hipLaunchKernelGGL(( scal_add_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, BETA, X, INCX); check_error(hipPeekAtLastError()); } extern "C" void reweight_gpu(int N, float * scale, int scale_offset, float * X, int INCX, float * Y, int INCY) { hipLaunchKernelGGL(( reweight_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, scale, scale_offset, X, INCX, Y, INCY); check_error(hipPeekAtLastError()); } extern "C" void supp_gpu(int N, float ALPHA, float * X, int INCX) { hipLaunchKernelGGL(( supp_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX); check_error(hipPeekAtLastError()); } extern "C" void fill_gpu(int N, float ALPHA, float * X, int INCX) { hipLaunchKernelGGL(( fill_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX); check_error(hipPeekAtLastError()); } __global__ void shortcut_kernel(int size, int minw, int minh, int minc, int stride, int sample, int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (id >= size) return; int i = id % minw; id /= minw; int j = id % minh; id /= minh; int k = id % minc; id /= minc; int b = id % batch; int out_index = i*sample + w2*(j*sample + h2*(k + c2*b)); int add_index = i*stride + w1*(j*stride + h1*(k + c1*b)); out[out_index] = s1*out[out_index] + s2*add[add_index]; //out[out_index] += add[add_index]; } extern "C" void shortcut_gpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out) { int minw = (w1 < w2) ? w1 : w2; int minh = (h1 < h2) ? h1 : h2; int minc = (c1 < c2) ? 
c1 : c2; //printf("shortcut_layer: %d %d %d %d %d %d: %d %d %d\n", w1, h1, c1, w2, h2, c2, minw, minh, minc); int stride = w1/w2; int sample = w2/w1; assert(stride == h1/h2); assert(sample == h2/h1); if(stride < 1) stride = 1; if(sample < 1) sample = 1; int size = batch * minw * minh * minc; hipLaunchKernelGGL(( shortcut_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, size, minw, minh, minc, stride, sample, batch, w1, h1, c1, add, w2, h2, c2, s1, s2, out); check_error(hipPeekAtLastError()); } __global__ void smooth_l1_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float diff = truth[i] - pred[i]; float abs_val = fabsf(diff); if(abs_val < 1) { error[i] = diff * diff; delta[i] = diff; } else { error[i] = 2*abs_val - 1; delta[i] = (diff > 0) ? 1 : -1; } } } extern "C" void smooth_l1_gpu(int n, float *pred, float *truth, float *delta, float *error) { hipLaunchKernelGGL(( smooth_l1_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error); check_error(hipPeekAtLastError()); } __global__ void add_l1_delta_kernel(int n, float scale, float *weight, float *delta) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ delta[i] += scale * ((weight[i] > 0) ? 1 : -1); } } extern "C" void add_l1_delta_gpu(int n, float scale, float *weight, float *delta) { hipLaunchKernelGGL(( add_l1_delta_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, scale, weight, delta); check_error(hipPeekAtLastError()); } __global__ void add_consistent_l1_delta_kernel(int n, float scale, float *weight, float *sqrt_sum, float *delta) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ delta[i] += scale * (weight[i] / sqrt_sum[i]); } } extern "C" void add_consistent_l1_delta_gpu(int n, float scale, float *weight, float *sqrt_sum, float *delta) { hipLaunchKernelGGL(( add_consistent_l1_delta_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, scale, weight, sqrt_sum, delta); check_error(hipPeekAtLastError()); } __global__ void softmax_x_ent_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float t = truth[i]; float p = pred[i]; error[i] = (t) ? -log(p) : 0; delta[i] = t-p; } } extern "C" void softmax_x_ent_gpu(int n, float *pred, float *truth, float *delta, float *error) { hipLaunchKernelGGL(( softmax_x_ent_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error); check_error(hipPeekAtLastError()); } __global__ void logistic_x_ent_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float t = truth[i]; float p = pred[i]; error[i] = -t*log(p+.0000001) - (1-t)*log(1-p+.0000001); delta[i] = t-p; } } extern "C" void logistic_x_ent_gpu(int n, float *pred, float *truth, float *delta, float *error) { hipLaunchKernelGGL(( logistic_x_ent_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error); check_error(hipPeekAtLastError()); } __global__ void l2_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float diff = truth[i] - pred[i]; error[i] = diff * diff; //I know this is technically wrong, deal with it. 
delta[i] = diff; } } extern "C" void l2_gpu(int n, float *pred, float *truth, float *delta, float *error) { hipLaunchKernelGGL(( l2_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error); check_error(hipPeekAtLastError()); } __global__ void l1_margin_kernel(int n, float *spred, float *ppred, float margin, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i < n) { //float diff = fabsf(spred[i] - ppred[i]); float diff = margin - fabsf(spred[i] - ppred[i]); //printf("info: %f %f %f %f\n", margin, fabsf(spred[i] - ppred[i]), spred[i], ppred[i]); if (diff <= 0) { error[i] = 0; delta[i] = 0; } else { error[i] = diff; delta[i] = (spred[i] - ppred[i] > 0) ? 1 : -1; } } } // max(0, -|ls - lp| + margin) extern "C" void l1_margin_gpu(int n, float *spred, float *ppred, float margin, float *delta, float *error) { hipLaunchKernelGGL(( l1_margin_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, spred, ppred, margin, delta, error); check_error(hipPeekAtLastError()); } __global__ void l1_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float diff = truth[i] - pred[i]; error[i] = abs(diff); delta[i] = (diff > 0) ? 1 : -1; } } extern "C" void l1_gpu(int n, float *pred, float *truth, float *delta, float *error) { hipLaunchKernelGGL(( l1_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error); check_error(hipPeekAtLastError()); } __global__ void wgan_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ error[i] = truth[i] ? -pred[i] : pred[i]; delta[i] = (truth[i] > 0) ? 1 : -1; } } extern "C" void wgan_gpu(int n, float *pred, float *truth, float *delta, float *error) { hipLaunchKernelGGL(( wgan_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error); check_error(hipPeekAtLastError()); } __global__ void weighted_sum_kernel(int n, float *a, float *b, float *s, float *c) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ c[i] = s[i]*a[i] + (1-s[i])*(b ? 
b[i] : 0); } } __global__ void deinter_kernel(int NX, float *X, int NY, float *Y, int B, float *OUT) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < (NX+NY)*B){ int b = i / (NX+NY); int j = i % (NX+NY); if (j < NX){ if(X) X[b*NX + j] += OUT[i]; } else { if(Y) Y[b*NY + j - NX] += OUT[i]; } } } extern "C" void deinter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT) { hipLaunchKernelGGL(( deinter_kernel), dim3(cuda_gridsize((NX+NY)*B)), dim3(BLOCK), 0, 0, NX, X, NY, Y, B, OUT); check_error(hipPeekAtLastError()); } __global__ void inter_kernel(int NX, float *X, int NY, float *Y, int B, float *OUT) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < (NX+NY)*B){ int b = i / (NX+NY); int j = i % (NX+NY); if (j < NX){ OUT[i] = X[b*NX + j]; } else { OUT[i] = Y[b*NY + j - NX]; } } } extern "C" void inter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT) { hipLaunchKernelGGL(( inter_kernel), dim3(cuda_gridsize((NX+NY)*B)), dim3(BLOCK), 0, 0, NX, X, NY, Y, B, OUT); check_error(hipPeekAtLastError()); } extern "C" void weighted_sum_gpu(float *a, float *b, float *s, int num, float *c) { hipLaunchKernelGGL(( weighted_sum_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, num, a, b, s, c); check_error(hipPeekAtLastError()); } __global__ void weighted_delta_kernel(int n, float *a, float *b, float *s, float *da, float *db, float *ds, float *dc) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ if(da) da[i] += dc[i] * s[i]; if(db) db[i] += dc[i] * (1-s[i]); ds[i] += dc[i] * (a[i] - b[i]); } } extern "C" void weighted_delta_gpu(float *a, float *b, float *s, float *da, float *db, float *ds, int num, float *dc) { hipLaunchKernelGGL(( weighted_delta_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, num, a, b, s, da, db, ds, dc); check_error(hipPeekAtLastError()); } __global__ void mult_add_into_kernel(int n, float *a, float *b, float *c) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ c[i] += a[i]*b[i]; } } extern "C" void mult_add_into_gpu(int num, float *a, float *b, float *c) { hipLaunchKernelGGL(( mult_add_into_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, num, a, b, c); check_error(hipPeekAtLastError()); } __device__ void softmax_device(float *input, int n, float temp, int stride, float *output) { int i; float sum = 0; float largest = -INFINITY; for(i = 0; i < n; ++i){ int val = input[i*stride]; largest = (val>largest) ? 
val : largest; } for(i = 0; i < n; ++i){ float e = expf(input[i*stride]/temp - largest/temp); sum += e; output[i*stride] = e; } for(i = 0; i < n; ++i){ output[i*stride] /= sum; } } __global__ void softmax_tree_kernel(float *input, int spatial, int batch, int stride, float temp, float *output, int groups, int *group_size, int *group_offset) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (id >= spatial*batch*groups) return; int s = id % spatial; id = id / spatial; int g = id % groups; int b = id / groups; int goff = group_offset[g]*spatial; int boff = b*stride; softmax_device(input + goff + boff + s, group_size[g], temp, spatial, output + goff + boff + s); } extern "C" void softmax_tree(float *input, int spatial, int batch, int stride, float temp, float *output, tree hier) { int *tree_groups_size = cuda_make_int_array(hier.group_size, hier.groups); int *tree_groups_offset = cuda_make_int_array(hier.group_offset, hier.groups); /* static int *tree_groups_size = 0; static int *tree_groups_offset = 0; if(!tree_groups_size){ tree_groups_size = cuda_make_int_array(hier.group_size, hier.groups); tree_groups_offset = cuda_make_int_array(hier.group_offset, hier.groups); } */ int num = spatial*batch*hier.groups; hipLaunchKernelGGL(( softmax_tree_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, input, spatial, batch, stride, temp, output, hier.groups, tree_groups_size, tree_groups_offset); check_error(hipPeekAtLastError()); cuda_free((float *)tree_groups_size); cuda_free((float *)tree_groups_offset); } __global__ void softmax_kernel(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (id >= batch*groups) return; int b = id / groups; int g = id % groups; softmax_device(input + b*batch_offset + g*group_offset, n, temp, stride, output + b*batch_offset + g*group_offset); } extern "C" void softmax_gpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output) { hipLaunchKernelGGL(( softmax_kernel), dim3(cuda_gridsize(batch*groups)), dim3(BLOCK), 0, 0, input, n, batch, batch_offset, groups, group_offset, stride, temp, output); check_error(hipPeekAtLastError()); } __global__ void upsample_kernel(size_t N, float *x, int w, int h, int c, int batch, int stride, int forward, float scale, float *out) { size_t i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i >= N) return; int out_index = i; int out_w = i%(w*stride); i = i/(w*stride); int out_h = i%(h*stride); i = i/(h*stride); int out_c = i%c; i = i/c; int b = i%batch; int in_w = out_w / stride; int in_h = out_h / stride; int in_c = out_c; int in_index = b*w*h*c + in_c*w*h + in_h*w + in_w; if(forward) out[out_index] += scale * x[in_index]; else atomicAdd(x+in_index, scale * out[out_index]); } extern "C" void upsample_gpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out) { size_t size = w*h*c*batch*stride*stride; hipLaunchKernelGGL(( upsample_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, size, in, w, h, c, batch, stride, forward, scale, out); check_error(hipPeekAtLastError()); }
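Every launch in the file above goes through cuda_gridsize(), BLOCK and check_error(), which are declared in the headers pulled in by the extern "C" block and are not reproduced here. The following is a minimal sketch of what such helpers conventionally look like in darknet-style code, under the assumption that BLOCK is the thread-block size; the exact upstream definitions may differ. The 2-D grid matches the (blockIdx.x + blockIdx.y*gridDim.x)*blockDim.x + threadIdx.x indexing used by the kernels.

// Sketch of the launch helpers used throughout this file (assumed shapes,
// not the project's verbatim definitions).
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

#ifndef BLOCK
#define BLOCK 512   // assumed thread-block size
#endif

dim3 cuda_gridsize(size_t n)
{
    size_t k = (n - 1) / BLOCK + 1;   // number of blocks needed
    size_t x = k;
    size_t y = 1;
    if (x > 65535) {                  // split across gridDim.y when gridDim.x overflows
        x = (size_t)ceil(sqrt((double)k));
        y = (n - 1) / (x * BLOCK) + 1;
    }
    return dim3((unsigned)x, (unsigned)y, 1);
}

void check_error(hipError_t status)
{
    if (status != hipSuccess) {
        fprintf(stderr, "HIP error: %s\n", hipGetErrorString(status));
        exit(EXIT_FAILURE);
    }
}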
d95fdbf4a658cae644f5b1eb78e8ca078164be26.cu
#include "cuda_runtime.h" #include "curand.h" #include "cublas_v2.h" #include <assert.h> extern "C" { #include "blas.h" #include "cuda.h" #include "utils.h" } __global__ void scale_bias_kernel(float *output, float *biases, int n, int size) { int offset = blockIdx.x * blockDim.x + threadIdx.x; int filter = blockIdx.y; int batch = blockIdx.z; if(offset < size) output[(batch*n+filter)*size + offset] *= biases[filter]; } void scale_bias_gpu(float *output, float *biases, int batch, int n, int size) { dim3 dimGrid((size-1)/BLOCK + 1, n, batch); dim3 dimBlock(BLOCK, 1, 1); scale_bias_kernel<<<dimGrid, dimBlock>>>(output, biases, n, size); check_error(cudaPeekAtLastError()); } __global__ void backward_scale_kernel(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates) { __shared__ float part[BLOCK]; int i,b; int filter = blockIdx.x; int p = threadIdx.x; float sum = 0; for(b = 0; b < batch; ++b){ for(i = 0; i < size; i += BLOCK){ int index = p + i + size*(filter + n*b); sum += (p+i < size) ? delta[index]*x_norm[index] : 0; } } part[p] = sum; __syncthreads(); if (p == 0) { for(i = 0; i < BLOCK; ++i) scale_updates[filter] += part[i]; } } void backward_scale_gpu(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates) { backward_scale_kernel<<<n, BLOCK>>>(x_norm, delta, batch, n, size, scale_updates); check_error(cudaPeekAtLastError()); } __global__ void backward_variance_outputs_kernel(float *variance_updates, float *delta, int n, float *rolling_variance, float *variance) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= n) return; variance_updates[i] += delta[i] * (-0.5f) * sqrtf(rolling_variance[i]+.00001f) * powf(variance[i]+.00001f, float(-3.0f / 2.0f)); } void backward_variance_outputs_gpu(float *variance_updates, float *delta, int n, float *rolling_variance, float *variance) { backward_variance_outputs_kernel<<<cuda_gridsize(n), BLOCK>>>(variance_updates, delta, n, rolling_variance, variance); check_error(cudaPeekAtLastError()); } __global__ void backward_gamma_kernel(float *weights, float *delta, int c, int n, int size, float *rolling_variance, float *scale_updates) { int i,b; for(b = 0; b < n; ++b){ __shared__ float part[BLOCK]; int p = threadIdx.x; float sum = 0; for(i = 0; i < c*size; i += BLOCK){ int index = p + i + size*c*b; sum += (p+i < c*size) ? 
delta[index]*weights[index] : 0; } part[p] = sum; __syncthreads(); if (p == 0) { for(i = 0; i < BLOCK; ++i) scale_updates[b] += part[i]; } } } void backward_gamma_gpu(float *weights, float *delta, int c, int n, int size, float *rolling_variance, float *scale_updates) { backward_gamma_kernel<<<n, BLOCK>>>(weights, delta, c, n, size, rolling_variance, scale_updates); check_error(cudaPeekAtLastError()); } __global__ void backward_shift_gamma_kernel(float *scale_updates, int n, float *bias_updates, float *rolling_variance, float *mean, float *variance) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= n) return; scale_updates[i] = scale_updates[i] / sqrtf(rolling_variance[i]+.00001f) - bias_updates[i] * mean[i] / sqrtf(variance[i]+.00001f); } void backward_shift_gamma_gpu(float *scale_updates, int n, float *bias_updates, float *rolling_variance, float *mean, float *variance) { backward_shift_gamma_kernel<<<cuda_gridsize(n), BLOCK>>>(scale_updates, n, bias_updates, rolling_variance, mean, variance); check_error(cudaPeekAtLastError()); } __global__ void backward_mean_kernel(float *mean_updates, float *bias_updates, int n, float *scales_gpu, float *variance) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= n) return; mean_updates[i] = bias_updates[i] * (-scales_gpu[i] / sqrtf(variance[i]+.00001f)); } void backward_mean_gpu(float *mean_updates, float *bias_updates, int n, float *scales_gpu, float *variance) { backward_mean_kernel<<<cuda_gridsize(n), BLOCK>>>(mean_updates, bias_updates, n, scales_gpu, variance); check_error(cudaPeekAtLastError()); } __global__ void backward_variance_bias_kernel(float *variance_updates, float *bias_updates, int n, float *scales_gpu, float *mean, float *variance) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= n) return; variance_updates[i] += bias_updates[i] * (0.5f * scales_gpu[i] * mean[i] * powf(variance[i]+.00001f, (float)(-3.f/2.f))); } void backward_variance_bias_gpu(float *variance_updates, float *bias_updates, int n, float *scales_gpu, float *mean, float *variance) { backward_variance_bias_kernel<<<cuda_gridsize(n), BLOCK>>>(variance_updates, bias_updates, n, scales_gpu, mean, variance); check_error(cudaPeekAtLastError()); } __global__ void sum_outputs_kernel(float *x_stat, float *mean, int batch, int filters, int spatial, float *x_stat_sum) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? 
x_stat[index] - mean[filter] : 0; } } __syncthreads(); if(id == 0){ x_stat_sum[filter] = 0; for(i = 0; i < threads; ++i){ x_stat_sum[filter] += local[i]; } x_stat_sum[filter] /= -(spatial * batch); } } void sum_outputs_gpu(float *x_stat, float *mean, int batch, int filters, int spatial, float *x_stat_sum) { sum_outputs_kernel<<<filters, BLOCK>>>(x_stat, mean, batch, filters, spatial, x_stat_sum); check_error(cudaPeekAtLastError()); } __global__ void backward_stat_mean_var_kernel(float *x_stat_delta, float *delta_mean, float *delta_variance, int batch, int n, int size, float *x_stat, float *mean, float *x_stat_sum) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= n*size*batch) return; int i = index % size; index /= size; int j = index % n; index /= n; int k = index; x_stat_delta[(k*n+j)*size + i] = delta_mean[j] / (batch * size) + delta_variance[j] * 2 / (batch * size) * (x_stat[(k*n+j)*size + i] - mean[j]+x_stat_sum[j]); } void backward_stat_mean_var_gpu(float *x_stat_delta, float *delta_mean, float *delta_variance, int batch, int n, int size, float *x_stat, float *mean, float *x_stat_sum) { int num = n*size*batch; backward_stat_mean_var_kernel<<<cuda_gridsize(num), BLOCK>>>(x_stat_delta, delta_mean, delta_variance, batch, n, size, x_stat, mean, x_stat_sum); check_error(cudaPeekAtLastError()); } __global__ void add_bias_kernel(float *output, float *biases, int batch, int n, int size) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= n*size*batch) return; int i = index % size; index /= size; int j = index % n; index /= n; int k = index; output[(k*n+j)*size + i] += biases[j]; } void add_bias_gpu(float *output, float *biases, int batch, int n, int size) { int num = n*size*batch; add_bias_kernel<<<cuda_gridsize(num), BLOCK>>>(output, biases, batch, n, size); check_error(cudaPeekAtLastError()); } __global__ void backward_bias_conn_kernel(float *bias_updates, float *delta, int batch, int n) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= n) return; int b; float sum = 0; for(b = 0; b < batch; ++b){ int i = b*n + index; sum += delta[i]; } bias_updates[index] += sum; } __global__ void backward_bias_kernel(float *bias_updates, float *delta, int batch, int n, int size) { __shared__ float part[BLOCK]; int i,b; int filter = blockIdx.x; int p = threadIdx.x; float sum = 0; for(b = 0; b < batch; ++b){ for(i = 0; i < size; i += BLOCK){ int index = p + i + size*(filter + n*b); sum += (p+i < size) ? 
delta[index] : 0; } } part[p] = sum; __syncthreads(); if (p == 0) { for(i = 0; i < BLOCK; ++i) bias_updates[filter] += part[i]; } } void backward_bias_gpu(float *bias_updates, float *delta, int batch, int n, int size) { if(size == 1){ backward_bias_conn_kernel<<<cuda_gridsize(n), BLOCK>>>(bias_updates, delta, batch, n); }else{ backward_bias_kernel<<<n, BLOCK>>>(bias_updates, delta, batch, n, size); } check_error(cudaPeekAtLastError()); } __global__ void backward_quantize_kernel(float *x_updates, float *x, int n, float bound, float shift) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i < n) { if (x[i] < -bound || x[i] > bound- (float)1.0 / shift) x_updates[i] = 0; } } void backward_quantize_gpu(float *x_updates, float *x, int n, int total_bitwidth, int fraction_bitwidth) { int integer_bitwidth = total_bitwidth - fraction_bitwidth; float bound = pow(2, integer_bitwidth - 1); float shift = pow(2, fraction_bitwidth); backward_quantize_kernel<<<cuda_gridsize(n), BLOCK>>>(x_updates, x, n, bound, shift); } /* __global__ void dot_kernel(float *output, float scale, int batch, int n, int size, float *delta) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; int f1 = index / n; int f2 = index % n; if (f2 <= f1) return; float sum = 0; float norm1 = 0; float norm2 = 0; int b, i; for(b = 0; b < batch; ++b){ for(i = 0; i < size; ++i){ int i1 = b * size * n + f1 * size + i; int i2 = b * size * n + f2 * size + i; sum += output[i1] * output[i2]; norm1 += output[i1] * output[i1]; norm2 += output[i2] * output[i2]; } } norm1 = sqrt(norm1); norm2 = sqrt(norm2); float norm = norm1 * norm2; sum = sum / norm; for(b = 0; b < batch; ++b){ for(i = 0; i < size; ++i){ int i1 = b * size * n + f1 * size + i; int i2 = b * size * n + f2 * size + i; delta[i1] += - scale * sum * output[i2] / norm; delta[i2] += - scale * sum * output[i1] / norm; } } } void dot_error_gpu(layer l) { dot_kernel<<<cuda_gridsize(l.n*l.n), BLOCK>>>(l.output_gpu, l.dot, l.batch, l.n, l.out_w * l.out_h, l.delta_gpu); check_error(cudaPeekAtLastError()); } */ __global__ void adam_kernel(int N, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; float mhat = m[index] / (1.f - powf(B1, t)); float vhat = v[index] / (1.f - powf(B2, t)); x[index] = x[index] + rate * mhat / (sqrtf(vhat) + eps); } extern "C" void adam_gpu(int n, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t) { adam_kernel<<<cuda_gridsize(n), BLOCK>>>(n, x, m, v, B1, B2, rate, eps, t); check_error(cudaPeekAtLastError()); } extern "C" void adam_update_gpu(float *w, float *d, float *m, float *v, float B1, float B2, float eps, float decay, float rate, int n, int batch, int t) { scal_gpu(n, B1, m, 1); scal_gpu(n, B2, v, 1); axpy_gpu(n, -decay*batch, w, 1, d, 1); axpy_gpu(n, (1-B1), d, 1, m, 1); mul_gpu(n, d, 1, d, 1); axpy_gpu(n, (1-B2), d, 1, v, 1); adam_gpu(n, w, m, v, B1, B2, rate, eps, t); fill_gpu(n, 0, d, 1); } __global__ void normalize_kernel(int N, float *x, float *mean, float *variance, int batch, int filters, int spatial) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; int f = (index/spatial)%filters; x[index] = (x[index] - mean[f])/(sqrtf(variance[f] + .00001f)); } __global__ void normalize_delta_kernel(int N, float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, 
int spatial, float *delta) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; int f = (index/spatial)%filters; delta[index] = delta[index] * 1.f/(sqrtf(variance[f] + .00001f)) + variance_delta[f] * 2.f * (x[index] - mean[f]) / (spatial * batch) + mean_delta[f]/(spatial*batch); } extern "C" void normalize_delta_gpu(float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta) { size_t N = batch*filters*spatial; normalize_delta_kernel<<<cuda_gridsize(N), BLOCK>>>(N, x, mean, variance, mean_delta, variance_delta, batch, filters, spatial, delta); check_error(cudaPeekAtLastError()); } __global__ void variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; int j,k; variance_delta[i] = 0; for(j = 0; j < batch; ++j){ for(k = 0; k < spatial; ++k){ int index = j*filters*spatial + i*spatial + k; variance_delta[i] += delta[index]*(x[index] - mean[i]); } } variance_delta[i] *= -.5f * powf(variance[i] + .00001f, (float)(-3.f/2.f)); } __global__ void accumulate_kernel(float *x, int n, int groups, float *sum) { int k; int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= groups) return; sum[i] = 0; for(k = 0; k < n; ++k){ sum[i] += x[k*groups + i]; } } __global__ void fast_mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? delta[index] : 0; } } __syncthreads(); if(id == 0){ mean_delta[filter] = 0; for(i = 0; i < threads; ++i){ mean_delta[filter] += local[i]; } mean_delta[filter] *= (-1.f/sqrtf(variance[filter] + .00001f)); } } __global__ void fast_variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? 
delta[index]*(x[index] - mean[filter]) : 0; } } __syncthreads(); if(id == 0){ variance_delta[filter] = 0; for(i = 0; i < threads; ++i){ variance_delta[filter] += local[i]; } variance_delta[filter] *= -.5f * powf(variance[filter] + .00001f, (float)(-3.f/2.f)); } } __global__ void mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; int j,k; mean_delta[i] = 0; for (j = 0; j < batch; ++j) { for (k = 0; k < spatial; ++k) { int index = j*filters*spatial + i*spatial + k; mean_delta[i] += delta[index]; } } mean_delta[i] *= (-1.f/sqrtf(variance[i] + .00001f)); } extern "C" void mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { mean_delta_kernel<<<cuda_gridsize(filters), BLOCK>>>(delta, variance, batch, filters, spatial, mean_delta); check_error(cudaPeekAtLastError()); } extern "C" void fast_mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { fast_mean_delta_kernel<<<filters, BLOCK>>>(delta, variance, batch, filters, spatial, mean_delta); check_error(cudaPeekAtLastError()); } extern "C" void fast_variance_delta_gpu(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta) { fast_variance_delta_kernel<<<filters, BLOCK>>>(x, delta, mean, variance, batch, filters, spatial, variance_delta); check_error(cudaPeekAtLastError()); } __global__ void mean_kernel(float *x, int batch, int filters, int spatial, float *mean) { float scale = 1.f/(batch * spatial); int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; int j,k; mean[i] = 0; for(j = 0; j < batch; ++j){ for(k = 0; k < spatial; ++k){ int index = j*filters*spatial + i*spatial + k; mean[i] += x[index]; } } mean[i] *= scale; } __global__ void variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance) { float scale = 1.f/(batch * spatial - 1); int j,k; int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; variance[i] = 0; for(j = 0; j < batch; ++j){ for(k = 0; k < spatial; ++k){ int index = j*filters*spatial + i*spatial + k; variance[i] += powf((x[index] - mean[i]), 2); } } variance[i] *= scale; } __global__ void reorg_kernel(int N, float *x, int w, int h, int c, int batch, int stride, int forward, float *out) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i >= N) return; int in_index = i; int in_w = i%w; i = i/w; int in_h = i%h; i = i/h; int in_c = i%c; i = i/c; int b = i%batch; int out_c = c/(stride*stride); int c2 = in_c % out_c; int offset = in_c / out_c; int w2 = in_w*stride + offset % stride; int h2 = in_h*stride + offset / stride; //printf("%d\n", offset); int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b)); // printf("%d %d %d\n", w2, h2, c2); //printf("%d %d\n", in_index, out_index); //if(out_index >= N || out_index < 0) printf("bad bad bad \n"); if(forward) out[out_index] = x[in_index]; else out[in_index] = x[out_index]; //if(forward) out[1] = x[1]; //else out[0] = x[0]; } __global__ void axpy_kernel(int N, float ALPHA, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[OFFY+i*INCY] += ALPHA*X[OFFX+i*INCX]; } __global__ void pow_kernel(int N, float ALPHA, float *X, int INCX, float *Y, 
int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[i*INCY] = pow(X[i*INCX], ALPHA); } __global__ void add_pow_kernel(int N, float ALPHA, float *X, int INCX, float *Y, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[i*INCY] += pow(X[i*INCX], ALPHA); } __global__ void const_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] = ALPHA; } __global__ void constrain_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] = fminf(ALPHA, fmaxf(-ALPHA, X[i*INCX])); } __global__ void clamp_kernel(int N, float *X, int INCX, float clamp_min, float clamp_max) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] = fminf(clamp_max, fmaxf(clamp_min, X[i*INCX])); } __global__ void fabsf_clamp_kernel(int N, float *X, int INCX, float clamp_min, float clamp_max) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) { if (X[i*INCX] >= 0) X[i*INCX] = fminf(clamp_max, fmaxf(clamp_min, X[i*INCX])); else X[i*INCX] = fminf(-clamp_min, fmaxf(-clamp_max, X[i*INCX])); } } __global__ void quantize_kernel(float *x, int n, float shift, float bound, int diff, float static_rate, int is_round) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i < n) { if (is_round) { x[i] = fminf(bound - (float)1.0 / shift, fmaxf(-bound, round(x[i] * shift) / shift)); } else { // make the quantization_aware_training same as post_training_quantization float quantize_x = ((int)(x[i] * static_rate) >> diff) / shift; x[i] = fminf(bound - (float)1.0 / shift, fmax(-bound, quantize_x)); } } } __global__ void shift_bias_kernel(float *x, int n, float *gamma, float * beta, float *mean, float * variance) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i < n) { x[i] = beta[i] - gamma[i] * mean[i] / sqrtf(variance[i] + .00001f); } } __global__ void supp_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) { if((X[i*INCX] * X[i*INCX]) < (ALPHA * ALPHA)) X[i*INCX] = 0; } } __global__ void add_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] += ALPHA; } __global__ void scal_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] *= ALPHA; } __global__ void scal_add_kernel(int N, float ALPHA, float BETA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] = ALPHA * X[i*INCX] + BETA; } __global__ void reweight_kernel(int N, float *scale, int scale_offset, float *X, int INCX, float *Y, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[i*INCY] = scale[scale_offset] * X[i*INCX]; } __global__ void fill_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] = ALPHA; } __global__ void copy_kernel(int N, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[i*INCY + OFFY] = X[i*INCX + OFFX]; } __global__ void mul_kernel(int N, float *X, int INCX, 
float *Y, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[i*INCY] *= X[i*INCX]; } extern "C" void normalize_gpu(float *x, float *mean, float *variance, int batch, int filters, int spatial) { size_t N = batch*filters*spatial; normalize_kernel<<<cuda_gridsize(N), BLOCK>>>(N, x, mean, variance, batch, filters, spatial); check_error(cudaPeekAtLastError()); } __global__ void l2norm_kernel(int N, float *x, float *dx, int batch, int filters, int spatial) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; int b = index / spatial; int i = index % spatial; int f; float sum = 0; for(f = 0; f < filters; ++f){ int index = b*filters*spatial + f*spatial + i; sum += powf(x[index], 2); } sum = sqrtf(sum); if(sum == 0) sum = 1; //printf("%f\n", sum); for(f = 0; f < filters; ++f){ int index = b*filters*spatial + f*spatial + i; x[index] /= sum; dx[index] = (1 - x[index]) / sum; } } extern "C" void l2normalize_gpu(float *x, float *dx, int batch, int filters, int spatial) { size_t N = batch*spatial; l2norm_kernel<<<cuda_gridsize(N), BLOCK>>>(N, x, dx, batch, filters, spatial); check_error(cudaPeekAtLastError()); } __global__ void fast_mean_kernel(float *x, int batch, int filters, int spatial, float *mean) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? x[index] : 0; } } __syncthreads(); if(id == 0){ mean[filter] = 0; for(i = 0; i < threads; ++i){ mean[filter] += local[i]; } mean[filter] /= spatial * batch; } } __global__ void fast_variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? 
powf((x[index] - mean[filter]), 2) : 0; } } __syncthreads(); if(id == 0){ variance[filter] = 0; for(i = 0; i < threads; ++i){ variance[filter] += local[i]; } variance[filter] /= (spatial * batch); } } extern "C" void fast_mean_gpu(float *x, int batch, int filters, int spatial, float *mean) { fast_mean_kernel<<<filters, BLOCK>>>(x, batch, filters, spatial, mean); check_error(cudaPeekAtLastError()); } extern "C" void fast_variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance) { fast_variance_kernel<<<filters, BLOCK>>>(x, mean, batch, filters, spatial, variance); check_error(cudaPeekAtLastError()); } extern "C" void mean_gpu(float *x, int batch, int filters, int spatial, float *mean) { mean_kernel<<<cuda_gridsize(filters), BLOCK>>>(x, batch, filters, spatial, mean); check_error(cudaPeekAtLastError()); } extern "C" void variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance) { variance_kernel<<<cuda_gridsize(filters), BLOCK>>>(x, mean, batch, filters, spatial, variance); check_error(cudaPeekAtLastError()); } extern "C" void axpy_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY) { axpy_gpu_offset(N, ALPHA, X, 0, INCX, Y, 0, INCY); } extern "C" void pow_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY) { pow_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX, Y, INCY); check_error(cudaPeekAtLastError()); } extern "C" void add_pow_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY) { add_pow_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX, Y, INCY); check_error(cudaPeekAtLastError()); } extern "C" void axpy_gpu_offset(int N, float ALPHA, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY) { axpy_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, OFFX, INCX, Y, OFFY, INCY); check_error(cudaPeekAtLastError()); } extern "C" void copy_gpu(int N, float * X, int INCX, float * Y, int INCY) { copy_gpu_offset(N, X, 0, INCX, Y, 0, INCY); } extern "C" void mul_gpu(int N, float * X, int INCX, float * Y, int INCY) { mul_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, INCX, Y, INCY); check_error(cudaPeekAtLastError()); } extern "C" void copy_gpu_offset(int N, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY) { copy_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, OFFX, INCX, Y, OFFY, INCY); check_error(cudaPeekAtLastError()); } __global__ void flatten_kernel(int N, float *x, int spatial, int layers, int batch, int forward, float *out) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i >= N) return; int in_s = i%spatial; i = i/spatial; int in_c = i%layers; i = i/layers; int b = i; int i1 = b*layers*spatial + in_c*spatial + in_s; int i2 = b*layers*spatial + in_s*layers + in_c; if (forward) out[i2] = x[i1]; else out[i1] = x[i2]; } extern "C" void flatten_gpu(float *x, int spatial, int layers, int batch, int forward, float *out) { int size = spatial*batch*layers; flatten_kernel<<<cuda_gridsize(size), BLOCK>>>(size, x, spatial, layers, batch, forward, out); check_error(cudaPeekAtLastError()); } extern "C" void reorg_gpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out) { int size = w*h*c*batch; reorg_kernel<<<cuda_gridsize(size), BLOCK>>>(size, x, w, h, c, batch, stride, forward, out); check_error(cudaPeekAtLastError()); } __global__ void mask_kernel(int n, float *x, float mask_num, float *mask, float val) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n && mask[i] == mask_num) x[i] = val; } 
extern "C" void mask_gpu(int N, float * X, float mask_num, float * mask, float val) { mask_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, mask_num, mask, val); check_error(cudaPeekAtLastError()); } __global__ void scale_mask_kernel(int n, float *x, float mask_num, float *mask, float scale) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n && mask[i] == mask_num) x[i] *= scale; } extern "C" void scale_mask_gpu(int N, float * X, float mask_num, float * mask, float scale) { scale_mask_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, mask_num, mask, scale); check_error(cudaPeekAtLastError()); } extern "C" void const_gpu(int N, float ALPHA, float * X, int INCX) { const_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX); check_error(cudaPeekAtLastError()); } extern "C" void constrain_gpu(int N, float ALPHA, float * X, int INCX) { constrain_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX); check_error(cudaPeekAtLastError()); } extern "C" void clamp_gpu(int N, float *X, int INCX, float clamp_min, float clamp_max) { clamp_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, INCX, clamp_min, clamp_max); check_error(cudaPeekAtLastError()); } extern "C" void fabsf_clamp_gpu(int N, float *X, int INCX, float clamp_min, float clamp_max) { fabsf_clamp_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, INCX, clamp_min, clamp_max); check_error(cudaPeekAtLastError()); } extern "C" void quantize_gpu(float * x, int n, int total_bitwidth, int fraction_bitwidth, int net_fraction_bitwidth, int is_round) { int integer_bitwidth = total_bitwidth - fraction_bitwidth; float bound = pow(2, integer_bitwidth - 1); float shift = pow(2, fraction_bitwidth); int diff = net_fraction_bitwidth - fraction_bitwidth; float static_rate = pow(2, net_fraction_bitwidth); quantize_kernel<<<cuda_gridsize(n), BLOCK>>>(x, n, shift, bound, diff, static_rate, is_round); check_error(cudaPeekAtLastError()); } __global__ void scale_weights_kernel(float *weights, int nweights, int n, float * gamma, float * variance) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index < nweights) { int i = index / (nweights / n); weights[index] *= gamma[i] / sqrtf(variance[i] + .00001f); } } extern "C" void scale_weights_gpu(float * weights, int nweights, int n, float * gamma, float * variance) { scale_weights_kernel<<<cuda_gridsize(nweights), BLOCK>>>(weights, nweights, n, gamma, variance); check_error(cudaPeekAtLastError()); } __global__ void scale_outputs_kernel(float *outputs, float *variance, float *rolling_variance, int n, int size) { int offset = blockIdx.x * blockDim.x + threadIdx.x; int filter = blockIdx.y; int batch = blockIdx.z; if(offset < size) outputs[(batch*n+filter)*size + offset] *= (sqrtf(rolling_variance[filter]+.00001f) / sqrtf(variance[filter]+.00001f)); } extern "C" void scale_outputs_gpu(float * outputs, float * variance, float * rolling_variance, int batch, int n, int size) { dim3 dimGrid((size-1)/BLOCK + 1, n, batch); dim3 dimBlock(BLOCK, 1, 1); scale_outputs_kernel<<<dimGrid, dimBlock>>>(outputs, variance, rolling_variance, n, size); check_error(cudaPeekAtLastError()); } extern "C" void shift_bias_gpu(float * new_bias, int n, float * scales, float * bias, float * mean, float * variance) { shift_bias_kernel<<<cuda_gridsize(n), BLOCK>>>(new_bias, n, scales, bias, mean, variance); check_error(cudaPeekAtLastError()); } extern "C" void add_gpu(int N, float ALPHA, float * X, int INCX) { add_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX); check_error(cudaPeekAtLastError()); } extern "C" 
void scal_gpu(int N, float ALPHA, float * X, int INCX) { scal_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX); check_error(cudaPeekAtLastError()); } extern "C" void scal_add_gpu(int N, float ALPHA, float BETA, float * X, int INCX) { scal_add_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, BETA, X, INCX); check_error(cudaPeekAtLastError()); } extern "C" void reweight_gpu(int N, float * scale, int scale_offset, float * X, int INCX, float * Y, int INCY) { reweight_kernel<<<cuda_gridsize(N), BLOCK>>>(N, scale, scale_offset, X, INCX, Y, INCY); check_error(cudaPeekAtLastError()); } extern "C" void supp_gpu(int N, float ALPHA, float * X, int INCX) { supp_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX); check_error(cudaPeekAtLastError()); } extern "C" void fill_gpu(int N, float ALPHA, float * X, int INCX) { fill_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX); check_error(cudaPeekAtLastError()); } __global__ void shortcut_kernel(int size, int minw, int minh, int minc, int stride, int sample, int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (id >= size) return; int i = id % minw; id /= minw; int j = id % minh; id /= minh; int k = id % minc; id /= minc; int b = id % batch; int out_index = i*sample + w2*(j*sample + h2*(k + c2*b)); int add_index = i*stride + w1*(j*stride + h1*(k + c1*b)); out[out_index] = s1*out[out_index] + s2*add[add_index]; //out[out_index] += add[add_index]; } extern "C" void shortcut_gpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out) { int minw = (w1 < w2) ? w1 : w2; int minh = (h1 < h2) ? h1 : h2; int minc = (c1 < c2) ? c1 : c2; //printf("shortcut_layer: %d %d %d %d %d %d: %d %d %d\n", w1, h1, c1, w2, h2, c2, minw, minh, minc); int stride = w1/w2; int sample = w2/w1; assert(stride == h1/h2); assert(sample == h2/h1); if(stride < 1) stride = 1; if(sample < 1) sample = 1; int size = batch * minw * minh * minc; shortcut_kernel<<<cuda_gridsize(size), BLOCK>>>(size, minw, minh, minc, stride, sample, batch, w1, h1, c1, add, w2, h2, c2, s1, s2, out); check_error(cudaPeekAtLastError()); } __global__ void smooth_l1_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float diff = truth[i] - pred[i]; float abs_val = fabsf(diff); if(abs_val < 1) { error[i] = diff * diff; delta[i] = diff; } else { error[i] = 2*abs_val - 1; delta[i] = (diff > 0) ? 1 : -1; } } } extern "C" void smooth_l1_gpu(int n, float *pred, float *truth, float *delta, float *error) { smooth_l1_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error); check_error(cudaPeekAtLastError()); } __global__ void add_l1_delta_kernel(int n, float scale, float *weight, float *delta) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ delta[i] += scale * ((weight[i] > 0) ? 
1 : -1); } } extern "C" void add_l1_delta_gpu(int n, float scale, float *weight, float *delta) { add_l1_delta_kernel<<<cuda_gridsize(n), BLOCK>>>(n, scale, weight, delta); check_error(cudaPeekAtLastError()); } __global__ void add_consistent_l1_delta_kernel(int n, float scale, float *weight, float *sqrt_sum, float *delta) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ delta[i] += scale * (weight[i] / sqrt_sum[i]); } } extern "C" void add_consistent_l1_delta_gpu(int n, float scale, float *weight, float *sqrt_sum, float *delta) { add_consistent_l1_delta_kernel<<<cuda_gridsize(n), BLOCK>>>(n, scale, weight, sqrt_sum, delta); check_error(cudaPeekAtLastError()); } __global__ void softmax_x_ent_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float t = truth[i]; float p = pred[i]; error[i] = (t) ? -log(p) : 0; delta[i] = t-p; } } extern "C" void softmax_x_ent_gpu(int n, float *pred, float *truth, float *delta, float *error) { softmax_x_ent_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error); check_error(cudaPeekAtLastError()); } __global__ void logistic_x_ent_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float t = truth[i]; float p = pred[i]; error[i] = -t*log(p+.0000001) - (1-t)*log(1-p+.0000001); delta[i] = t-p; } } extern "C" void logistic_x_ent_gpu(int n, float *pred, float *truth, float *delta, float *error) { logistic_x_ent_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error); check_error(cudaPeekAtLastError()); } __global__ void l2_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float diff = truth[i] - pred[i]; error[i] = diff * diff; //I know this is technically wrong, deal with it. delta[i] = diff; } } extern "C" void l2_gpu(int n, float *pred, float *truth, float *delta, float *error) { l2_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error); check_error(cudaPeekAtLastError()); } __global__ void l1_margin_kernel(int n, float *spred, float *ppred, float margin, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i < n) { //float diff = fabsf(spred[i] - ppred[i]); float diff = margin - fabsf(spred[i] - ppred[i]); //printf("info: %f %f %f %f\n", margin, fabsf(spred[i] - ppred[i]), spred[i], ppred[i]); if (diff <= 0) { error[i] = 0; delta[i] = 0; } else { error[i] = diff; delta[i] = (spred[i] - ppred[i] > 0) ? 1 : -1; } } } // max(0, -|ls - lp| + margin) extern "C" void l1_margin_gpu(int n, float *spred, float *ppred, float margin, float *delta, float *error) { l1_margin_kernel<<<cuda_gridsize(n), BLOCK>>>(n, spred, ppred, margin, delta, error); check_error(cudaPeekAtLastError()); } __global__ void l1_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float diff = truth[i] - pred[i]; error[i] = abs(diff); delta[i] = (diff > 0) ? 
1 : -1; } } extern "C" void l1_gpu(int n, float *pred, float *truth, float *delta, float *error) { l1_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error); check_error(cudaPeekAtLastError()); } __global__ void wgan_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ error[i] = truth[i] ? -pred[i] : pred[i]; delta[i] = (truth[i] > 0) ? 1 : -1; } } extern "C" void wgan_gpu(int n, float *pred, float *truth, float *delta, float *error) { wgan_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error); check_error(cudaPeekAtLastError()); } __global__ void weighted_sum_kernel(int n, float *a, float *b, float *s, float *c) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ c[i] = s[i]*a[i] + (1-s[i])*(b ? b[i] : 0); } } __global__ void deinter_kernel(int NX, float *X, int NY, float *Y, int B, float *OUT) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < (NX+NY)*B){ int b = i / (NX+NY); int j = i % (NX+NY); if (j < NX){ if(X) X[b*NX + j] += OUT[i]; } else { if(Y) Y[b*NY + j - NX] += OUT[i]; } } } extern "C" void deinter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT) { deinter_kernel<<<cuda_gridsize((NX+NY)*B), BLOCK>>>(NX, X, NY, Y, B, OUT); check_error(cudaPeekAtLastError()); } __global__ void inter_kernel(int NX, float *X, int NY, float *Y, int B, float *OUT) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < (NX+NY)*B){ int b = i / (NX+NY); int j = i % (NX+NY); if (j < NX){ OUT[i] = X[b*NX + j]; } else { OUT[i] = Y[b*NY + j - NX]; } } } extern "C" void inter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT) { inter_kernel<<<cuda_gridsize((NX+NY)*B), BLOCK>>>(NX, X, NY, Y, B, OUT); check_error(cudaPeekAtLastError()); } extern "C" void weighted_sum_gpu(float *a, float *b, float *s, int num, float *c) { weighted_sum_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, s, c); check_error(cudaPeekAtLastError()); } __global__ void weighted_delta_kernel(int n, float *a, float *b, float *s, float *da, float *db, float *ds, float *dc) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ if(da) da[i] += dc[i] * s[i]; if(db) db[i] += dc[i] * (1-s[i]); ds[i] += dc[i] * (a[i] - b[i]); } } extern "C" void weighted_delta_gpu(float *a, float *b, float *s, float *da, float *db, float *ds, int num, float *dc) { weighted_delta_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, s, da, db, ds, dc); check_error(cudaPeekAtLastError()); } __global__ void mult_add_into_kernel(int n, float *a, float *b, float *c) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ c[i] += a[i]*b[i]; } } extern "C" void mult_add_into_gpu(int num, float *a, float *b, float *c) { mult_add_into_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, c); check_error(cudaPeekAtLastError()); } __device__ void softmax_device(float *input, int n, float temp, int stride, float *output) { int i; float sum = 0; float largest = -INFINITY; for(i = 0; i < n; ++i){ int val = input[i*stride]; largest = (val>largest) ? 
val : largest; } for(i = 0; i < n; ++i){ float e = expf(input[i*stride]/temp - largest/temp); sum += e; output[i*stride] = e; } for(i = 0; i < n; ++i){ output[i*stride] /= sum; } } __global__ void softmax_tree_kernel(float *input, int spatial, int batch, int stride, float temp, float *output, int groups, int *group_size, int *group_offset) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (id >= spatial*batch*groups) return; int s = id % spatial; id = id / spatial; int g = id % groups; int b = id / groups; int goff = group_offset[g]*spatial; int boff = b*stride; softmax_device(input + goff + boff + s, group_size[g], temp, spatial, output + goff + boff + s); } extern "C" void softmax_tree(float *input, int spatial, int batch, int stride, float temp, float *output, tree hier) { int *tree_groups_size = cuda_make_int_array(hier.group_size, hier.groups); int *tree_groups_offset = cuda_make_int_array(hier.group_offset, hier.groups); /* static int *tree_groups_size = 0; static int *tree_groups_offset = 0; if(!tree_groups_size){ tree_groups_size = cuda_make_int_array(hier.group_size, hier.groups); tree_groups_offset = cuda_make_int_array(hier.group_offset, hier.groups); } */ int num = spatial*batch*hier.groups; softmax_tree_kernel<<<cuda_gridsize(num), BLOCK>>>(input, spatial, batch, stride, temp, output, hier.groups, tree_groups_size, tree_groups_offset); check_error(cudaPeekAtLastError()); cuda_free((float *)tree_groups_size); cuda_free((float *)tree_groups_offset); } __global__ void softmax_kernel(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (id >= batch*groups) return; int b = id / groups; int g = id % groups; softmax_device(input + b*batch_offset + g*group_offset, n, temp, stride, output + b*batch_offset + g*group_offset); } extern "C" void softmax_gpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output) { softmax_kernel<<<cuda_gridsize(batch*groups), BLOCK>>>(input, n, batch, batch_offset, groups, group_offset, stride, temp, output); check_error(cudaPeekAtLastError()); } __global__ void upsample_kernel(size_t N, float *x, int w, int h, int c, int batch, int stride, int forward, float scale, float *out) { size_t i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i >= N) return; int out_index = i; int out_w = i%(w*stride); i = i/(w*stride); int out_h = i%(h*stride); i = i/(h*stride); int out_c = i%c; i = i/c; int b = i%batch; int in_w = out_w / stride; int in_h = out_h / stride; int in_c = out_c; int in_index = b*w*h*c + in_c*w*h + in_h*w + in_w; if(forward) out[out_index] += scale * x[in_index]; else atomicAdd(x+in_index, scale * out[out_index]); } extern "C" void upsample_gpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out) { size_t size = w*h*c*batch*stride*stride; upsample_kernel<<<cuda_gridsize(size), BLOCK>>>(size, in, w, h, c, batch, stride, forward, scale, out); check_error(cudaPeekAtLastError()); }
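// --- Illustrative usage sketch (not part of the corpus entry above) --------
// A minimal host-side driver, assuming it is linked against the translation
// unit above plus the darknet-style "cuda.c" helpers (BLOCK, cuda_gridsize,
// check_error) that these kernels rely on. With batch = 1, groups = 1, unit
// stride and temperature 1, softmax_gpu reduces to a plain softmax over n
// logits. Treat this as a sketch under those assumptions, not upstream code.
#include <cstdio>
#include <cuda_runtime.h>

extern "C" void softmax_gpu(float *input, int n, int batch, int batch_offset,
                            int groups, int group_offset, int stride,
                            float temp, float *output);

int main()
{
    const int n = 4;
    float h_in[n]  = {1.0f, 2.0f, 3.0f, 4.0f};
    float h_out[n] = {0.0f, 0.0f, 0.0f, 0.0f};

    float *d_in = nullptr, *d_out = nullptr;
    cudaMalloc(&d_in,  n * sizeof(float));
    cudaMalloc(&d_out, n * sizeof(float));
    cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);

    // batch_offset = n, group_offset = 0, stride = 1, temp = 1.0f
    softmax_gpu(d_in, n, 1, n, 1, 0, 1, 1.0f, d_out);

    cudaMemcpy(h_out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; ++i) printf("%f\n", h_out[i]);  // ~0.032 0.087 0.237 0.644

    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}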
ea7c1e1f60c8c2cc4ea26a19e37701d62a123c2e.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <vector> namespace { template <typename scalar_t> __global__ void shift_cuda_forward_kernel( const scalar_t* __restrict__ input, const int32_t* __restrict__ shift, scalar_t* __restrict__ output, const int32_t B, const int32_t C, const int32_t H, const int32_t W) { const int32_t idx = blockIdx.x * blockDim.x + threadIdx.x; const int32_t size = B*C*H*W; const int32_t CHW = C*H*W; const int32_t HW = H*W; const int32_t b = idx / CHW; const int32_t c = (idx - b*CHW) / HW; const int32_t h = (idx - b*CHW - c*HW) / W; const int32_t w = idx - b*CHW - c*HW - h*W; const int32_t target_w = w + shift[2*c]; const int32_t target_h = h + shift[2*c + 1]; const int32_t target_idx = b*CHW + c*HW + target_h*W + target_w; if (idx < size && target_w >= 0 && target_w < W && target_h >= 0 && target_h < H) { output[target_idx] = input[idx]; } } template <typename scalar_t> __global__ void shift_cuda_backward_kernel( const scalar_t* __restrict__ grad_input, scalar_t* __restrict__ grad_output, const int32_t* __restrict__ shift, const int32_t B, const int32_t C, const int32_t W, const int32_t H) { const int32_t idx = blockIdx.x * blockDim.x + threadIdx.x; const int32_t size = B*C*W*H; const int32_t CWH = C*W*H; const int32_t WH = W*H; const int32_t b = idx / CWH; const int32_t c = (idx - b*CWH) / WH; const int32_t w = (idx - b*CWH - c*WH) / W; const int32_t h = idx - b*CWH - c*WH - w*H; const int32_t target_w = w - shift[2*c]; const int32_t target_h = h - shift[2*c + 1]; const int32_t target_idx = b*CWH + c*WH + target_w*W + target_h; if (idx < size && target_w >= 0 && target_w < W && target_h >= 0 && target_h < H) { grad_output[target_idx] = grad_input[idx]; } } } // namespace at::Tensor shift_cuda_forward( const at::Tensor input, const at::Tensor shift) { const auto B = input.size(0); const auto C = input.size(1); const auto H = input.size(2); const auto W = input.size(3); const auto size = B*C*W*H; const int threads = 1024; const int blocks = (size + threads - 1) / threads; auto output = at::zeros_like(input); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "shift_forward_cuda", ([&] { hipLaunchKernelGGL(( shift_cuda_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, input.data<scalar_t>(), shift.data<int32_t>(), output.data<scalar_t>(), B, C, H, W); })); return output; } at::Tensor shift_cuda_backward( const at::Tensor grad_input, const at::Tensor shift) { const auto B = grad_input.size(0); const auto C = grad_input.size(1); const auto H = grad_input.size(2); const auto W = grad_input.size(3); const auto size = B*C*W*H; const int threads = 1024; const int blocks = (size + threads - 1) / threads; auto grad_output = at::zeros_like(grad_input); AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_input.type(), "shift_backward_cuda", ([&] { hipLaunchKernelGGL(( shift_cuda_backward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, grad_input.data<scalar_t>(), grad_output.data<scalar_t>(), shift.data<int32_t>(), B, C, H, W); })); return grad_output; }
ea7c1e1f60c8c2cc4ea26a19e37701d62a123c2e.cu
#include <ATen/ATen.h> #include <cuda.h> #include <cuda_runtime.h> #include <vector> namespace { template <typename scalar_t> __global__ void shift_cuda_forward_kernel( const scalar_t* __restrict__ input, const int32_t* __restrict__ shift, scalar_t* __restrict__ output, const int32_t B, const int32_t C, const int32_t H, const int32_t W) { const int32_t idx = blockIdx.x * blockDim.x + threadIdx.x; const int32_t size = B*C*H*W; const int32_t CHW = C*H*W; const int32_t HW = H*W; const int32_t b = idx / CHW; const int32_t c = (idx - b*CHW) / HW; const int32_t h = (idx - b*CHW - c*HW) / W; const int32_t w = idx - b*CHW - c*HW - h*W; const int32_t target_w = w + shift[2*c]; const int32_t target_h = h + shift[2*c + 1]; const int32_t target_idx = b*CHW + c*HW + target_h*W + target_w; if (idx < size && target_w >= 0 && target_w < W && target_h >= 0 && target_h < H) { output[target_idx] = input[idx]; } } template <typename scalar_t> __global__ void shift_cuda_backward_kernel( const scalar_t* __restrict__ grad_input, scalar_t* __restrict__ grad_output, const int32_t* __restrict__ shift, const int32_t B, const int32_t C, const int32_t W, const int32_t H) { const int32_t idx = blockIdx.x * blockDim.x + threadIdx.x; const int32_t size = B*C*W*H; const int32_t CWH = C*W*H; const int32_t WH = W*H; const int32_t b = idx / CWH; const int32_t c = (idx - b*CWH) / WH; const int32_t w = (idx - b*CWH - c*WH) / W; const int32_t h = idx - b*CWH - c*WH - w*H; const int32_t target_w = w - shift[2*c]; const int32_t target_h = h - shift[2*c + 1]; const int32_t target_idx = b*CWH + c*WH + target_w*W + target_h; if (idx < size && target_w >= 0 && target_w < W && target_h >= 0 && target_h < H) { grad_output[target_idx] = grad_input[idx]; } } } // namespace at::Tensor shift_cuda_forward( const at::Tensor input, const at::Tensor shift) { const auto B = input.size(0); const auto C = input.size(1); const auto H = input.size(2); const auto W = input.size(3); const auto size = B*C*W*H; const int threads = 1024; const int blocks = (size + threads - 1) / threads; auto output = at::zeros_like(input); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "shift_forward_cuda", ([&] { shift_cuda_forward_kernel<scalar_t><<<blocks, threads>>>( input.data<scalar_t>(), shift.data<int32_t>(), output.data<scalar_t>(), B, C, H, W); })); return output; } at::Tensor shift_cuda_backward( const at::Tensor grad_input, const at::Tensor shift) { const auto B = grad_input.size(0); const auto C = grad_input.size(1); const auto H = grad_input.size(2); const auto W = grad_input.size(3); const auto size = B*C*W*H; const int threads = 1024; const int blocks = (size + threads - 1) / threads; auto grad_output = at::zeros_like(grad_input); AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_input.type(), "shift_backward_cuda", ([&] { shift_cuda_backward_kernel<scalar_t><<<blocks, threads>>>( grad_input.data<scalar_t>(), grad_output.data<scalar_t>(), shift.data<int32_t>(), B, C, H, W); })); return grad_output; }
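// --- Illustrative binding sketch (not part of the corpus entry above) ------
// One hypothetical way the two entry points above could be exposed to Python
// as a PyTorch C++/CUDA extension. The module and function names here are
// assumptions for illustration; the original project may bind them
// differently or wrap them behind an autograd.Function.
#include <torch/extension.h>

at::Tensor shift_cuda_forward(const at::Tensor input, const at::Tensor shift);
at::Tensor shift_cuda_backward(const at::Tensor grad_input, const at::Tensor shift);

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  // input: float CUDA tensor [B, C, H, W]; shift: int32 CUDA tensor holding
  // two offsets per channel, matching how the kernels index shift[2*c].
  m.def("forward",  &shift_cuda_forward,  "per-channel shift forward (CUDA)");
  m.def("backward", &shift_cuda_backward, "per-channel shift backward (CUDA)");
}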
8a5ce6c1eb1219c1dbda49cc060dde6f8269b765.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cuda_common.h" #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/atomic/common.cuh" #include "sg.h" namespace onnxruntime { namespace cuda { template <typename T> __global__ void _SGDOptimizer( const T* eta, const T* weights, const T* gradients, T* weights_out, T* gradients_out, CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); const T delta = -(*eta) * gradients[id]; if (gradients_out) { gradients_out[id] = delta; } if (weights_out) { weights_out[id] = weights[id] + delta; } } template <typename T> void SGDOptimizerImpl( hipStream_t stream, const T* eta, const T* weights, const T* gradients, T* weights_out, T* gradients_out, size_t count) { int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock)); CUDA_LONG N = static_cast<CUDA_LONG>(count); hipLaunchKernelGGL(( _SGDOptimizer<T>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, eta, weights, gradients, weights_out, gradients_out, N); } #define SPECIALIZED_IMPL__SGDOptimizerImpl(T) \ template void SGDOptimizerImpl( \ hipStream_t stream, \ const T* eta, \ const T* weights, \ const T* gradients, \ T* weights_out, \ T* gradients_out, \ size_t count); SPECIALIZED_IMPL__SGDOptimizerImpl(float) } // namespace cuda } // namespace onnxruntime
8a5ce6c1eb1219c1dbda49cc060dde6f8269b765.cu
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cuda_common.h" #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/atomic/common.cuh" #include "sg.h" namespace onnxruntime { namespace cuda { template <typename T> __global__ void _SGDOptimizer( const T* eta, const T* weights, const T* gradients, T* weights_out, T* gradients_out, CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); const T delta = -(*eta) * gradients[id]; if (gradients_out) { gradients_out[id] = delta; } if (weights_out) { weights_out[id] = weights[id] + delta; } } template <typename T> void SGDOptimizerImpl( cudaStream_t stream, const T* eta, const T* weights, const T* gradients, T* weights_out, T* gradients_out, size_t count) { int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock)); CUDA_LONG N = static_cast<CUDA_LONG>(count); _SGDOptimizer<T><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>( eta, weights, gradients, weights_out, gradients_out, N); } #define SPECIALIZED_IMPL__SGDOptimizerImpl(T) \ template void SGDOptimizerImpl( \ cudaStream_t stream, \ const T* eta, \ const T* weights, \ const T* gradients, \ T* weights_out, \ T* gradients_out, \ size_t count); SPECIALIZED_IMPL__SGDOptimizerImpl(float) } // namespace cuda } // namespace onnxruntime
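// --- Illustrative usage sketch (not part of the corpus entry above) --------
// Minimal host code exercising the SGD kernel, assuming it is compiled and
// linked together with the translation unit above (which explicitly
// instantiates the float version). Note that the learning rate is passed as a
// device-side scalar, since the kernel dereferences the eta pointer.
#include <cstdio>
#include <cuda_runtime.h>

namespace onnxruntime { namespace cuda {
template <typename T>
void SGDOptimizerImpl(cudaStream_t stream, const T* eta, const T* weights,
                      const T* gradients, T* weights_out, T* gradients_out,
                      size_t count);
}}  // namespace onnxruntime::cuda

int main()
{
    const int n = 3;
    float h_w[n] = {1.0f, 2.0f, 3.0f};
    float h_g[n] = {0.5f, 0.5f, 0.5f};
    float h_eta  = 0.1f;

    float *d_w, *d_g, *d_wo, *d_go, *d_eta;
    cudaMalloc(&d_w,  n * sizeof(float));
    cudaMalloc(&d_g,  n * sizeof(float));
    cudaMalloc(&d_wo, n * sizeof(float));
    cudaMalloc(&d_go, n * sizeof(float));
    cudaMalloc(&d_eta, sizeof(float));
    cudaMemcpy(d_w, h_w, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_g, h_g, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_eta, &h_eta, sizeof(float), cudaMemcpyHostToDevice);

    // weights_out = w - eta * g; gradients_out = -eta * g
    onnxruntime::cuda::SGDOptimizerImpl<float>(0, d_eta, d_w, d_g, d_wo, d_go, n);
    cudaDeviceSynchronize();

    float h_out[n];
    cudaMemcpy(h_out, d_wo, n * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; ++i) printf("%f\n", h_out[i]);  // expect 0.95 1.95 2.95

    cudaFree(d_w); cudaFree(d_g); cudaFree(d_wo); cudaFree(d_go); cudaFree(d_eta);
    return 0;
}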
nms_kernel.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/nms_kernel.h" #include "paddle/fluid/memory/malloc.h" #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/math_function.h" static const int64_t threadsPerBlock = sizeof(int64_t) * 8; namespace phi { template <typename T> static __global__ void NMS(const T* boxes_data, float threshold, int64_t num_boxes, uint64_t* masks) { auto raw_start = blockIdx.y; auto col_start = blockIdx.x; if (raw_start > col_start) return; const int raw_last_storage = min(num_boxes - raw_start * threadsPerBlock, threadsPerBlock); const int col_last_storage = min(num_boxes - col_start * threadsPerBlock, threadsPerBlock); if (threadIdx.x < raw_last_storage) { uint64_t mask = 0; auto current_box_idx = raw_start * threadsPerBlock + threadIdx.x; const T* current_box = boxes_data + current_box_idx * 4; for (int i = 0; i < col_last_storage; ++i) { const T* target_box = boxes_data + (col_start * threadsPerBlock + i) * 4; if (CalculateIoU<T>(current_box, target_box, threshold)) { mask |= 1ULL << i; } } const int blocks_per_line = CeilDivide(num_boxes, threadsPerBlock); masks[current_box_idx * blocks_per_line + col_start] = mask; } } template <typename T, typename Context> void NMSKernel(const Context& dev_ctx, const DenseTensor& boxes, float threshold, DenseTensor* output) { const int64_t num_boxes = boxes.dims()[0]; const auto blocks_per_line = CeilDivide(num_boxes, threadsPerBlock); dim3 block(threadsPerBlock); dim3 grid(blocks_per_line, blocks_per_line); auto mask_data = paddle::memory::Alloc( dev_ctx.GetPlace(), num_boxes * blocks_per_line * sizeof(uint64_t), phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream()))); uint64_t* mask_dev = reinterpret_cast<uint64_t*>(mask_data->ptr()); hipLaunchKernelGGL(( NMS<T>), dim3(grid), dim3(block), 0, dev_ctx.stream(), boxes.data<T>(), threshold, num_boxes, mask_dev); std::vector<uint64_t> mask_host(num_boxes * blocks_per_line); paddle::memory::Copy(phi::CPUPlace(), mask_host.data(), dev_ctx.GetPlace(), mask_dev, num_boxes * blocks_per_line * sizeof(uint64_t), dev_ctx.stream()); std::vector<int64_t> remv(blocks_per_line); std::vector<int64_t> keep_boxes_idxs(num_boxes); int64_t* output_host = keep_boxes_idxs.data(); int64_t last_box_num = 0; for (int64_t i = 0; i < num_boxes; ++i) { auto remv_element_id = i / threadsPerBlock; auto remv_bit_id = i % threadsPerBlock; if (!(remv[remv_element_id] & 1ULL << remv_bit_id)) { output_host[last_box_num++] = i; uint64_t* current_mask = mask_host.data() + i * blocks_per_line; for (auto j = remv_element_id; j < blocks_per_line; ++j) { remv[j] |= current_mask[j]; } } } output->Resize(phi::make_ddim({last_box_num})); auto* output_data = 
dev_ctx.template Alloc<int64_t>(output); paddle::memory::Copy(dev_ctx.GetPlace(), output_data, phi::CPUPlace(), output_host, sizeof(int64_t) * last_box_num, dev_ctx.stream()); } } // namespace phi PD_REGISTER_KERNEL(nms, GPU, ALL_LAYOUT, phi::NMSKernel, float, double) {}
nms_kernel.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/nms_kernel.h" #include "paddle/fluid/memory/malloc.h" #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/math_function.h" static const int64_t threadsPerBlock = sizeof(int64_t) * 8; namespace phi { template <typename T> static __global__ void NMS(const T* boxes_data, float threshold, int64_t num_boxes, uint64_t* masks) { auto raw_start = blockIdx.y; auto col_start = blockIdx.x; if (raw_start > col_start) return; const int raw_last_storage = min(num_boxes - raw_start * threadsPerBlock, threadsPerBlock); const int col_last_storage = min(num_boxes - col_start * threadsPerBlock, threadsPerBlock); if (threadIdx.x < raw_last_storage) { uint64_t mask = 0; auto current_box_idx = raw_start * threadsPerBlock + threadIdx.x; const T* current_box = boxes_data + current_box_idx * 4; for (int i = 0; i < col_last_storage; ++i) { const T* target_box = boxes_data + (col_start * threadsPerBlock + i) * 4; if (CalculateIoU<T>(current_box, target_box, threshold)) { mask |= 1ULL << i; } } const int blocks_per_line = CeilDivide(num_boxes, threadsPerBlock); masks[current_box_idx * blocks_per_line + col_start] = mask; } } template <typename T, typename Context> void NMSKernel(const Context& dev_ctx, const DenseTensor& boxes, float threshold, DenseTensor* output) { const int64_t num_boxes = boxes.dims()[0]; const auto blocks_per_line = CeilDivide(num_boxes, threadsPerBlock); dim3 block(threadsPerBlock); dim3 grid(blocks_per_line, blocks_per_line); auto mask_data = paddle::memory::Alloc( dev_ctx.GetPlace(), num_boxes * blocks_per_line * sizeof(uint64_t), phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream()))); uint64_t* mask_dev = reinterpret_cast<uint64_t*>(mask_data->ptr()); NMS<T><<<grid, block, 0, dev_ctx.stream()>>>( boxes.data<T>(), threshold, num_boxes, mask_dev); std::vector<uint64_t> mask_host(num_boxes * blocks_per_line); paddle::memory::Copy(phi::CPUPlace(), mask_host.data(), dev_ctx.GetPlace(), mask_dev, num_boxes * blocks_per_line * sizeof(uint64_t), dev_ctx.stream()); std::vector<int64_t> remv(blocks_per_line); std::vector<int64_t> keep_boxes_idxs(num_boxes); int64_t* output_host = keep_boxes_idxs.data(); int64_t last_box_num = 0; for (int64_t i = 0; i < num_boxes; ++i) { auto remv_element_id = i / threadsPerBlock; auto remv_bit_id = i % threadsPerBlock; if (!(remv[remv_element_id] & 1ULL << remv_bit_id)) { output_host[last_box_num++] = i; uint64_t* current_mask = mask_host.data() + i * blocks_per_line; for (auto j = remv_element_id; j < blocks_per_line; ++j) { remv[j] |= current_mask[j]; } } } output->Resize(phi::make_ddim({last_box_num})); auto* output_data = dev_ctx.template Alloc<int64_t>(output); paddle::memory::Copy(dev_ctx.GetPlace(), output_data, phi::CPUPlace(), output_host, 
sizeof(int64_t) * last_box_num, dev_ctx.stream()); } } // namespace phi PD_REGISTER_KERNEL(nms, GPU, ALL_LAYOUT, phi::NMSKernel, float, double) {}
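The host-side tail of NMSKernel above does a greedy pass over per-box 64-bit overlap masks: a box is kept only if no previously kept box has flagged it in remv, and keeping a box ORs its mask words into remv. The host-only sketch below replays that loop on toy data; the box count, word count, and mask bits are invented for illustration and are not taken from the record above.

#include <cstdint>
#include <cstdio>
#include <vector>

int main()
{
    const int64_t num_boxes = 5;
    const int64_t words_per_box = 1;  // ceil(num_boxes / 64.0) 64-bit words per box
    // mask[i * words_per_box + w]: bit j set in word w means box i overlaps box 64*w + j
    std::vector<uint64_t> mask = {0b00110ULL, 0ULL, 0ULL, 0b10000ULL, 0ULL};  // toy values

    std::vector<uint64_t> remv(words_per_box, 0);  // bits of boxes already suppressed
    std::vector<int64_t> keep;
    for (int64_t i = 0; i < num_boxes; ++i) {
        if (!(remv[i / 64] & (1ULL << (i % 64)))) {       // box i not suppressed yet
            keep.push_back(i);
            for (int64_t w = i / 64; w < words_per_box; ++w)
                remv[w] |= mask[i * words_per_box + w];   // suppress everything box i overlaps
        }
    }
    for (int64_t k : keep) printf("keep box %lld\n", (long long)k);  // prints boxes 0 and 3
    return 0;
}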
459ca2725a97fcda4a1bf0d4f6d6bf4905990299.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** \file \authors Касимов Ильдар \date 15.07.2015 \brief The CAIPBlur.cu kernel file contains the kernel definitions for performing the image blur operation. The file contains the definition of a kernel for uniform blur (box blur). */ #include "..\include\CAIPKernels.h" #include "..\include\CAIPUtils.h" __global__ void BoxBlur(TColor* inputImage, TColor* outputImage, int imageWidth, int imageHeight) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; unsigned int index = x + y * imageWidth; unsigned short averageOfNeigboursValues = 0; __shared__ int dx[9]; __shared__ int dy[9]; dx[0] = 0; dx[1] = 1; dx[2] = -1; dx[3] = 0; dx[4] = 0; dx[5] = 1; dx[6] = -1; dx[7] = 1; dx[8] = -1; dy[0] = 0; dy[1] = 0; dy[2] = 0; dy[3] = 1; dy[4] = -1; dy[5] = 1; dy[6] = 1; dy[7] = -1; dy[8] = -1; __syncthreads(); for (int i = 0; i < 9; i++) averageOfNeigboursValues += GetColor(inputImage, x + dx[i], y + dy[i], imageWidth, imageHeight).mR; averageOfNeigboursValues /= 9; memset(&outputImage[index], averageOfNeigboursValues, sizeof(TColor)); }
459ca2725a97fcda4a1bf0d4f6d6bf4905990299.cu
/** \file \authors Касимов Ильдар \date 15.07.2015 \brief The CAIPBlur.cu kernel file contains the kernel definitions for performing the image blur operation. The file contains the definition of a kernel for uniform blur (box blur). */ #include "..\include\CAIPKernels.h" #include "..\include\CAIPUtils.h" __global__ void BoxBlur(TColor* inputImage, TColor* outputImage, int imageWidth, int imageHeight) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; unsigned int index = x + y * imageWidth; unsigned short averageOfNeigboursValues = 0; __shared__ int dx[9]; __shared__ int dy[9]; dx[0] = 0; dx[1] = 1; dx[2] = -1; dx[3] = 0; dx[4] = 0; dx[5] = 1; dx[6] = -1; dx[7] = 1; dx[8] = -1; dy[0] = 0; dy[1] = 0; dy[2] = 0; dy[3] = 1; dy[4] = -1; dy[5] = 1; dy[6] = 1; dy[7] = -1; dy[8] = -1; __syncthreads(); for (int i = 0; i < 9; i++) averageOfNeigboursValues += GetColor(inputImage, x + dx[i], y + dy[i], imageWidth, imageHeight).mR; averageOfNeigboursValues /= 9; memset(&outputImage[index], averageOfNeigboursValues, sizeof(TColor)); }
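The pair above changes little besides the hipify banner and the hip/hip_runtime.h include, but most other pairs in this set also rewrite the kernel launch itself. The minimal sketch below illustrates that rewrite with a hypothetical kernel, arguments, and stream; the HIP form is left as a comment so the unit still compiles as plain CUDA.

#include <cuda_runtime.h>

__global__ void scaleKernel(float* data, float factor, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;  // element-wise scale, stands in for any real kernel
}

void launchScale(float* d_data, float factor, int n, cudaStream_t stream)
{
    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);

    // CUDA form, as it appears in the cuda_content records:
    scaleKernel<<<grid, block, 0, stream>>>(d_data, factor, n);

    // Equivalent HIP form produced by hipify (kept as a comment so this
    // translation unit remains valid CUDA):
    // hipLaunchKernelGGL(scaleKernel, grid, block, 0, stream, d_data, factor, n);
}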
dd65aab65362b54653e1c9a2dd74011ce221c190.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <iostream> #include <fstream> #include <hip/hip_runtime.h> #include <cmath> #include <string> #include <cstdio> #include <iomanip> #include "dcdread.h" #include<assert.h> #include <nvtx3/roctracer/roctx.h> using namespace std; //additional error handling code static void HandleError( hipError_t err, const char *file, int line ) { if (err != hipSuccess) { printf( "%s in %s at line %d\n", hipGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) //declaration of GPU function __global__ void pair_gpu(const double* d_x, const double* d_y, const double* d_z, unsigned long long int *d_g2, int numatm, int nconf, const double xbox, const double ybox, const double zbox, int d_bin, unsigned long long int bl); int main(int argc , char* argv[] ) { double xbox,ybox,zbox; double* d_x,*d_y,*d_z; unsigned long long int *d_g2; int nbin; int nthreads,device; int numatm,nconf,inconf; unsigned long long int near2; string file; /////////////////////////////////////////////////////////////// inconf = 10; nbin=2000; file = "../input/alk.traj.dcd"; device = 0; nthreads = 128; HANDLE_ERROR (hipSetDevice(device));//pick the device to use /////////////////////////////////////// std::ifstream infile; infile.open(file.c_str()); if(!infile){ cout<<"file "<<file.c_str()<<" not found\n"; return 1; } assert(infile); ofstream pairfile,stwo; pairfile.open("RDF.dat"); stwo.open("Pair_entropy.dat"); ///////////////////////////////////////////////////////// dcdreadhead(&numatm,&nconf,infile); cout<<"Dcd file has "<< numatm << " atoms and " << nconf << " frames"<<endl; if (inconf>nconf) cout << "nconf is reset to "<< nconf <<endl; else {nconf=inconf;} cout<<"Calculating RDF for " << nconf << " frames"<<endl; //////////////////////////////////////////////////////// unsigned long long int sizef= nconf*numatm*sizeof(double); unsigned long long int sizebin= nbin*sizeof(unsigned long long int); // Allocate Unified Memory -- accessible from CPU or GPU hipMallocManaged(&d_x, sizef); hipMallocManaged(&d_y, sizef); hipMallocManaged(&d_z, sizef); hipMallocManaged(&d_g2, sizebin); HANDLE_ERROR (hipPeekAtLastError()); memset(d_g2,0,sizebin); /////////reading cordinates////////////////////////////////////////////// roctxRangePush("Read_File"); double ax[numatm],ay[numatm],az[numatm]; for (int i=0;i<nconf;i++) { dcdreadframe(ax,ay,az,infile,numatm,xbox,ybox,zbox); for (int j=0;j<numatm;j++){ d_x[i*numatm+j]=ax[j]; d_y[i*numatm+j]=ay[j]; d_z[i*numatm+j]=az[j]; } } roctxRangePop(); //pop for Reading file roctxRangePush("Pair_Calculation"); cout<<"Reading of input file and transfer to gpu is completed"<<endl; ////////////////////////////////////////////////////////////////////////// near2=nthreads*(int(0.5*numatm*(numatm-1)/nthreads)+1); unsigned long long int nblock = (near2/nthreads); cout<<"Initial blocks are "<<nblock<<" "<<", now changing to "; int maxblock=65535; int bl; int blockloop= int(nblock/maxblock); if (blockloop != 0) { nblock=maxblock; } cout<<nblock<<" and will run over "<<(blockloop+1)<<" blockloops"<<endl; for (bl=0;bl<(blockloop+1);bl++) { //cout <<bl<<endl; hipLaunchKernelGGL(( pair_gpu), dim3(nblock),dim3(nthreads) , 0, 0, d_x, d_y, d_z, d_g2, numatm, nconf, xbox, ybox, zbox, nbin, bl); HANDLE_ERROR (hipPeekAtLastError()); HANDLE_ERROR(hipDeviceSynchronize()); } roctxRangePop(); //Pop for Pair Calculation double pi=acos(-1.0l); double 
rho=(numatm)/(xbox*ybox*zbox); double norm=(4.0l*pi*rho)/3.0l; double rl,ru,nideal; double g2[nbin]; double r,gr,lngr,lngrbond,s2=0.0l,s2bond=0.0l; double box=min(xbox,ybox); box=min(box,zbox); double del=box/(2.0l*nbin); roctxRangePush("Entropy_Calculation"); for (int i=0;i<nbin;i++) { // cout<<i+1<<" "<<d_g2[i]<<endl; rl=(i)*del; ru=rl+del; nideal=norm*(ru*ru*ru-rl*rl*rl); g2[i]=(double)d_g2[i]/((double)nconf*(double)numatm*nideal); r=(i)*del; pairfile<<(i+0.5l)*del<<" "<<g2[i]<<endl; if (r<2.0l) { gr=0.0l; } else { gr=g2[i]; } if (gr<1e-5) { lngr=0.0l; } else { lngr=log(gr); } if (g2[i]<1e-6) { lngrbond=0.0l; } else { lngrbond=log(g2[i]); } s2=s2-2.0l*pi*rho*((gr*lngr)-gr+1.0l)*del*r*r; s2bond=s2bond-2.0l*pi*rho*((g2[i]*lngrbond)-g2[i]+1.0l)*del*r*r; } roctxRangePop(); //Pop for Entropy Calculation stwo<<"s2 value is "<<s2<<endl; stwo<<"s2bond value is "<<s2bond<<endl; cout<<"#Freeing memory"<<endl; // Free memory HANDLE_ERROR(hipFree(d_x)); HANDLE_ERROR(hipFree(d_y)); HANDLE_ERROR(hipFree(d_z)); HANDLE_ERROR(hipFree(d_g2)); cout<<"#Number of atoms processed: "<<numatm<<endl<<endl; cout<<"#Number of confs processed: "<<nconf<<endl<<endl; cout<<"#number of threads used: "<<nthreads<<endl<<endl; return 0; } __global__ void pair_gpu( const double* d_x, const double* d_y, const double* d_z, unsigned long long int *d_g2, int numatm, int nconf, const double xbox,const double ybox,const double zbox,int d_bin, unsigned long long int bl) { double r,cut,dx,dy,dz; int ig2,id1,id2; double box; box=min(xbox,ybox); box=min(box,zbox); double del=box/(2.0*d_bin); cut=box*0.5; int thisi; double n; int i = blockIdx.x * blockDim.x + threadIdx.x; int maxi = min(int(0.5*numatm*(numatm-1)-(bl*65535*128)),(65535*128)); if ( i < maxi ) { thisi=bl*65535*128+i; n=(0.5)*(1+ ((double) sqrt (1.0+4.0*2.0*thisi))); id1=int(n); id2=thisi-(0.5*id1*(id1-1)); for (int frame=0;frame<nconf;frame++){ dx=d_x[frame*numatm+id1]-d_x[frame*numatm+id2]; dy=d_y[frame*numatm+id1]-d_y[frame*numatm+id2]; dz=d_z[frame*numatm+id1]-d_z[frame*numatm+id2]; dx=dx-xbox*(round(dx/xbox)); dy=dy-ybox*(round(dy/ybox)); dz=dz-zbox*(round(dz/zbox)); r=sqrtf(dx*dx+dy*dy+dz*dz); if (r<cut) { ig2=(int)(r/del); atomicAdd(&d_g2[ig2],2) ; } } } }
dd65aab65362b54653e1c9a2dd74011ce221c190.cu
#include <stdio.h> #include <iostream> #include <fstream> #include <cuda_runtime.h> #include <cmath> #include <string> #include <cstdio> #include <iomanip> #include "dcdread.h" #include<assert.h> #include <nvtx3/nvToolsExt.h> using namespace std; //additional error handling code static void HandleError( cudaError_t err, const char *file, int line ) { if (err != cudaSuccess) { printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) //declaration of GPU function __global__ void pair_gpu(const double* d_x, const double* d_y, const double* d_z, unsigned long long int *d_g2, int numatm, int nconf, const double xbox, const double ybox, const double zbox, int d_bin, unsigned long long int bl); int main(int argc , char* argv[] ) { double xbox,ybox,zbox; double* d_x,*d_y,*d_z; unsigned long long int *d_g2; int nbin; int nthreads,device; int numatm,nconf,inconf; unsigned long long int near2; string file; /////////////////////////////////////////////////////////////// inconf = 10; nbin=2000; file = "../input/alk.traj.dcd"; device = 0; nthreads = 128; HANDLE_ERROR (cudaSetDevice(device));//pick the device to use /////////////////////////////////////// std::ifstream infile; infile.open(file.c_str()); if(!infile){ cout<<"file "<<file.c_str()<<" not found\n"; return 1; } assert(infile); ofstream pairfile,stwo; pairfile.open("RDF.dat"); stwo.open("Pair_entropy.dat"); ///////////////////////////////////////////////////////// dcdreadhead(&numatm,&nconf,infile); cout<<"Dcd file has "<< numatm << " atoms and " << nconf << " frames"<<endl; if (inconf>nconf) cout << "nconf is reset to "<< nconf <<endl; else {nconf=inconf;} cout<<"Calculating RDF for " << nconf << " frames"<<endl; //////////////////////////////////////////////////////// unsigned long long int sizef= nconf*numatm*sizeof(double); unsigned long long int sizebin= nbin*sizeof(unsigned long long int); // Allocate Unified Memory -- accessible from CPU or GPU cudaMallocManaged(&d_x, sizef); cudaMallocManaged(&d_y, sizef); cudaMallocManaged(&d_z, sizef); cudaMallocManaged(&d_g2, sizebin); HANDLE_ERROR (cudaPeekAtLastError()); memset(d_g2,0,sizebin); /////////reading cordinates////////////////////////////////////////////// nvtxRangePush("Read_File"); double ax[numatm],ay[numatm],az[numatm]; for (int i=0;i<nconf;i++) { dcdreadframe(ax,ay,az,infile,numatm,xbox,ybox,zbox); for (int j=0;j<numatm;j++){ d_x[i*numatm+j]=ax[j]; d_y[i*numatm+j]=ay[j]; d_z[i*numatm+j]=az[j]; } } nvtxRangePop(); //pop for Reading file nvtxRangePush("Pair_Calculation"); cout<<"Reading of input file and transfer to gpu is completed"<<endl; ////////////////////////////////////////////////////////////////////////// near2=nthreads*(int(0.5*numatm*(numatm-1)/nthreads)+1); unsigned long long int nblock = (near2/nthreads); cout<<"Initial blocks are "<<nblock<<" "<<", now changing to "; int maxblock=65535; int bl; int blockloop= int(nblock/maxblock); if (blockloop != 0) { nblock=maxblock; } cout<<nblock<<" and will run over "<<(blockloop+1)<<" blockloops"<<endl; for (bl=0;bl<(blockloop+1);bl++) { //cout <<bl<<endl; pair_gpu<<< nblock,nthreads >>> (d_x, d_y, d_z, d_g2, numatm, nconf, xbox, ybox, zbox, nbin, bl); HANDLE_ERROR (cudaPeekAtLastError()); HANDLE_ERROR(cudaDeviceSynchronize()); } nvtxRangePop(); //Pop for Pair Calculation double pi=acos(-1.0l); double rho=(numatm)/(xbox*ybox*zbox); double norm=(4.0l*pi*rho)/3.0l; double rl,ru,nideal; double g2[nbin]; double 
r,gr,lngr,lngrbond,s2=0.0l,s2bond=0.0l; double box=min(xbox,ybox); box=min(box,zbox); double del=box/(2.0l*nbin); nvtxRangePush("Entropy_Calculation"); for (int i=0;i<nbin;i++) { // cout<<i+1<<" "<<d_g2[i]<<endl; rl=(i)*del; ru=rl+del; nideal=norm*(ru*ru*ru-rl*rl*rl); g2[i]=(double)d_g2[i]/((double)nconf*(double)numatm*nideal); r=(i)*del; pairfile<<(i+0.5l)*del<<" "<<g2[i]<<endl; if (r<2.0l) { gr=0.0l; } else { gr=g2[i]; } if (gr<1e-5) { lngr=0.0l; } else { lngr=log(gr); } if (g2[i]<1e-6) { lngrbond=0.0l; } else { lngrbond=log(g2[i]); } s2=s2-2.0l*pi*rho*((gr*lngr)-gr+1.0l)*del*r*r; s2bond=s2bond-2.0l*pi*rho*((g2[i]*lngrbond)-g2[i]+1.0l)*del*r*r; } nvtxRangePop(); //Pop for Entropy Calculation stwo<<"s2 value is "<<s2<<endl; stwo<<"s2bond value is "<<s2bond<<endl; cout<<"#Freeing memory"<<endl; // Free memory HANDLE_ERROR(cudaFree(d_x)); HANDLE_ERROR(cudaFree(d_y)); HANDLE_ERROR(cudaFree(d_z)); HANDLE_ERROR(cudaFree(d_g2)); cout<<"#Number of atoms processed: "<<numatm<<endl<<endl; cout<<"#Number of confs processed: "<<nconf<<endl<<endl; cout<<"#number of threads used: "<<nthreads<<endl<<endl; return 0; } __global__ void pair_gpu( const double* d_x, const double* d_y, const double* d_z, unsigned long long int *d_g2, int numatm, int nconf, const double xbox,const double ybox,const double zbox,int d_bin, unsigned long long int bl) { double r,cut,dx,dy,dz; int ig2,id1,id2; double box; box=min(xbox,ybox); box=min(box,zbox); double del=box/(2.0*d_bin); cut=box*0.5; int thisi; double n; int i = blockIdx.x * blockDim.x + threadIdx.x; int maxi = min(int(0.5*numatm*(numatm-1)-(bl*65535*128)),(65535*128)); if ( i < maxi ) { thisi=bl*65535*128+i; n=(0.5)*(1+ ((double) sqrt (1.0+4.0*2.0*thisi))); id1=int(n); id2=thisi-(0.5*id1*(id1-1)); for (int frame=0;frame<nconf;frame++){ dx=d_x[frame*numatm+id1]-d_x[frame*numatm+id2]; dy=d_y[frame*numatm+id1]-d_y[frame*numatm+id2]; dz=d_z[frame*numatm+id1]-d_z[frame*numatm+id2]; dx=dx-xbox*(round(dx/xbox)); dy=dy-ybox*(round(dy/ybox)); dz=dz-zbox*(round(dz/zbox)); r=sqrtf(dx*dx+dy*dy+dz*dz); if (r<cut) { ig2=(int)(r/del); atomicAdd(&d_g2[ig2],2) ; } } } }
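Inside pair_gpu above, each thread turns its flat index into a unique unordered atom pair by inverting the triangular-number relation i = id1*(id1-1)/2 + id2. The host-side sketch below replays that inversion for a hypothetical four-atom system; only the index arithmetic is taken from the kernel.

#include <cmath>
#include <cstdio>

int main()
{
    const int numatm = 4;                          // hypothetical tiny system: 4 atoms
    const int npairs = numatm * (numatm - 1) / 2;  // 6 unordered pairs
    for (int i = 0; i < npairs; ++i) {
        int id1 = (int)(0.5 * (1.0 + sqrt(1.0 + 8.0 * (double)i)));  // larger atom index
        int id2 = i - id1 * (id1 - 1) / 2;                           // smaller atom index
        printf("flat index %d -> pair (%d, %d)\n", i, id1, id2);
    }
    return 0;
}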
6649f8454719a184013fc31be239e31a143d669d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "VMC.h" //#include "Step.cu" //#include "ExpectationEnergy.cu" VMCSimulation::VMCSimulation(const char* config) { LoadSettings(config); std::cout<<" Created VMC Simulation object"<<std::endl; InitThreadDimensions(); glInstance = make_unique<GLInstance>(settings.simWidth, settings.simHeight); std::cout<<" Created openGL Instance for the Simulation"<<std::endl; InitPaths(); redBlack = 0; } VMCSimulation::Mode VMCSimulation::StringToPotentialMode(std::string input) { std::cout << " Input: " << input << std::endl; if (input == " HarmonicOscillator"){return Mode::HarmonicOscillator;} if (input == " DoubleWell"){return Mode::DoubleWell;} std::cout << " Didn't recognize config mode as a simulation mode, using default: HarmonicOscillator"<<std::endl; return Mode::HarmonicOscillator; } void VMCSimulation::LoadSettings(const char* configFileName) { using namespace std; ifstream configFile; configFile.open(configFileName); string line; std::cout<<" Using Settings:"<<std::endl; while (getline(configFile, line)) { if (line.find("simWidth") != std::string::npos) { settings.simWidth = stoi(line.substr(8)); std::cout<<" simWidth: "<<settings.simWidth<<endl; } else if (line.find("simHeight") != std::string::npos) { settings.simHeight = stoi(line.substr(9)); std::cout<<" simHeight: "<<settings.simHeight<<std::endl; } else if (line.find("numPoints") != std::string::npos) { settings.numPoints = stoi(line.substr(9)); std::cout<<" numPoints: "<<settings.numPoints<<std::endl; } else if (line.find("xRange") != std::string::npos) { settings.xRange = stof(line.substr(6)); std::cout<<" xRange: "<<settings.xRange<<std::endl; } else if (line.find("yRange") != std::string::npos) { settings.yRange = stof(line.substr(6)); std::cout<<" yRange: "<<settings.yRange<<std::endl; } else if (line.find("epsilon") != std::string::npos) { settings.epsilon = stof(line.substr(7)); std::cout<<" epsilon: "<<settings.epsilon<<std::endl; } else if (line.find("tau") != std::string::npos) { settings.tau = stof(line.substr(3)); std::cout<<" tau: "<<settings.tau<<std::endl; } else if (line.find("mode") != std::string::npos) { settings.mode = StringToPotentialMode(line.substr(4)); std::cout<<" Potential Mode: "<<line.substr(4)<<std::endl; } else if (line.find("recording") != std::string::npos) { settings.recording = bool(stoi(line.substr(9))); std::cout<<" Recording: " << settings.recording << std::endl; } else if (line.find("frames") != std::string::npos) { settings.frames = stoi(line.substr(6)); std::cout<<" Frames: " << settings.frames << std::endl; } } settings.dt = settings.tau/settings.numPoints; std::cout<<" dt:"<<settings.dt<<std::endl; std::cout<<std::endl<<" Press Enter to continue"<<std::endl; configFile.close(); cin.ignore(); } void VMCSimulation::InitPaths() { ringPoints.resize(settings.numPoints); for (int i = 0; i < settings.numPoints; i++) { //if (i <settings.numPoints / 2) ringPoints[i] = make_float2(0.1, 0.1); //else // ringPoints[i] = make_float2(-0.01, 0.0); } dRingPoints = ringPoints; rawRingPoints = thrust::raw_pointer_cast(dRingPoints.data()); binSums.resize(settings.simWidth * settings.simHeight); for (int i = 0; i < settings.simWidth * settings.simHeight; i++) binSums[i] = 0; dBinSums = binSums; rawBinSums = thrust::raw_pointer_cast(dBinSums.data()); colorMap.resize(512); std::ifstream colorfile("data/Hot_Cold_No_Zero", std::ifstream::in); std::string colorLine; int i = 0; while(getline(colorfile, colorLine)){ 
std::stringstream linestream(colorLine); linestream >> colorMap[i].x >> colorMap[i].y >> colorMap[i].z; i++; } colorfile.close(); dColorMap = colorMap; rawColorMap = thrust::raw_pointer_cast(dColorMap.data()); // set pixel count (3x for 3x8 bit color format) and establish // those vectors on host and device pixelData.resize(3 * settings.simWidth * settings.simHeight); dPixelData = pixelData; rawPixelData = thrust::raw_pointer_cast(dPixelData.data()); hipDeviceSynchronize(); } void VMCSimulation::RunSimulation() { // Info stuff hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float fpsTime = 0.0; int steps = 0; int mapMin = 0; int mapMax = 0; thrust::pair<thrust::device_ptr<int>, thrust::device_ptr<int>> minMaxPtrs; // Temporary, don't need these float* hAvgEnergy; float* dAvgEnergy; checkCuda( hipMalloc((void**)&dAvgEnergy, sizeof(float))); checkCuda( hipHostMalloc((void**)&hAvgEnergy, sizeof(float))); hAvgEnergy[0] = 0.0; checkCuda( hipMemcpy(dAvgEnergy, hAvgEnergy, sizeof(float), hipMemcpyHostToDevice)); float* zerofloat; checkCuda( hipHostMalloc((void**)&zerofloat, sizeof(float))); zerofloat[0] = 0.0; std::string frameName; while(!glfwWindowShouldClose(glInstance->window)) { hipEventRecord(start, 0); float3 *devPtr; checkCuda(hipGraphicsMapResources(1, &glInstance->cudaColorResource, 0)); size_t numBytes; checkCuda(hipGraphicsResourceGetMappedPointer((void**)&devPtr, &numBytes, *&glInstance->cudaColorResource)); //std::cout<<numBytes<<std::endl; hipLaunchKernelGGL(( Step), dim3(xBlocks), dim3(threadsPerBlock) , 0, 0, rawRingPoints, settings.epsilon, static_cast<int>(settings.mode), settings.tau, settings.dt, settings.numPoints, redBlack); gpuErrchk(hipPeekAtLastError()); checkCuda(hipDeviceSynchronize()); hipLaunchKernelGGL(( ExpectationEnergy), dim3(xBlocks), dim3(threadsPerBlock) , 0, 0, rawRingPoints, dAvgEnergy, settings.dt, static_cast<int>(settings.mode), settings.numPoints); gpuErrchk(hipPeekAtLastError()); checkCuda(hipDeviceSynchronize()); checkCuda( hipMemcpy(hAvgEnergy, dAvgEnergy, sizeof(float), hipMemcpyDeviceToHost)); checkCuda( hipMemcpy(dAvgEnergy, zerofloat, sizeof(float), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( PopulateBins), dim3(xBlocks), dim3(threadsPerBlock) , 0, 0, rawBinSums, rawRingPoints, settings.xRange, settings.yRange, static_cast<int>(settings.mode), settings.numPoints, settings.simHeight, settings.simWidth); gpuErrchk(hipPeekAtLastError()); checkCuda(hipDeviceSynchronize()); minMaxPtrs = thrust::minmax_element(thrust::device, thrust::device_pointer_cast(rawBinSums), thrust::device_pointer_cast(rawBinSums) + (settings.simHeight * settings.simWidth) - 1 ); mapMin = *minMaxPtrs.first; mapMax = *minMaxPtrs.second; hipLaunchKernelGGL(( Color), dim3(colorBlocks), dim3(tpbColor) , 0, 0, devPtr, rawColorMap, rawBinSums, mapMin, mapMax, settings.simWidth, settings.simHeight); gpuErrchk(hipPeekAtLastError()); hipLaunchKernelGGL(( Zero_Histogram), dim3(colorBlocks), dim3(tpbColor) , 0, 0, rawBinSums, settings.simWidth, settings.simHeight); gpuErrchk(hipPeekAtLastError()); checkCuda(hipDeviceSynchronize()); (redBlack == 0) ? 
redBlack = 1 : redBlack = 0; glInstance->Draw(); if (settings.recording && steps < settings.frames) { frameName = FrameNameGen(steps, settings.frames); hipLaunchKernelGGL(( FormPNGData), dim3(colorBlocks), dim3(tpbColor) , 0, 0, devPtr, rawPixelData, settings.simWidth, settings.simHeight); gpuErrchk(hipPeekAtLastError()); hipMemcpy(pixelData.data(), rawPixelData, settings.simWidth * settings.simHeight * 3 * sizeof(unsigned char), hipMemcpyDeviceToHost); gpuErrchk(hipPeekAtLastError()); WritePNG(pixelData.data(), frameName, settings.simWidth, settings.simHeight); } steps++; //hipEventRecord(stop, 0); //hipEventElapsedTime(&fpsTime, start, stop); char title[512]; sprintf(title, "Cuda Variational Monte Carlo: %12.2f fps, path count: %u, steps taken: %d", 1.0f/(fpsTime/1000.0f), settings.numPoints, steps); glfwSetWindowTitle(glInstance->window, title); checkCuda(hipGraphicsUnmapResources(1, &glInstance->cudaColorResource, 0)); if(glfwGetKey(glInstance->window, GLFW_KEY_ESCAPE)) { glfwSetWindowShouldClose(glInstance->window, 1); std::cout << "Window Close Set, next loop should end" << std::endl; std::cout <<" wow " << std::endl; } } std::cout << "Exiting simulation function" << std::endl; //free(data); } void VMCSimulation::InitThreadDimensions() { switch(settings.numPoints){ case 256: xBlocks = 16; break; case 1024: xBlocks = 4; break; case 4096: xBlocks = 16; break; case 16384: xBlocks = 64; break; case 32768: xBlocks = 128; break; case 65536: xBlocks = 256; break; case 262144: xBlocks = 1024; // max break; default: std::cout<<"Bad Dimensions"<<std::endl; exit(1); } switch(settings.simWidth * settings.simHeight) { // 128 x 128 case 16384: tpbColor.x = settings.simWidth/1; tpbColor.y = settings.simHeight/128; colorBlocks.x = 1; colorBlocks.y = 128; break; // 256 x 256 case 65536: tpbColor.x = settings.simWidth/1; tpbColor.y = settings.simHeight/256; colorBlocks.x = 1; colorBlocks.y = 256; break; // 512 x 512 case 262144: tpbColor.x = settings.simWidth/2; tpbColor.y = settings.simHeight/256; colorBlocks.x = 2; colorBlocks.y = 256; break; default: std::cout<<"Bad Dimensions"<<std::endl; exit(1); } threadsPerBlock = settings.numPoints/xBlocks; std::cout<<" Calling path algorithm kernels with:"<<std::endl <<" ThreadsPerBlock: ["<<threadsPerBlock<<"]"<<std::endl <<" On a Grid of: ["<<xBlocks<<"] Blocks"<<std::endl<<std::endl; std::cout<<" Calling painting kernels with:"<<std::endl <<" ThreadsPerBlock: ["<<tpbColor.x<<","<<tpbColor.y<<"]"<<std::endl <<" On a Grid of: ["<<colorBlocks.x<<","<<colorBlocks.y<<"]"<<std::endl; } VMCSimulation::~VMCSimulation() { }
6649f8454719a184013fc31be239e31a143d669d.cu
#include "VMC.h" //#include "Step.cu" //#include "ExpectationEnergy.cu" VMCSimulation::VMCSimulation(const char* config) { LoadSettings(config); std::cout<<" Created VMC Simulation object"<<std::endl; InitThreadDimensions(); glInstance = make_unique<GLInstance>(settings.simWidth, settings.simHeight); std::cout<<" Created openGL Instance for the Simulation"<<std::endl; InitPaths(); redBlack = 0; } VMCSimulation::Mode VMCSimulation::StringToPotentialMode(std::string input) { std::cout << " Input: " << input << std::endl; if (input == " HarmonicOscillator"){return Mode::HarmonicOscillator;} if (input == " DoubleWell"){return Mode::DoubleWell;} std::cout << " Didn't recognize config mode as a simulation mode, using default: HarmonicOscillator"<<std::endl; return Mode::HarmonicOscillator; } void VMCSimulation::LoadSettings(const char* configFileName) { using namespace std; ifstream configFile; configFile.open(configFileName); string line; std::cout<<" Using Settings:"<<std::endl; while (getline(configFile, line)) { if (line.find("simWidth") != std::string::npos) { settings.simWidth = stoi(line.substr(8)); std::cout<<" simWidth: "<<settings.simWidth<<endl; } else if (line.find("simHeight") != std::string::npos) { settings.simHeight = stoi(line.substr(9)); std::cout<<" simHeight: "<<settings.simHeight<<std::endl; } else if (line.find("numPoints") != std::string::npos) { settings.numPoints = stoi(line.substr(9)); std::cout<<" numPoints: "<<settings.numPoints<<std::endl; } else if (line.find("xRange") != std::string::npos) { settings.xRange = stof(line.substr(6)); std::cout<<" xRange: "<<settings.xRange<<std::endl; } else if (line.find("yRange") != std::string::npos) { settings.yRange = stof(line.substr(6)); std::cout<<" yRange: "<<settings.yRange<<std::endl; } else if (line.find("epsilon") != std::string::npos) { settings.epsilon = stof(line.substr(7)); std::cout<<" epsilon: "<<settings.epsilon<<std::endl; } else if (line.find("tau") != std::string::npos) { settings.tau = stof(line.substr(3)); std::cout<<" tau: "<<settings.tau<<std::endl; } else if (line.find("mode") != std::string::npos) { settings.mode = StringToPotentialMode(line.substr(4)); std::cout<<" Potential Mode: "<<line.substr(4)<<std::endl; } else if (line.find("recording") != std::string::npos) { settings.recording = bool(stoi(line.substr(9))); std::cout<<" Recording: " << settings.recording << std::endl; } else if (line.find("frames") != std::string::npos) { settings.frames = stoi(line.substr(6)); std::cout<<" Frames: " << settings.frames << std::endl; } } settings.dt = settings.tau/settings.numPoints; std::cout<<" dt:"<<settings.dt<<std::endl; std::cout<<std::endl<<" Press Enter to continue"<<std::endl; configFile.close(); cin.ignore(); } void VMCSimulation::InitPaths() { ringPoints.resize(settings.numPoints); for (int i = 0; i < settings.numPoints; i++) { //if (i <settings.numPoints / 2) ringPoints[i] = make_float2(0.1, 0.1); //else // ringPoints[i] = make_float2(-0.01, 0.0); } dRingPoints = ringPoints; rawRingPoints = thrust::raw_pointer_cast(dRingPoints.data()); binSums.resize(settings.simWidth * settings.simHeight); for (int i = 0; i < settings.simWidth * settings.simHeight; i++) binSums[i] = 0; dBinSums = binSums; rawBinSums = thrust::raw_pointer_cast(dBinSums.data()); colorMap.resize(512); std::ifstream colorfile("data/Hot_Cold_No_Zero", std::ifstream::in); std::string colorLine; int i = 0; while(getline(colorfile, colorLine)){ std::stringstream linestream(colorLine); linestream >> colorMap[i].x >> colorMap[i].y >> 
colorMap[i].z; i++; } colorfile.close(); dColorMap = colorMap; rawColorMap = thrust::raw_pointer_cast(dColorMap.data()); // set pixel count (3x for 3x8 bit color format) and establish // those vectors on host and device pixelData.resize(3 * settings.simWidth * settings.simHeight); dPixelData = pixelData; rawPixelData = thrust::raw_pointer_cast(dPixelData.data()); cudaThreadSynchronize(); } void VMCSimulation::RunSimulation() { // Info stuff cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float fpsTime = 0.0; int steps = 0; int mapMin = 0; int mapMax = 0; thrust::pair<thrust::device_ptr<int>, thrust::device_ptr<int>> minMaxPtrs; // Temporary, don't need these float* hAvgEnergy; float* dAvgEnergy; checkCuda( cudaMalloc((void**)&dAvgEnergy, sizeof(float))); checkCuda( cudaMallocHost((void**)&hAvgEnergy, sizeof(float))); hAvgEnergy[0] = 0.0; checkCuda( cudaMemcpy(dAvgEnergy, hAvgEnergy, sizeof(float), cudaMemcpyHostToDevice)); float* zerofloat; checkCuda( cudaMallocHost((void**)&zerofloat, sizeof(float))); zerofloat[0] = 0.0; std::string frameName; while(!glfwWindowShouldClose(glInstance->window)) { cudaEventRecord(start, 0); float3 *devPtr; checkCuda(cudaGraphicsMapResources(1, &glInstance->cudaColorResource, 0)); size_t numBytes; checkCuda(cudaGraphicsResourceGetMappedPointer((void**)&devPtr, &numBytes, *&glInstance->cudaColorResource)); //std::cout<<numBytes<<std::endl; Step<<< xBlocks, threadsPerBlock >>> (rawRingPoints, settings.epsilon, static_cast<int>(settings.mode), settings.tau, settings.dt, settings.numPoints, redBlack); gpuErrchk(cudaPeekAtLastError()); checkCuda(cudaDeviceSynchronize()); ExpectationEnergy<<< xBlocks, threadsPerBlock >>> (rawRingPoints, dAvgEnergy, settings.dt, static_cast<int>(settings.mode), settings.numPoints); gpuErrchk(cudaPeekAtLastError()); checkCuda(cudaDeviceSynchronize()); checkCuda( cudaMemcpy(hAvgEnergy, dAvgEnergy, sizeof(float), cudaMemcpyDeviceToHost)); checkCuda( cudaMemcpy(dAvgEnergy, zerofloat, sizeof(float), cudaMemcpyHostToDevice)); PopulateBins<<< xBlocks, threadsPerBlock >>> (rawBinSums, rawRingPoints, settings.xRange, settings.yRange, static_cast<int>(settings.mode), settings.numPoints, settings.simHeight, settings.simWidth); gpuErrchk(cudaPeekAtLastError()); checkCuda(cudaDeviceSynchronize()); minMaxPtrs = thrust::minmax_element(thrust::device, thrust::device_pointer_cast(rawBinSums), thrust::device_pointer_cast(rawBinSums) + (settings.simHeight * settings.simWidth) - 1 ); mapMin = *minMaxPtrs.first; mapMax = *minMaxPtrs.second; Color<<< colorBlocks, tpbColor >>> (devPtr, rawColorMap, rawBinSums, mapMin, mapMax, settings.simWidth, settings.simHeight); gpuErrchk(cudaPeekAtLastError()); Zero_Histogram<<< colorBlocks, tpbColor >>> (rawBinSums, settings.simWidth, settings.simHeight); gpuErrchk(cudaPeekAtLastError()); checkCuda(cudaDeviceSynchronize()); (redBlack == 0) ? 
redBlack = 1 : redBlack = 0; glInstance->Draw(); if (settings.recording && steps < settings.frames) { frameName = FrameNameGen(steps, settings.frames); FormPNGData<<< colorBlocks, tpbColor >>> (devPtr, rawPixelData, settings.simWidth, settings.simHeight); gpuErrchk(cudaPeekAtLastError()); cudaMemcpy(pixelData.data(), rawPixelData, settings.simWidth * settings.simHeight * 3 * sizeof(unsigned char), cudaMemcpyDeviceToHost); gpuErrchk(cudaPeekAtLastError()); WritePNG(pixelData.data(), frameName, settings.simWidth, settings.simHeight); } steps++; //cudaEventRecord(stop, 0); //cudaEventElapsedTime(&fpsTime, start, stop); char title[512]; sprintf(title, "Cuda Variational Monte Carlo: %12.2f fps, path count: %u, steps taken: %d", 1.0f/(fpsTime/1000.0f), settings.numPoints, steps); glfwSetWindowTitle(glInstance->window, title); checkCuda(cudaGraphicsUnmapResources(1, &glInstance->cudaColorResource, 0)); if(glfwGetKey(glInstance->window, GLFW_KEY_ESCAPE)) { glfwSetWindowShouldClose(glInstance->window, 1); std::cout << "Window Close Set, next loop should end" << std::endl; std::cout <<" wow " << std::endl; } } std::cout << "Exiting simulation function" << std::endl; //free(data); } void VMCSimulation::InitThreadDimensions() { switch(settings.numPoints){ case 256: xBlocks = 16; break; case 1024: xBlocks = 4; break; case 4096: xBlocks = 16; break; case 16384: xBlocks = 64; break; case 32768: xBlocks = 128; break; case 65536: xBlocks = 256; break; case 262144: xBlocks = 1024; // max break; default: std::cout<<"Bad Dimensions"<<std::endl; exit(1); } switch(settings.simWidth * settings.simHeight) { // 128 x 128 case 16384: tpbColor.x = settings.simWidth/1; tpbColor.y = settings.simHeight/128; colorBlocks.x = 1; colorBlocks.y = 128; break; // 256 x 256 case 65536: tpbColor.x = settings.simWidth/1; tpbColor.y = settings.simHeight/256; colorBlocks.x = 1; colorBlocks.y = 256; break; // 512 x 512 case 262144: tpbColor.x = settings.simWidth/2; tpbColor.y = settings.simHeight/256; colorBlocks.x = 2; colorBlocks.y = 256; break; default: std::cout<<"Bad Dimensions"<<std::endl; exit(1); } threadsPerBlock = settings.numPoints/xBlocks; std::cout<<" Calling path algorithm kernels with:"<<std::endl <<" ThreadsPerBlock: ["<<threadsPerBlock<<"]"<<std::endl <<" On a Grid of: ["<<xBlocks<<"] Blocks"<<std::endl<<std::endl; std::cout<<" Calling painting kernels with:"<<std::endl <<" ThreadsPerBlock: ["<<tpbColor.x<<","<<tpbColor.y<<"]"<<std::endl <<" On a Grid of: ["<<colorBlocks.x<<","<<colorBlocks.y<<"]"<<std::endl; } VMCSimulation::~VMCSimulation() { }
330066a5c2ded61897199f23a4379e7aabac6fac.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <hip/hip_runtime.h> #include <cstdio> #include <sys/time.h> #include <rocblas.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/copy.h> #include <thrust/execution_policy.h> #define CUDA_CHECK_RETURN(value) {\ hipError_t _m_cudaStat = value;\ if (_m_cudaStat != hipSuccess) {\ fprintf(stderr, "Error %s at line %d in file %s\n",\ hipGetErrorString(_m_cudaStat), __LINE__, __FILE__);\ exit(1);\ }\ } using namespace std; __global__ void Copy(float *a, float *b, int vector_size){ int indx = blockIdx.x * blockDim.x + threadIdx.x; if (indx < vector_size){ b[indx] = a[indx]; } } float thrust_copy(int vector_size){ thrust::host_vector<float> hA(vector_size); thrust::host_vector<float> hB(vector_size); for(int i = 0; i < vector_size; i++) { hA[i]=i; } thrust::device_vector<float> dA = hA; thrust::device_vector<float> dB = hB; float ThrustTime; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); thrust::copy(dA.begin(), dA.end(), dB.begin()); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&ThrustTime, start, stop); thrust::copy(dB.begin(), dB.end(), hB.begin()); cout << "THRUST_COPY" << endl; /* cout << "vector A : "; for (int i = 0; i < vector_size; i++){ cout << hA[i] << " "; } cout << endl; cout << "vector B : "; for (int i = 0; i < vector_size; i++){ cout << hB[i] << " "; } cout << endl; */ hipEventDestroy(start); hipEventDestroy(stop); return ThrustTime; } float blas_copy(int vector_size){ float *hA = new float[vector_size]; float *hB = new float[vector_size]; for(int i = 0; i < vector_size; i++) { hA[i]=i; } float *dA, *dB; CUDA_CHECK_RETURN(hipMalloc(&dA, sizeof(float) * vector_size)); CUDA_CHECK_RETURN(hipMalloc(&dB, sizeof(float) * vector_size)); CUDA_CHECK_RETURN(hipMemcpy(dA, hA, sizeof(float) * vector_size, hipMemcpyHostToDevice)); CUDA_CHECK_RETURN(hipMemcpy(dB, hB, sizeof(float) * vector_size, hipMemcpyHostToDevice)); hipblasHandle_t handle; hipblasCreate(&handle); float CublasTime; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipblasScopy(handle, vector_size, dA, 1, dB, 1); hipEventRecord(stop, 0); hipEventSynchronize(stop); CUDA_CHECK_RETURN(hipDeviceSynchronize()); CUDA_CHECK_RETURN(hipGetLastError()); hipEventElapsedTime(&CublasTime, start, stop); CUDA_CHECK_RETURN(hipMemcpy(hB, dB, sizeof(float) * vector_size, hipMemcpyDeviceToHost)); cout << "CUBLAS_COPY" << endl; /* cout << "vector A : "; for (int i = 0; i < vector_size; i++){ cout << hA[i] << " "; } cout << endl; cout << "vector B : "; for (int i = 0; i < vector_size; i++){ cout << hB[i] << " "; } cout << endl; */ hipEventDestroy(start); hipEventDestroy(stop); hipblasDestroy(handle); delete [] hA; delete [] hB; hipFree(dA); hipFree(dB); return CublasTime; } int main(int argc, char *argv[]) { cout << "1 arg - vector_size, 2 arg - block_size" << endl << endl; int vector_size = atoi(argv[1]); int block_size = atoi(argv[2]); srand(time(NULL)); float *hA = new float[vector_size]; float *hB = new float[vector_size]; for(int i = 0; i < vector_size; i++) { hA[i]=i; } float *dA, *dB; CUDA_CHECK_RETURN(hipMalloc(&dA, sizeof(float) * vector_size)); CUDA_CHECK_RETURN(hipMalloc(&dB, sizeof(float) * vector_size)); CUDA_CHECK_RETURN(hipMemcpy(dA, hA, sizeof(float) * vector_size, hipMemcpyHostToDevice)); CUDA_CHECK_RETURN(hipMemcpy(dB, hB, sizeof(float) * vector_size, hipMemcpyHostToDevice)); int 
num_blocks = (int)ceil((float)vector_size / block_size); float elapsedTime; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipLaunchKernelGGL(( Copy) , dim3(num_blocks), dim3(block_size), 0, 0, dA, dB, vector_size); hipEventRecord(stop, 0); hipEventSynchronize(stop); CUDA_CHECK_RETURN(hipDeviceSynchronize()); CUDA_CHECK_RETURN(hipGetLastError()); hipEventElapsedTime(&elapsedTime, start, stop); CUDA_CHECK_RETURN(hipMemcpy(hB, dB, sizeof(float) * vector_size, hipMemcpyDeviceToHost)); cout << "CUDA_COPY" << endl; /* cout << "vector A : "; for (int i = 0; i < vector_size; i++){ cout << hA[i] << " "; } cout << endl; cout << "vector B : "; for (int i = 0; i < vector_size; i++){ cout << hB[i] << " "; } cout << endl; */ cout << "Cuda_Time = " << elapsedTime << endl; cout << endl; float cublas = blas_copy(vector_size); cout << "Cublas_Time = " << cublas << endl; cout << endl; float thrust = thrust_copy(vector_size); cout << "Thrust_Time = " << thrust << endl; hipEventDestroy(start); hipEventDestroy(stop); delete [] hA; delete [] hB; hipFree(dA); hipFree(dB); }
330066a5c2ded61897199f23a4379e7aabac6fac.cu
#include <iostream> #include <cuda.h> #include <cstdio> #include <sys/time.h> #include <cublas_v2.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/copy.h> #include <thrust/execution_policy.h> #define CUDA_CHECK_RETURN(value) {\ cudaError_t _m_cudaStat = value;\ if (_m_cudaStat != cudaSuccess) {\ fprintf(stderr, "Error %s at line %d in file %s\n",\ cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__);\ exit(1);\ }\ } using namespace std; __global__ void Copy(float *a, float *b, int vector_size){ int indx = blockIdx.x * blockDim.x + threadIdx.x; if (indx < vector_size){ b[indx] = a[indx]; } } float thrust_copy(int vector_size){ thrust::host_vector<float> hA(vector_size); thrust::host_vector<float> hB(vector_size); for(int i = 0; i < vector_size; i++) { hA[i]=i; } thrust::device_vector<float> dA = hA; thrust::device_vector<float> dB = hB; float ThrustTime; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); thrust::copy(dA.begin(), dA.end(), dB.begin()); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&ThrustTime, start, stop); thrust::copy(dB.begin(), dB.end(), hB.begin()); cout << "THRUST_COPY" << endl; /* cout << "vector A : "; for (int i = 0; i < vector_size; i++){ cout << hA[i] << " "; } cout << endl; cout << "vector B : "; for (int i = 0; i < vector_size; i++){ cout << hB[i] << " "; } cout << endl; */ cudaEventDestroy(start); cudaEventDestroy(stop); return ThrustTime; } float blas_copy(int vector_size){ float *hA = new float[vector_size]; float *hB = new float[vector_size]; for(int i = 0; i < vector_size; i++) { hA[i]=i; } float *dA, *dB; CUDA_CHECK_RETURN(cudaMalloc(&dA, sizeof(float) * vector_size)); CUDA_CHECK_RETURN(cudaMalloc(&dB, sizeof(float) * vector_size)); CUDA_CHECK_RETURN(cudaMemcpy(dA, hA, sizeof(float) * vector_size, cudaMemcpyHostToDevice)); CUDA_CHECK_RETURN(cudaMemcpy(dB, hB, sizeof(float) * vector_size, cudaMemcpyHostToDevice)); cublasHandle_t handle; cublasCreate(&handle); float CublasTime; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); cublasScopy(handle, vector_size, dA, 1, dB, 1); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); CUDA_CHECK_RETURN(cudaDeviceSynchronize()); CUDA_CHECK_RETURN(cudaGetLastError()); cudaEventElapsedTime(&CublasTime, start, stop); CUDA_CHECK_RETURN(cudaMemcpy(hB, dB, sizeof(float) * vector_size, cudaMemcpyDeviceToHost)); cout << "CUBLAS_COPY" << endl; /* cout << "vector A : "; for (int i = 0; i < vector_size; i++){ cout << hA[i] << " "; } cout << endl; cout << "vector B : "; for (int i = 0; i < vector_size; i++){ cout << hB[i] << " "; } cout << endl; */ cudaEventDestroy(start); cudaEventDestroy(stop); cublasDestroy(handle); delete [] hA; delete [] hB; cudaFree(dA); cudaFree(dB); return CublasTime; } int main(int argc, char *argv[]) { cout << "1 arg - vector_size, 2 arg - block_size" << endl << endl; int vector_size = atoi(argv[1]); int block_size = atoi(argv[2]); srand(time(NULL)); float *hA = new float[vector_size]; float *hB = new float[vector_size]; for(int i = 0; i < vector_size; i++) { hA[i]=i; } float *dA, *dB; CUDA_CHECK_RETURN(cudaMalloc(&dA, sizeof(float) * vector_size)); CUDA_CHECK_RETURN(cudaMalloc(&dB, sizeof(float) * vector_size)); CUDA_CHECK_RETURN(cudaMemcpy(dA, hA, sizeof(float) * vector_size, cudaMemcpyHostToDevice)); CUDA_CHECK_RETURN(cudaMemcpy(dB, hB, sizeof(float) * vector_size, cudaMemcpyHostToDevice)); int num_blocks = (int)ceil((float)vector_size 
/ block_size); float elapsedTime; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); Copy <<<num_blocks, block_size>>> (dA, dB, vector_size); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); CUDA_CHECK_RETURN(cudaDeviceSynchronize()); CUDA_CHECK_RETURN(cudaGetLastError()); cudaEventElapsedTime(&elapsedTime, start, stop); CUDA_CHECK_RETURN(cudaMemcpy(hB, dB, sizeof(float) * vector_size, cudaMemcpyDeviceToHost)); cout << "CUDA_COPY" << endl; /* cout << "vector A : "; for (int i = 0; i < vector_size; i++){ cout << hA[i] << " "; } cout << endl; cout << "vector B : "; for (int i = 0; i < vector_size; i++){ cout << hB[i] << " "; } cout << endl; */ cout << "Cuda_Time = " << elapsedTime << endl; cout << endl; float cublas = blas_copy(vector_size); cout << "Cublas_Time = " << cublas << endl; cout << endl; float thrust = thrust_copy(vector_size); cout << "Thrust_Time = " << thrust << endl; cudaEventDestroy(start); cudaEventDestroy(stop); delete [] hA; delete [] hB; cudaFree(dA); cudaFree(dB); }
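All three benchmarks above (raw kernel, cuBLAS copy, Thrust copy) use the same cudaEvent timing bracket. The self-contained sketch below shows only that bracket around a hypothetical kernel; the kernel body and problem size are invented, and just the event pattern mirrors the code above.

#include <cuda_runtime.h>
#include <cstdio>

__global__ void busyKernel(float* x, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] = x[i] * 2.0f + 1.0f;  // arbitrary work so there is something to time
}

int main()
{
    const int n = 1 << 20;
    float* d_x = nullptr;
    cudaMalloc(&d_x, n * sizeof(float));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0);                    // mark the stream before the work
    busyKernel<<<(n + 255) / 256, 256>>>(d_x, n);
    cudaEventRecord(stop, 0);                     // mark the stream after the work
    cudaEventSynchronize(stop);                   // block until the stop marker is reached

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);       // GPU time between the markers, in ms
    printf("kernel took %f ms\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_x);
    return 0;
}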
283e17d64dd96938217c9b8e9cbc4a00814a8c62.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <cutil_math.h> #include <cstdio> #include <cstdlib> #include <cmath> #define MAX_BOUNCES 16 #define PHOTON_RADIUS .15f #define SHUTTER_TIME_SECONDS 0.01f #define EXPOSURE_CONSTANT 10 #define USE_MORTON_ORDER 0 #include "RandomState.h" #include "Halton.h" #include "Ray.h" #include "Material.h" #include "HitInfo.h" #include "Hemisphere.h" #include "Sampler2D.h" #include "Photonmap.h" #include "Scene.h" namespace { template <typename S, typename L, typename T> static __device__ float3 sampleIncomingDirectRadiance( float3 const& pos, float3 const& normal, S const& scene, L const& light, RandomState& rng, int const nSamples1D) { T sampler(nSamples1D, rng); float3 const directIrradiance = light.sampleIrradiance( pos, normal, scene, rng, sampler, nSamples1D * nSamples1D); return directIrradiance / CUDART_PI_F; // AGI Equation 2.12 } template <typename T> __device__ static inline T sq(T x) { return x * x; } template <typename S, typename L> static __device__ float3 sampleIncidentRadiance( Ray& ray, S const& scene, L const& light, DefaultPhotonMap const* photonMap, RandomState& rng) { HitInfo info; float3 cumulatedReflectance = make_float3(1); float currentRefractiveIndex = 1; for(int nBounces=0; nBounces<MAX_BOUNCES; ++nBounces) { if( scene.trace(ray, info) ) { float3 const hitPosition = ray.pos + ray.dir * info.time; if(info.material.type == MT_EMITTING) { return cumulatedReflectance * info.material.color; // emitted radiance } else if(info.material.type == MT_DIFFUSE) { float3 const hitReflectance = info.material.color; float3 const hitNormal = info.normal; float3 const incomingDirectRadiance = sampleIncomingDirectRadiance<S, L, LatticeSampler2D>( hitPosition, hitNormal, scene, light, rng, 5); float3 score = make_float3(0); int const nSamples1D = 6; int const nSamples2D = sq(nSamples1D); LatticeSampler2D sampler(nSamples1D, rng); for(int i=0; i<nSamples2D; ++i) { float u, v; sampler.next(u, v, i, rng); ray = Ray( hitPosition, Hemisphere::cosineWeightedDirection(hitNormal, u, v)); bool const hit = scene.trace(ray, info); if(hit & info.material.type == MT_DIFFUSE) { score += sampleIncomingDirectRadiance<S, L, SukharevSampler2D>( ray.pos + ray.dir * info.time, info.normal, scene, light, rng, 2) * info.material.color; } } float3 const incomingIndirectRadiance = (score * cumulatedReflectance) / nSamples2D; //float3 const causticsRadiance = photonMap->powerDensity(hitPosition, hitNormal, PHOTON_RADIUS) / CUDART_PI_F; float3 const causticsRadiance = make_float3(0); return (incomingDirectRadiance + incomingIndirectRadiance + causticsRadiance) * hitReflectance; } else if(info.material.type == MT_SPECULAR) { cumulatedReflectance *= info.material.color; ray = Ray(hitPosition, reflect(ray.dir, info.normal)); } else { cumulatedReflectance *= info.material.color; float3 reflectedDirection = reflect(ray.dir, info.normal); float3 refractedDirection; int noTotalInternalReflection = Hemisphere::computeRefraction( info.normal, ray.dir, currentRefractiveIndex, info.material.refractiveIndex, refractedDirection); // TODO: We currently cannot "bifurcate" paths, // so we'll trace the path (refraction or // reflection) with the highest weight. 
float fresnelCoefficient = Hemisphere::fresnelCoefficient( info.normal, ray.dir, currentRefractiveIndex, info.material.refractiveIndex) * noTotalInternalReflection + (1-noTotalInternalReflection); if(fresnelCoefficient > .5f) { cumulatedReflectance *= fresnelCoefficient; ray = Ray(hitPosition, reflectedDirection); } else { cumulatedReflectance *= (1 - fresnelCoefficient); ray = Ray(hitPosition, refractedDirection); currentRefractiveIndex = info.material.refractiveIndex; } } } else { return make_float3(0); } } return make_float3(0); } static inline __device__ float3 toneMap(float3 const& radiance) { float3 const irradiation = radiance * (SHUTTER_TIME_SECONDS * 2 * CUDART_PI_F); float3 tone = irradiation * (-EXPOSURE_CONSTANT); tone.x = expf(tone.x); tone.y = expf(tone.y); tone.z = expf(tone.z); return 255 + tone * (-255); } template <typename S, typename L> static __device__ float3 pixelTone( int x, int y, int width, int height, float3 eyePos, S const& scene, L const& light, DefaultPhotonMap const* photonMap, RandomState& rng) { float3 front = normalize(-eyePos); float3 up = make_float3(0,1,0); up = normalize(up - front*dot(front,up)); float3 right = cross(front, up); float3 tone = make_float3(0); int const sampleCount = 1; for(int i=0; i<sampleCount; ++i) { float u = ((i&1) + .5f) / 1; float v = ((i>>1) + .5f) / 1; u = 2*(x + u) / width - 1; v = 2*(y + v) / height - 1; v *= (-3.0f/4.0f); Ray eye( eyePos, normalize(right * u + up * v + front)); tone += toneMap(sampleIncidentRadiance(eye, scene, light, photonMap, rng)); } return tone / sampleCount; } static inline int idiv_ceil(int x, int y) { return x/y + ((x%y) ? 1:0); } static void checkCUDAError(const char* msg) { hipError_t err = hipGetLastError(); if(hipSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err)); exit(EXIT_FAILURE); } } #if USE_MORTON_ORDER struct MortonOrder { public: __device__ static void indexToCoordinate(int index, int& x, int& y) { x = pack((index >> 1) & 0x55555555); y = pack( index & 0x55555555); } private: __device__ static int pack(int n) { int x = 0; #pragma unroll for(int i=0; i<16; ++i) { x |= ((n >> (2*i)) & 1) << i; } return x; } }; static unsigned int nextPow2(unsigned int v) { v--; v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16; v++; return v; } #else #define BLOCK_WIDTH 5 #define BLOCK_HEIGHT 32 #endif } // anonymous namespace static __global__ void raytracingKernel( uchar4* color, DefaultPhotonMap const* photonMap, RandomState* random, int width, int height, float timeSeconds) { #if USE_MORTON_ORDER int index = blockIdx.x * blockDim.x + threadIdx.x; int x, y; MortonOrder::indexToCoordinate(index, x, y); #else int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; #endif __shared__ Scene scene; scene.setup(timeSeconds); if(x < width & y < height) { int const index = y * width + x; int const x = index % width; int const y = index / width; RandomState rng(random[index]); // create a local copy rng = random[index]; float3 tone = pixelTone(x, y, width, height, scene.getEyePosition(), scene.getScene(), scene.getLight(), photonMap, rng); random[index] = rng; color[index] = make_uchar4(tone.x, tone.y, tone.z, 0); } } void raytracing( uchar4* color, DefaultPhotonMap const* photonMap, RandomState* random, unsigned int image_width, unsigned int image_height, float timeSeconds) { #if USE_MORTON_ORDER unsigned int temp = max(image_width, image_height); if(temp && (temp & (temp-1))) { temp = nextPow2(temp); } printf("morton size: 
%dx%d\n", temp, temp); dim3 nBlocks(idiv_ceil(temp*temp, 192), 1); dim3 nThreads(192, 1); #else dim3 nBlocks (idiv_ceil(image_width, BLOCK_WIDTH), idiv_ceil(image_height, BLOCK_HEIGHT)); dim3 nThreads(BLOCK_WIDTH, BLOCK_HEIGHT); #endif hipFuncSetCacheConfig(raytracingKernel, hipFuncCachePreferL1); hipLaunchKernelGGL(( raytracingKernel), dim3(nBlocks), dim3(nThreads) , 0, 0, color, photonMap, random, image_width, image_height, timeSeconds); hipDeviceSynchronize(); checkCUDAError("kernel failed!"); }
283e17d64dd96938217c9b8e9cbc4a00814a8c62.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <cutil_math.h> #include <cstdio> #include <cstdlib> #include <cmath> #define MAX_BOUNCES 16 #define PHOTON_RADIUS .15f #define SHUTTER_TIME_SECONDS 0.01f #define EXPOSURE_CONSTANT 10 #define USE_MORTON_ORDER 0 #include "RandomState.h" #include "Halton.h" #include "Ray.h" #include "Material.h" #include "HitInfo.h" #include "Hemisphere.h" #include "Sampler2D.h" #include "Photonmap.h" #include "Scene.h" namespace { template <typename S, typename L, typename T> static __device__ float3 sampleIncomingDirectRadiance( float3 const& pos, float3 const& normal, S const& scene, L const& light, RandomState& rng, int const nSamples1D) { T sampler(nSamples1D, rng); float3 const directIrradiance = light.sampleIrradiance( pos, normal, scene, rng, sampler, nSamples1D * nSamples1D); return directIrradiance / CUDART_PI_F; // AGI Equation 2.12 } template <typename T> __device__ static inline T sq(T x) { return x * x; } template <typename S, typename L> static __device__ float3 sampleIncidentRadiance( Ray& ray, S const& scene, L const& light, DefaultPhotonMap const* photonMap, RandomState& rng) { HitInfo info; float3 cumulatedReflectance = make_float3(1); float currentRefractiveIndex = 1; for(int nBounces=0; nBounces<MAX_BOUNCES; ++nBounces) { if( scene.trace(ray, info) ) { float3 const hitPosition = ray.pos + ray.dir * info.time; if(info.material.type == MT_EMITTING) { return cumulatedReflectance * info.material.color; // emitted radiance } else if(info.material.type == MT_DIFFUSE) { float3 const hitReflectance = info.material.color; float3 const hitNormal = info.normal; float3 const incomingDirectRadiance = sampleIncomingDirectRadiance<S, L, LatticeSampler2D>( hitPosition, hitNormal, scene, light, rng, 5); float3 score = make_float3(0); int const nSamples1D = 6; int const nSamples2D = sq(nSamples1D); LatticeSampler2D sampler(nSamples1D, rng); for(int i=0; i<nSamples2D; ++i) { float u, v; sampler.next(u, v, i, rng); ray = Ray( hitPosition, Hemisphere::cosineWeightedDirection(hitNormal, u, v)); bool const hit = scene.trace(ray, info); if(hit & info.material.type == MT_DIFFUSE) { score += sampleIncomingDirectRadiance<S, L, SukharevSampler2D>( ray.pos + ray.dir * info.time, info.normal, scene, light, rng, 2) * info.material.color; } } float3 const incomingIndirectRadiance = (score * cumulatedReflectance) / nSamples2D; //float3 const causticsRadiance = photonMap->powerDensity(hitPosition, hitNormal, PHOTON_RADIUS) / CUDART_PI_F; float3 const causticsRadiance = make_float3(0); return (incomingDirectRadiance + incomingIndirectRadiance + causticsRadiance) * hitReflectance; } else if(info.material.type == MT_SPECULAR) { cumulatedReflectance *= info.material.color; ray = Ray(hitPosition, reflect(ray.dir, info.normal)); } else { cumulatedReflectance *= info.material.color; float3 reflectedDirection = reflect(ray.dir, info.normal); float3 refractedDirection; int noTotalInternalReflection = Hemisphere::computeRefraction( info.normal, ray.dir, currentRefractiveIndex, info.material.refractiveIndex, refractedDirection); // TODO: We currently cannot "bifurcate" paths, // so we'll trace the path (refraction or // reflection) with the highest weight. 
float fresnelCoefficient = Hemisphere::fresnelCoefficient( info.normal, ray.dir, currentRefractiveIndex, info.material.refractiveIndex) * noTotalInternalReflection + (1-noTotalInternalReflection); if(fresnelCoefficient > .5f) { cumulatedReflectance *= fresnelCoefficient; ray = Ray(hitPosition, reflectedDirection); } else { cumulatedReflectance *= (1 - fresnelCoefficient); ray = Ray(hitPosition, refractedDirection); currentRefractiveIndex = info.material.refractiveIndex; } } } else { return make_float3(0); } } return make_float3(0); } static inline __device__ float3 toneMap(float3 const& radiance) { float3 const irradiation = radiance * (SHUTTER_TIME_SECONDS * 2 * CUDART_PI_F); float3 tone = irradiation * (-EXPOSURE_CONSTANT); tone.x = expf(tone.x); tone.y = expf(tone.y); tone.z = expf(tone.z); return 255 + tone * (-255); } template <typename S, typename L> static __device__ float3 pixelTone( int x, int y, int width, int height, float3 eyePos, S const& scene, L const& light, DefaultPhotonMap const* photonMap, RandomState& rng) { float3 front = normalize(-eyePos); float3 up = make_float3(0,1,0); up = normalize(up - front*dot(front,up)); float3 right = cross(front, up); float3 tone = make_float3(0); int const sampleCount = 1; for(int i=0; i<sampleCount; ++i) { float u = ((i&1) + .5f) / 1; float v = ((i>>1) + .5f) / 1; u = 2*(x + u) / width - 1; v = 2*(y + v) / height - 1; v *= (-3.0f/4.0f); Ray eye( eyePos, normalize(right * u + up * v + front)); tone += toneMap(sampleIncidentRadiance(eye, scene, light, photonMap, rng)); } return tone / sampleCount; } static inline int idiv_ceil(int x, int y) { return x/y + ((x%y) ? 1:0); } static void checkCUDAError(const char* msg) { cudaError_t err = cudaGetLastError(); if(cudaSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err)); exit(EXIT_FAILURE); } } #if USE_MORTON_ORDER struct MortonOrder { public: __device__ static void indexToCoordinate(int index, int& x, int& y) { x = pack((index >> 1) & 0x55555555); y = pack( index & 0x55555555); } private: __device__ static int pack(int n) { int x = 0; #pragma unroll for(int i=0; i<16; ++i) { x |= ((n >> (2*i)) & 1) << i; } return x; } }; static unsigned int nextPow2(unsigned int v) { v--; v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16; v++; return v; } #else #define BLOCK_WIDTH 5 #define BLOCK_HEIGHT 32 #endif } // anonymous namespace static __global__ void raytracingKernel( uchar4* color, DefaultPhotonMap const* photonMap, RandomState* random, int width, int height, float timeSeconds) { #if USE_MORTON_ORDER int index = blockIdx.x * blockDim.x + threadIdx.x; int x, y; MortonOrder::indexToCoordinate(index, x, y); #else int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; #endif __shared__ Scene scene; scene.setup(timeSeconds); if(x < width & y < height) { int const index = y * width + x; int const x = index % width; int const y = index / width; RandomState rng(random[index]); // create a local copy rng = random[index]; float3 tone = pixelTone(x, y, width, height, scene.getEyePosition(), scene.getScene(), scene.getLight(), photonMap, rng); random[index] = rng; color[index] = make_uchar4(tone.x, tone.y, tone.z, 0); } } void raytracing( uchar4* color, DefaultPhotonMap const* photonMap, RandomState* random, unsigned int image_width, unsigned int image_height, float timeSeconds) { #if USE_MORTON_ORDER unsigned int temp = max(image_width, image_height); if(temp && (temp & (temp-1))) { temp = nextPow2(temp); } printf("morton 
size: %dx%d\n", temp, temp); dim3 nBlocks(idiv_ceil(temp*temp, 192), 1); dim3 nThreads(192, 1); #else dim3 nBlocks (idiv_ceil(image_width, BLOCK_WIDTH), idiv_ceil(image_height, BLOCK_HEIGHT)); dim3 nThreads(BLOCK_WIDTH, BLOCK_HEIGHT); #endif cudaFuncSetCacheConfig(raytracingKernel, cudaFuncCachePreferL1); raytracingKernel<<< nBlocks, nThreads >>>(color, photonMap, random, image_width, image_height, timeSeconds); cudaThreadSynchronize(); checkCUDAError("kernel failed!"); }
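The toneMap function above applies an exposure curve: irradiation = radiance * SHUTTER_TIME_SECONDS * 2*pi, then tone = 255 * (1 - exp(-EXPOSURE_CONSTANT * irradiation)), which sends zero radiance to 0 and saturates smoothly toward 255. The tiny host-side check below evaluates that curve for one arbitrary radiance value chosen only for illustration.

#include <cmath>
#include <cstdio>

int main()
{
    const float shutter = 0.01f;    // SHUTTER_TIME_SECONDS from the record above
    const float exposure = 10.0f;   // EXPOSURE_CONSTANT from the record above
    const float pi = 3.14159265f;
    const float radiance = 5.0f;    // arbitrary sample value

    const float irradiation = radiance * shutter * 2.0f * pi;
    const float tone = 255.0f * (1.0f - expf(-exposure * irradiation));
    printf("radiance %.2f -> 8-bit tone %.1f\n", radiance, tone);  // about 244 here
    return 0;
}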
cc69743d60f80d573ac4eafc78473b60f0750b81.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <unittest/unittest.h> #include <thrust/remove.h> #include <thrust/execution_policy.h> template<typename Iterator, typename T, typename Iterator2> __global__ void remove_kernel(Iterator first, Iterator last, T val, Iterator2 result) { *result = thrust::remove(thrust::seq, first, last, val); } template<typename Iterator, typename Predicate, typename Iterator2> __global__ void remove_if_kernel(Iterator first, Iterator last, Predicate pred, Iterator2 result) { *result = thrust::remove_if(thrust::seq, first, last, pred); } template<typename Iterator1, typename Iterator2, typename Predicate, typename Iterator3> __global__ void remove_if_kernel(Iterator1 first, Iterator1 last, Iterator2 stencil_first, Predicate pred, Iterator3 result) { *result = thrust::remove_if(thrust::seq, first, last, stencil_first, pred); } template<typename Iterator1, typename Iterator2, typename T, typename Iterator3> __global__ void remove_copy_kernel(Iterator1 first, Iterator1 last, Iterator2 result1, T val, Iterator3 result2) { *result2 = thrust::remove_copy(thrust::seq, first, last, result1, val); } template<typename Iterator1, typename Iterator2, typename Predicate, typename Iterator3> __global__ void remove_copy_if_kernel(Iterator1 first, Iterator1 last, Iterator2 result, Predicate pred, Iterator3 result_end) { *result_end = thrust::remove_copy_if(thrust::seq, first, last, result, pred); } template<typename Iterator1, typename Iterator2, typename Iterator3, typename Predicate, typename Iterator4> __global__ void remove_copy_if_kernel(Iterator1 first, Iterator1 last, Iterator2 stencil_first, Iterator3 result, Predicate pred, Iterator4 result_end) { *result_end = thrust::remove_copy_if(thrust::seq, first, last, stencil_first, result, pred); } template<typename T> struct is_even : thrust::unary_function<T,bool> { __host__ __device__ bool operator()(T x) { return (static_cast<unsigned int>(x) & 1) == 0; } }; template<typename T> struct is_true : thrust::unary_function<T,bool> { __host__ __device__ bool operator()(T x) { return x ? 
true : false; } }; template<typename T> void TestRemoveDeviceSeq(const size_t n) { thrust::host_vector<T> h_data = unittest::random_samples<T>(n); thrust::device_vector<T> d_data = h_data; typedef typename thrust::device_vector<T>::iterator iterator; thrust::device_vector<iterator> d_result(1); size_t h_size = thrust::remove(h_data.begin(), h_data.end(), T(0)) - h_data.begin(); hipLaunchKernelGGL(( remove_kernel), dim3(1),dim3(1), 0, 0, d_data.begin(), d_data.end(), T(0), d_result.begin()); size_t d_size = (iterator)d_result[0] - d_data.begin(); ASSERT_EQUAL(h_size, d_size); h_data.resize(h_size); d_data.resize(d_size); ASSERT_EQUAL(h_data, d_data); } DECLARE_VARIABLE_UNITTEST(TestRemoveDeviceSeq); template<typename T> void TestRemoveIfDeviceSeq(const size_t n) { thrust::host_vector<T> h_data = unittest::random_samples<T>(n); thrust::device_vector<T> d_data = h_data; typedef typename thrust::device_vector<T>::iterator iterator; thrust::device_vector<iterator> d_result(1); size_t h_size = thrust::remove_if(h_data.begin(), h_data.end(), is_true<T>()) - h_data.begin(); hipLaunchKernelGGL(( remove_if_kernel), dim3(1),dim3(1), 0, 0, d_data.begin(), d_data.end(), is_true<T>(), d_result.begin()); size_t d_size = (iterator)d_result[0] - d_data.begin(); ASSERT_EQUAL(h_size, d_size); h_data.resize(h_size); d_data.resize(d_size); ASSERT_EQUAL(h_data, d_data); } DECLARE_VARIABLE_UNITTEST(TestRemoveIfDeviceSeq); template<typename T> void TestRemoveIfStencilDeviceSeq(const size_t n) { thrust::host_vector<T> h_data = unittest::random_samples<T>(n); thrust::device_vector<T> d_data = h_data; typedef typename thrust::device_vector<T>::iterator iterator; thrust::device_vector<iterator> d_result(1); thrust::host_vector<bool> h_stencil = unittest::random_integers<bool>(n); thrust::device_vector<bool> d_stencil = h_stencil; size_t h_size = thrust::remove_if(h_data.begin(), h_data.end(), h_stencil.begin(), is_true<T>()) - h_data.begin(); hipLaunchKernelGGL(( remove_if_kernel), dim3(1),dim3(1), 0, 0, d_data.begin(), d_data.end(), d_stencil.begin(), is_true<T>(), d_result.begin()); size_t d_size = (iterator)d_result[0] - d_data.begin(); ASSERT_EQUAL(h_size, d_size); h_data.resize(h_size); d_data.resize(d_size); ASSERT_EQUAL(h_data, d_data); } DECLARE_VARIABLE_UNITTEST(TestRemoveIfStencilDeviceSeq); template<typename T> void TestRemoveCopyDeviceSeq(const size_t n) { thrust::host_vector<T> h_data = unittest::random_samples<T>(n); thrust::device_vector<T> d_data = h_data; thrust::host_vector<T> h_result(n); thrust::device_vector<T> d_result(n); typedef typename thrust::device_vector<T>::iterator iterator; thrust::device_vector<iterator> d_new_end(1); size_t h_size = thrust::remove_copy(h_data.begin(), h_data.end(), h_result.begin(), T(0)) - h_result.begin(); hipLaunchKernelGGL(( remove_copy_kernel), dim3(1),dim3(1), 0, 0, d_data.begin(), d_data.end(), d_result.begin(), T(0), d_new_end.begin()); size_t d_size = (iterator)d_new_end[0] - d_result.begin(); ASSERT_EQUAL(h_size, d_size); h_result.resize(h_size); d_result.resize(d_size); ASSERT_EQUAL(h_result, d_result); } DECLARE_VARIABLE_UNITTEST(TestRemoveCopyDeviceSeq); template<typename T> void TestRemoveCopyIfDeviceSeq(const size_t n) { thrust::host_vector<T> h_data = unittest::random_samples<T>(n); thrust::device_vector<T> d_data = h_data; thrust::host_vector<T> h_result(n); thrust::device_vector<T> d_result(n); typedef typename thrust::device_vector<T>::iterator iterator; thrust::device_vector<iterator> d_new_end(1); size_t h_size = 
thrust::remove_copy_if(h_data.begin(), h_data.end(), h_result.begin(), is_true<T>()) - h_result.begin(); hipLaunchKernelGGL(( remove_copy_if_kernel), dim3(1),dim3(1), 0, 0, d_data.begin(), d_data.end(), d_result.begin(), is_true<T>(), d_new_end.begin()); size_t d_size = (iterator)d_new_end[0] - d_result.begin(); ASSERT_EQUAL(h_size, d_size); h_result.resize(h_size); d_result.resize(d_size); ASSERT_EQUAL(h_result, d_result); } DECLARE_VARIABLE_UNITTEST(TestRemoveCopyIfDeviceSeq); template<typename T> void TestRemoveCopyIfStencilDeviceSeq(const size_t n) { thrust::host_vector<T> h_data = unittest::random_samples<T>(n); thrust::device_vector<T> d_data = h_data; thrust::host_vector<T> h_result(n); thrust::device_vector<T> d_result(n); typedef typename thrust::device_vector<T>::iterator iterator; thrust::device_vector<iterator> d_new_end(1); thrust::host_vector<bool> h_stencil = unittest::random_integers<bool>(n); thrust::device_vector<bool> d_stencil = h_stencil; size_t h_size = thrust::remove_copy_if(h_data.begin(), h_data.end(), h_stencil.begin(), h_result.begin(), is_true<T>()) - h_result.begin(); hipLaunchKernelGGL(( remove_copy_if_kernel), dim3(1),dim3(1), 0, 0, d_data.begin(), d_data.end(), d_stencil.begin(), d_result.begin(), is_true<T>(), d_new_end.begin()); size_t d_size = (iterator)d_new_end[0] - d_result.begin(); ASSERT_EQUAL(h_size, d_size); h_result.resize(h_size); d_result.resize(d_size); ASSERT_EQUAL(h_result, d_result); } DECLARE_VARIABLE_UNITTEST(TestRemoveCopyIfStencilDeviceSeq);
cc69743d60f80d573ac4eafc78473b60f0750b81.cu
#include <unittest/unittest.h> #include <thrust/remove.h> #include <thrust/execution_policy.h> template<typename Iterator, typename T, typename Iterator2> __global__ void remove_kernel(Iterator first, Iterator last, T val, Iterator2 result) { *result = thrust::remove(thrust::seq, first, last, val); } template<typename Iterator, typename Predicate, typename Iterator2> __global__ void remove_if_kernel(Iterator first, Iterator last, Predicate pred, Iterator2 result) { *result = thrust::remove_if(thrust::seq, first, last, pred); } template<typename Iterator1, typename Iterator2, typename Predicate, typename Iterator3> __global__ void remove_if_kernel(Iterator1 first, Iterator1 last, Iterator2 stencil_first, Predicate pred, Iterator3 result) { *result = thrust::remove_if(thrust::seq, first, last, stencil_first, pred); } template<typename Iterator1, typename Iterator2, typename T, typename Iterator3> __global__ void remove_copy_kernel(Iterator1 first, Iterator1 last, Iterator2 result1, T val, Iterator3 result2) { *result2 = thrust::remove_copy(thrust::seq, first, last, result1, val); } template<typename Iterator1, typename Iterator2, typename Predicate, typename Iterator3> __global__ void remove_copy_if_kernel(Iterator1 first, Iterator1 last, Iterator2 result, Predicate pred, Iterator3 result_end) { *result_end = thrust::remove_copy_if(thrust::seq, first, last, result, pred); } template<typename Iterator1, typename Iterator2, typename Iterator3, typename Predicate, typename Iterator4> __global__ void remove_copy_if_kernel(Iterator1 first, Iterator1 last, Iterator2 stencil_first, Iterator3 result, Predicate pred, Iterator4 result_end) { *result_end = thrust::remove_copy_if(thrust::seq, first, last, stencil_first, result, pred); } template<typename T> struct is_even : thrust::unary_function<T,bool> { __host__ __device__ bool operator()(T x) { return (static_cast<unsigned int>(x) & 1) == 0; } }; template<typename T> struct is_true : thrust::unary_function<T,bool> { __host__ __device__ bool operator()(T x) { return x ? 
true : false; } }; template<typename T> void TestRemoveDeviceSeq(const size_t n) { thrust::host_vector<T> h_data = unittest::random_samples<T>(n); thrust::device_vector<T> d_data = h_data; typedef typename thrust::device_vector<T>::iterator iterator; thrust::device_vector<iterator> d_result(1); size_t h_size = thrust::remove(h_data.begin(), h_data.end(), T(0)) - h_data.begin(); remove_kernel<<<1,1>>>(d_data.begin(), d_data.end(), T(0), d_result.begin()); size_t d_size = (iterator)d_result[0] - d_data.begin(); ASSERT_EQUAL(h_size, d_size); h_data.resize(h_size); d_data.resize(d_size); ASSERT_EQUAL(h_data, d_data); } DECLARE_VARIABLE_UNITTEST(TestRemoveDeviceSeq); template<typename T> void TestRemoveIfDeviceSeq(const size_t n) { thrust::host_vector<T> h_data = unittest::random_samples<T>(n); thrust::device_vector<T> d_data = h_data; typedef typename thrust::device_vector<T>::iterator iterator; thrust::device_vector<iterator> d_result(1); size_t h_size = thrust::remove_if(h_data.begin(), h_data.end(), is_true<T>()) - h_data.begin(); remove_if_kernel<<<1,1>>>(d_data.begin(), d_data.end(), is_true<T>(), d_result.begin()); size_t d_size = (iterator)d_result[0] - d_data.begin(); ASSERT_EQUAL(h_size, d_size); h_data.resize(h_size); d_data.resize(d_size); ASSERT_EQUAL(h_data, d_data); } DECLARE_VARIABLE_UNITTEST(TestRemoveIfDeviceSeq); template<typename T> void TestRemoveIfStencilDeviceSeq(const size_t n) { thrust::host_vector<T> h_data = unittest::random_samples<T>(n); thrust::device_vector<T> d_data = h_data; typedef typename thrust::device_vector<T>::iterator iterator; thrust::device_vector<iterator> d_result(1); thrust::host_vector<bool> h_stencil = unittest::random_integers<bool>(n); thrust::device_vector<bool> d_stencil = h_stencil; size_t h_size = thrust::remove_if(h_data.begin(), h_data.end(), h_stencil.begin(), is_true<T>()) - h_data.begin(); remove_if_kernel<<<1,1>>>(d_data.begin(), d_data.end(), d_stencil.begin(), is_true<T>(), d_result.begin()); size_t d_size = (iterator)d_result[0] - d_data.begin(); ASSERT_EQUAL(h_size, d_size); h_data.resize(h_size); d_data.resize(d_size); ASSERT_EQUAL(h_data, d_data); } DECLARE_VARIABLE_UNITTEST(TestRemoveIfStencilDeviceSeq); template<typename T> void TestRemoveCopyDeviceSeq(const size_t n) { thrust::host_vector<T> h_data = unittest::random_samples<T>(n); thrust::device_vector<T> d_data = h_data; thrust::host_vector<T> h_result(n); thrust::device_vector<T> d_result(n); typedef typename thrust::device_vector<T>::iterator iterator; thrust::device_vector<iterator> d_new_end(1); size_t h_size = thrust::remove_copy(h_data.begin(), h_data.end(), h_result.begin(), T(0)) - h_result.begin(); remove_copy_kernel<<<1,1>>>(d_data.begin(), d_data.end(), d_result.begin(), T(0), d_new_end.begin()); size_t d_size = (iterator)d_new_end[0] - d_result.begin(); ASSERT_EQUAL(h_size, d_size); h_result.resize(h_size); d_result.resize(d_size); ASSERT_EQUAL(h_result, d_result); } DECLARE_VARIABLE_UNITTEST(TestRemoveCopyDeviceSeq); template<typename T> void TestRemoveCopyIfDeviceSeq(const size_t n) { thrust::host_vector<T> h_data = unittest::random_samples<T>(n); thrust::device_vector<T> d_data = h_data; thrust::host_vector<T> h_result(n); thrust::device_vector<T> d_result(n); typedef typename thrust::device_vector<T>::iterator iterator; thrust::device_vector<iterator> d_new_end(1); size_t h_size = thrust::remove_copy_if(h_data.begin(), h_data.end(), h_result.begin(), is_true<T>()) - h_result.begin(); remove_copy_if_kernel<<<1,1>>>(d_data.begin(), d_data.end(), 
d_result.begin(), is_true<T>(), d_new_end.begin()); size_t d_size = (iterator)d_new_end[0] - d_result.begin(); ASSERT_EQUAL(h_size, d_size); h_result.resize(h_size); d_result.resize(d_size); ASSERT_EQUAL(h_result, d_result); } DECLARE_VARIABLE_UNITTEST(TestRemoveCopyIfDeviceSeq); template<typename T> void TestRemoveCopyIfStencilDeviceSeq(const size_t n) { thrust::host_vector<T> h_data = unittest::random_samples<T>(n); thrust::device_vector<T> d_data = h_data; thrust::host_vector<T> h_result(n); thrust::device_vector<T> d_result(n); typedef typename thrust::device_vector<T>::iterator iterator; thrust::device_vector<iterator> d_new_end(1); thrust::host_vector<bool> h_stencil = unittest::random_integers<bool>(n); thrust::device_vector<bool> d_stencil = h_stencil; size_t h_size = thrust::remove_copy_if(h_data.begin(), h_data.end(), h_stencil.begin(), h_result.begin(), is_true<T>()) - h_result.begin(); remove_copy_if_kernel<<<1,1>>>(d_data.begin(), d_data.end(), d_stencil.begin(), d_result.begin(), is_true<T>(), d_new_end.begin()); size_t d_size = (iterator)d_new_end[0] - d_result.begin(); ASSERT_EQUAL(h_size, d_size); h_result.resize(h_size); d_result.resize(d_size); ASSERT_EQUAL(h_result, d_result); } DECLARE_VARIABLE_UNITTEST(TestRemoveCopyIfStencilDeviceSeq);
ef71c2eccbcb28a5d768e8003bb50296fb5ed64e.hip
// !!! This is a file automatically generated by hipify!!! #include <string.h> #include <stdio.h> #include <hip/hip_runtime.h> #define CHECK(call) { \ const hipError_t error = call; \ if (error != hipSuccess) { \ printf("Error: %s:%d, ", __FILE__, __LINE__); \ printf("code: %d, reason: %s\n", error, hipGetErrorString(error)); \ exit(1); \ } \ } \ // reference: // https://devblogs.nvidia.com/unified-memory-in-cuda-6/ /**********class definition**********/ // Managed Base Class // inherit from this to automatically allocate objects in Unified Memory class Managed { public: void *operator new(size_t len) { void *ptr; CHECK(hipMallocManaged(&ptr, len)); CHECK(hipDeviceSynchronize()); return ptr; } void operator delete(void *ptr) { CHECK(hipDeviceSynchronize()); CHECK(hipFree(ptr)); } }; // String Class for Managed Memory class String : public Managed { private: int length; char *data; public: String() : length(0), data(0) {} // Constructor for C-string initializer String(const char *s) : length(0), data(0) { _realloc(strlen(s)); strcpy(data, s); } // Copy constructor String(const String& s) : length(0), data(0) { _realloc(s.length); strcpy(data, s.data); } ~String() { hipFree(data); } // Assignment operator String& operator = (const char* s) { _realloc(strlen(s)); strcpy(data, s); return *this; } // Element access (from host or device) __host__ __device__ char& operator[](int pos) { return data[pos]; } // C-string access __host__ __device__ const char* c_str() const { return data; } private: void _realloc(int len) { hipFree(data); length = len; CHECK(hipMallocManaged(&data, length+1)); } }; class DataElement : public Managed { public: String name; int value; }; /**********CUDA kernels**********/ __global__ void Kernel_by_pointer(DataElement *elem) { printf("On device by pointer: name=%s, value=%d\n", elem->name.c_str(), elem->value); elem->name[0] = 'p'; elem->value++; } __global__ void Kernel_by_ref(DataElement &elem) { printf("On device by ref: name=%s, value=%d\n", elem.name.c_str(), elem.value); elem.name[0] = 'r'; elem.value++; } __global__ void Kernel_by_value(DataElement elem) { printf("On device by value: name=%s, value=%d\n", elem.name.c_str(), elem.value); elem.name[0] = 'v'; elem.value++; } /**********main function**********/ int main(int argc, char **argv) { DataElement *e = new DataElement; e->value = 10; e->name = "hello"; hipLaunchKernelGGL(( Kernel_by_pointer), dim3(1), dim3(1) , 0, 0, e); CHECK(hipDeviceSynchronize()); printf("On host (after by-pointer): name=%s, value=%d\n\n", e->name.c_str(), e->value); hipLaunchKernelGGL(( Kernel_by_ref), dim3(1), dim3(1) , 0, 0, *e); CHECK(hipDeviceSynchronize()); printf("On host (after by-ref): name=%s, value=%d\n\n", e->name.c_str(), e->value); hipLaunchKernelGGL(( Kernel_by_value), dim3(1), dim3(1) , 0, 0, *e); CHECK(hipDeviceSynchronize()); printf("On host (after by-value): name=%s, value=%d\n\n", e->name.c_str(), e->value); delete e; CHECK(hipDeviceReset()); return 0; }
ef71c2eccbcb28a5d768e8003bb50296fb5ed64e.cu
#include <string.h> #include <stdio.h> #include <cuda_runtime.h> #define CHECK(call) { \ const cudaError_t error = call; \ if (error != cudaSuccess) { \ printf("Error: %s:%d, ", __FILE__, __LINE__); \ printf("code: %d, reason: %s\n", error, cudaGetErrorString(error)); \ exit(1); \ } \ } \ // reference: // https://devblogs.nvidia.com/unified-memory-in-cuda-6/ /**********class definition**********/ // Managed Base Class // inherit from this to automatically allocate objects in Unified Memory class Managed { public: void *operator new(size_t len) { void *ptr; CHECK(cudaMallocManaged(&ptr, len)); CHECK(cudaDeviceSynchronize()); return ptr; } void operator delete(void *ptr) { CHECK(cudaDeviceSynchronize()); CHECK(cudaFree(ptr)); } }; // String Class for Managed Memory class String : public Managed { private: int length; char *data; public: String() : length(0), data(0) {} // Constructor for C-string initializer String(const char *s) : length(0), data(0) { _realloc(strlen(s)); strcpy(data, s); } // Copy constructor String(const String& s) : length(0), data(0) { _realloc(s.length); strcpy(data, s.data); } ~String() { cudaFree(data); } // Assignment operator String& operator = (const char* s) { _realloc(strlen(s)); strcpy(data, s); return *this; } // Element access (from host or device) __host__ __device__ char& operator[](int pos) { return data[pos]; } // C-string access __host__ __device__ const char* c_str() const { return data; } private: void _realloc(int len) { cudaFree(data); length = len; CHECK(cudaMallocManaged(&data, length+1)); } }; class DataElement : public Managed { public: String name; int value; }; /**********CUDA kernels**********/ __global__ void Kernel_by_pointer(DataElement *elem) { printf("On device by pointer: name=%s, value=%d\n", elem->name.c_str(), elem->value); elem->name[0] = 'p'; elem->value++; } __global__ void Kernel_by_ref(DataElement &elem) { printf("On device by ref: name=%s, value=%d\n", elem.name.c_str(), elem.value); elem.name[0] = 'r'; elem.value++; } __global__ void Kernel_by_value(DataElement elem) { printf("On device by value: name=%s, value=%d\n", elem.name.c_str(), elem.value); elem.name[0] = 'v'; elem.value++; } /**********main function**********/ int main(int argc, char **argv) { DataElement *e = new DataElement; e->value = 10; e->name = "hello"; Kernel_by_pointer<<< 1, 1 >>>(e); CHECK(cudaDeviceSynchronize()); printf("On host (after by-pointer): name=%s, value=%d\n\n", e->name.c_str(), e->value); Kernel_by_ref<<< 1, 1 >>>(*e); CHECK(cudaDeviceSynchronize()); printf("On host (after by-ref): name=%s, value=%d\n\n", e->name.c_str(), e->value); Kernel_by_value<<< 1, 1 >>>(*e); CHECK(cudaDeviceSynchronize()); printf("On host (after by-value): name=%s, value=%d\n\n", e->name.c_str(), e->value); delete e; CHECK(cudaDeviceReset()); return 0; }
fd03e4ecd217e531d9ed239f55f6eded90ea2fb9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <assert.h> #define THREADS_PER_BLOCK 768 #define ARRAY_SIZE THREADS_PER_BLOCK * 1024 #define OPTIM 0 static void HandleError(hipError_t error, const char *file, int line) { if (error != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(error), file, line); exit( EXIT_FAILURE ); } } #define HANDLE_ERROR(err) (HandleError(err, __FILE__, __LINE__ )) __global__ void reverseArray(int *inArray, int *outArray) { int inOffset = blockDim.x * blockIdx.x; // int inIndex = inOffset + blockIdx.x; // ^ // print'ujac inIndex zauwazylem w gdb, ze // w obrebie bloku z threadu na thread nie // zmienienia sie jego wartosc, a co wiecej: // wychodzimy poza adres pamieci (cuda-memcheck) // np. dla bloku 1023. // int inIndex = inOffset + threadIdx.x; // ^ poprawne przesuniecie // int outOffset = blockDim.x * (gridDim.x - 1 - blockIdx.x); // int outIndex = outOffset + (blockDim.x - 1 - blockIdx.x); // ^ analogicznie jak wyzej. int outOffset = blockDim.x * (gridDim.x - 1 - blockIdx.x); int outIndex = outOffset + (blockDim.x - 1 - threadIdx.x); outArray[outIndex] = inArray[inIndex]; } __global__ void reverseArrayOptim(int *inArray, int *outArray) { __shared__ int shared_memory[THREADS_PER_BLOCK]; int inOffset = blockDim.x * blockIdx.x; int inIndex = inOffset + threadIdx.x; shared_memory[blockDim.x - 1 - threadIdx.x] = inArray[inIndex]; // ^ reverse __syncthreads(); int outOffset = blockDim.x * (gridDim.x - 1 - blockIdx.x); int outIndex = outOffset + threadIdx.x; outArray[outIndex] = shared_memory[threadIdx.x]; } void print_vec(int n, int *array) { printf(" array = { "); for (int i = 0; i < n; ++i) { printf("%d, ", array[i]); } printf("}\n"); } int main(void) { int *hostArray; int *devInArray, *devOutArray; int numBlocks = ARRAY_SIZE / THREADS_PER_BLOCK; size_t memSize = ARRAY_SIZE * sizeof(int); hostArray = (int *)malloc(memSize); for (int i = 0; i < ARRAY_SIZE; i++) { hostArray[i] = i; } hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); HANDLE_ERROR(hipMalloc((void **)&devInArray, memSize)); HANDLE_ERROR(hipMalloc((void **)&devOutArray, memSize)); HANDLE_ERROR(hipMemcpy(devInArray, hostArray, memSize, hipMemcpyHostToDevice)); dim3 dimGrid(numBlocks); dim3 dimBlock(THREADS_PER_BLOCK); #if OPTIM hipLaunchKernelGGL(( reverseArrayOptim), dim3(dimGrid), dim3(dimBlock), 0, 0, devInArray, devOutArray); #else hipLaunchKernelGGL(( reverseArray), dim3(dimGrid), dim3(dimBlock), 0, 0, devInArray, devOutArray); #endif HANDLE_ERROR(hipMemcpy(hostArray, devOutArray, memSize, hipMemcpyDeviceToHost)); hipEventRecord(stop); hipEventSynchronize(stop); float exec_time = 0; hipEventElapsedTime(&exec_time, start, stop); #if OPTIM printf("[GPU/OPTIM] Execution time (ms): %3.1f\n", exec_time); #else printf("[GPU] Execution time (ms): %3.1f\n", exec_time); #endif for (int i = 0; i < ARRAY_SIZE; i++) { assert(hostArray[i] == ARRAY_SIZE - 1 - i); } HANDLE_ERROR(hipFree(devInArray)); HANDLE_ERROR(hipFree(devOutArray)); free(hostArray); printf("Correct!\n"); return 0; }
fd03e4ecd217e531d9ed239f55f6eded90ea2fb9.cu
#include <stdio.h> #include <assert.h> #define THREADS_PER_BLOCK 768 #define ARRAY_SIZE THREADS_PER_BLOCK * 1024 #define OPTIM 0 static void HandleError(cudaError_t error, const char *file, int line) { if (error != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(error), file, line); exit( EXIT_FAILURE ); } } #define HANDLE_ERROR(err) (HandleError(err, __FILE__, __LINE__ )) __global__ void reverseArray(int *inArray, int *outArray) { int inOffset = blockDim.x * blockIdx.x; // int inIndex = inOffset + blockIdx.x; // ^ // while printing inIndex in gdb I noticed that // within a block its value does not change // from one thread to the next, and what is more: // we run past the allocated memory (cuda-memcheck), // e.g. for block 1023. // int inIndex = inOffset + threadIdx.x; // ^ the correct offset // int outOffset = blockDim.x * (gridDim.x - 1 - blockIdx.x); // int outIndex = outOffset + (blockDim.x - 1 - blockIdx.x); // ^ analogous to the above. int outOffset = blockDim.x * (gridDim.x - 1 - blockIdx.x); int outIndex = outOffset + (blockDim.x - 1 - threadIdx.x); outArray[outIndex] = inArray[inIndex]; } __global__ void reverseArrayOptim(int *inArray, int *outArray) { __shared__ int shared_memory[THREADS_PER_BLOCK]; int inOffset = blockDim.x * blockIdx.x; int inIndex = inOffset + threadIdx.x; shared_memory[blockDim.x - 1 - threadIdx.x] = inArray[inIndex]; // ^ reverse __syncthreads(); int outOffset = blockDim.x * (gridDim.x - 1 - blockIdx.x); int outIndex = outOffset + threadIdx.x; outArray[outIndex] = shared_memory[threadIdx.x]; } void print_vec(int n, int *array) { printf(" array = { "); for (int i = 0; i < n; ++i) { printf("%d, ", array[i]); } printf("}\n"); } int main(void) { int *hostArray; int *devInArray, *devOutArray; int numBlocks = ARRAY_SIZE / THREADS_PER_BLOCK; size_t memSize = ARRAY_SIZE * sizeof(int); hostArray = (int *)malloc(memSize); for (int i = 0; i < ARRAY_SIZE; i++) { hostArray[i] = i; } cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); HANDLE_ERROR(cudaMalloc((void **)&devInArray, memSize)); HANDLE_ERROR(cudaMalloc((void **)&devOutArray, memSize)); HANDLE_ERROR(cudaMemcpy(devInArray, hostArray, memSize, cudaMemcpyHostToDevice)); dim3 dimGrid(numBlocks); dim3 dimBlock(THREADS_PER_BLOCK); #if OPTIM reverseArrayOptim<<<dimGrid, dimBlock>>> (devInArray, devOutArray); #else reverseArray<<<dimGrid, dimBlock>>> (devInArray, devOutArray); #endif HANDLE_ERROR(cudaMemcpy(hostArray, devOutArray, memSize, cudaMemcpyDeviceToHost)); cudaEventRecord(stop); cudaEventSynchronize(stop); float exec_time = 0; cudaEventElapsedTime(&exec_time, start, stop); #if OPTIM printf("[GPU/OPTIM] Execution time (ms): %3.1f\n", exec_time); #else printf("[GPU] Execution time (ms): %3.1f\n", exec_time); #endif for (int i = 0; i < ARRAY_SIZE; i++) { assert(hostArray[i] == ARRAY_SIZE - 1 - i); } HANDLE_ERROR(cudaFree(devInArray)); HANDLE_ERROR(cudaFree(devOutArray)); free(hostArray); printf("Correct!\n"); return 0; }
c3bffb6c6c358f79e3411c5b109f3fd78492b2ab.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "BoxReciprocalGPU.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *gpu_prefact = NULL; hipMalloc(&gpu_prefact, XSIZE*YSIZE); double *gpu_sumRnew = NULL; hipMalloc(&gpu_sumRnew, XSIZE*YSIZE); double *gpu_sumInew = NULL; hipMalloc(&gpu_sumInew, XSIZE*YSIZE); double *gpu_energyRecip = NULL; hipMalloc(&gpu_energyRecip, XSIZE*YSIZE); int imageSize = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( BoxReciprocalGPU), dim3(gridBlock),dim3(threadBlock), 0, 0, gpu_prefact,gpu_sumRnew,gpu_sumInew,gpu_energyRecip,imageSize); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( BoxReciprocalGPU), dim3(gridBlock),dim3(threadBlock), 0, 0, gpu_prefact,gpu_sumRnew,gpu_sumInew,gpu_energyRecip,imageSize); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( BoxReciprocalGPU), dim3(gridBlock),dim3(threadBlock), 0, 0, gpu_prefact,gpu_sumRnew,gpu_sumInew,gpu_energyRecip,imageSize); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
c3bffb6c6c358f79e3411c5b109f3fd78492b2ab.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "BoxReciprocalGPU.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *gpu_prefact = NULL; cudaMalloc(&gpu_prefact, XSIZE*YSIZE); double *gpu_sumRnew = NULL; cudaMalloc(&gpu_sumRnew, XSIZE*YSIZE); double *gpu_sumInew = NULL; cudaMalloc(&gpu_sumInew, XSIZE*YSIZE); double *gpu_energyRecip = NULL; cudaMalloc(&gpu_energyRecip, XSIZE*YSIZE); int imageSize = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); BoxReciprocalGPU<<<gridBlock,threadBlock>>>(gpu_prefact,gpu_sumRnew,gpu_sumInew,gpu_energyRecip,imageSize); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { BoxReciprocalGPU<<<gridBlock,threadBlock>>>(gpu_prefact,gpu_sumRnew,gpu_sumInew,gpu_energyRecip,imageSize); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { BoxReciprocalGPU<<<gridBlock,threadBlock>>>(gpu_prefact,gpu_sumRnew,gpu_sumInew,gpu_energyRecip,imageSize); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
05b91c2dcfa069d59322187bfec51fb66a4b0b19.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <wb.h> #define wbCheck(stmt) do { \ hipError_t err = stmt; \ if (err != hipSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ return -1; \ } \ } while(0) #define TILE_WIDTH 16 // Compute C = A * B __global__ void matrixMultiplyShared(float * A, float * B, float * C, int numARows, int numAColumns, int numBColumns ) { //@@ Insert code to implement matrix multiplication here //@@ You have to use shared memory for this MP __shared__ float sharedA[ TILE_WIDTH ][ TILE_WIDTH ]; __shared__ float sharedB[ TILE_WIDTH ][ TILE_WIDTH ]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int Row = by * blockDim.y + ty; int Col = bx * blockDim.x + tx; float Cvalue = .0; for( int t = 0; t < (numAColumns-1)/TILE_WIDTH+1; ++t ) { if( Row < numARows && t*TILE_WIDTH+tx < numAColumns ) sharedA[ ty ][ tx ] = A[ Row*numAColumns + t*TILE_WIDTH+tx ]; else sharedA[ ty ][ tx ] = .0; if( t*TILE_WIDTH+ty < numAColumns && Col < numBColumns ) sharedB[ ty ][ tx ] = B[ (t*TILE_WIDTH+ty)*numBColumns + Col ]; else sharedB[ ty ][ tx ] = .0; __syncthreads(); for( int i = 0; i < TILE_WIDTH; ++i ) Cvalue += sharedA[ty][i] * sharedB[i][tx]; __syncthreads(); } if( Row < numARows && Col < numBColumns ) C[Row*numBColumns+Col] = Cvalue; } int main(int argc, char ** argv) { wbArg_t args; float * hostA; // The A matrix float * hostB; // The B matrix float * hostC; // The output C matrix float * deviceA; float * deviceB; float * deviceC; int numARows; // number of rows in the matrix A int numAColumns; // number of columns in the matrix A int numBRows; // number of rows in the matrix B int numBColumns; // number of columns in the matrix B int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostA = (float *) wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns); hostB = (float *) wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns); //@@ Set numCRows and numCColumns numCRows = numARows; numCColumns = numBColumns; size_t bytesA, bytesB, bytesC; bytesA = numARows*numAColumns*sizeof(float); bytesB = numBRows*numBColumns*sizeof(float); bytesC = numCRows*numCColumns*sizeof(float); //@@ Allocate the hostC matrix hostC = (float*) calloc( numCRows * numCColumns, sizeof(float) ); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns); wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here wbCheck(hipMalloc((void**)&deviceA, bytesA)); wbCheck(hipMalloc((void**)&deviceB, bytesB)); wbCheck(hipMalloc((void**)&deviceC, bytesC)); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here wbCheck(hipMemcpy(deviceA,hostA,bytesA,hipMemcpyHostToDevice)); wbCheck(hipMemcpy(deviceB,hostB,bytesB,hipMemcpyHostToDevice)); wbTime_stop(GPU, "Copying input memory to the GPU."); //@@ Initialize the grid and block dimensions here dim3 dimGrid( (numBColumns-1)/TILE_WIDTH+1, (numARows-1)/TILE_WIDTH+1, 1 ); dim3 dimBlock( TILE_WIDTH, TILE_WIDTH, 1 ); wbTime_start(Compute, "Performing CUDA computation"); //@@ Launch the GPU Kernel here hipLaunchKernelGGL(( 
matrixMultiplyShared), dim3(dimGrid), dim3(dimBlock) , 0, 0, deviceA, deviceB, deviceC , numARows, numAColumns, numBColumns ); hipDeviceSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here wbCheck(hipMemcpy( hostC, deviceC, bytesC, hipMemcpyDeviceToHost )); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here wbCheck( hipFree(deviceA) ); wbCheck( hipFree(deviceB) ); wbCheck( hipFree(deviceC) ); wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostC, numCRows, numCColumns); free(hostA); free(hostB); free(hostC); return 0; }
05b91c2dcfa069d59322187bfec51fb66a4b0b19.cu
#include <wb.h> #define wbCheck(stmt) do { \ cudaError_t err = stmt; \ if (err != cudaSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ return -1; \ } \ } while(0) #define TILE_WIDTH 16 // Compute C = A * B __global__ void matrixMultiplyShared(float * A, float * B, float * C, int numARows, int numAColumns, int numBColumns ) { //@@ Insert code to implement matrix multiplication here //@@ You have to use shared memory for this MP __shared__ float sharedA[ TILE_WIDTH ][ TILE_WIDTH ]; __shared__ float sharedB[ TILE_WIDTH ][ TILE_WIDTH ]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int Row = by * blockDim.y + ty; int Col = bx * blockDim.x + tx; float Cvalue = .0; for( int t = 0; t < (numAColumns-1)/TILE_WIDTH+1; ++t ) { if( Row < numARows && t*TILE_WIDTH+tx < numAColumns ) sharedA[ ty ][ tx ] = A[ Row*numAColumns + t*TILE_WIDTH+tx ]; else sharedA[ ty ][ tx ] = .0; if( t*TILE_WIDTH+ty < numAColumns && Col < numBColumns ) sharedB[ ty ][ tx ] = B[ (t*TILE_WIDTH+ty)*numBColumns + Col ]; else sharedB[ ty ][ tx ] = .0; __syncthreads(); for( int i = 0; i < TILE_WIDTH; ++i ) Cvalue += sharedA[ty][i] * sharedB[i][tx]; __syncthreads(); } if( Row < numARows && Col < numBColumns ) C[Row*numBColumns+Col] = Cvalue; } int main(int argc, char ** argv) { wbArg_t args; float * hostA; // The A matrix float * hostB; // The B matrix float * hostC; // The output C matrix float * deviceA; float * deviceB; float * deviceC; int numARows; // number of rows in the matrix A int numAColumns; // number of columns in the matrix A int numBRows; // number of rows in the matrix B int numBColumns; // number of columns in the matrix B int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostA = (float *) wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns); hostB = (float *) wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns); //@@ Set numCRows and numCColumns numCRows = numARows; numCColumns = numBColumns; size_t bytesA, bytesB, bytesC; bytesA = numARows*numAColumns*sizeof(float); bytesB = numBRows*numBColumns*sizeof(float); bytesC = numCRows*numCColumns*sizeof(float); //@@ Allocate the hostC matrix hostC = (float*) calloc( numCRows * numCColumns, sizeof(float) ); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns); wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here wbCheck(cudaMalloc((void**)&deviceA, bytesA)); wbCheck(cudaMalloc((void**)&deviceB, bytesB)); wbCheck(cudaMalloc((void**)&deviceC, bytesC)); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here wbCheck(cudaMemcpy(deviceA,hostA,bytesA,cudaMemcpyHostToDevice)); wbCheck(cudaMemcpy(deviceB,hostB,bytesB,cudaMemcpyHostToDevice)); wbTime_stop(GPU, "Copying input memory to the GPU."); //@@ Initialize the grid and block dimensions here dim3 dimGrid( (numBColumns-1)/TILE_WIDTH+1, (numARows-1)/TILE_WIDTH+1, 1 ); dim3 dimBlock( TILE_WIDTH, TILE_WIDTH, 1 ); wbTime_start(Compute, "Performing CUDA computation"); //@@ Launch the GPU Kernel here matrixMultiplyShared<<< dimGrid, dimBlock >>>( deviceA, deviceB, deviceC , numARows, numAColumns, numBColumns ); 
cudaThreadSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here wbCheck(cudaMemcpy( hostC, deviceC, bytesC, cudaMemcpyDeviceToHost )); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here wbCheck( cudaFree(deviceA) ); wbCheck( cudaFree(deviceB) ); wbCheck( cudaFree(deviceC) ); wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostC, numCRows, numCColumns); free(hostA); free(hostB); free(hostC); return 0; }
67e0cc83472d2bd41a8d10f92e2e344b21af7c8c.hip
// !!! This is a file automatically generated by hipify!!! #include "mex.h" #include "gpu/mxGPUArray.h" #include <npp.h> #include <hip/hip_runtime.h> #include <Exceptions.h> #include <helper_cuda.h> #include <math.h> #define MATLAB_ASSERT(expr,msg) if (!(expr)) { mexErrMsgTxt(msg);} #if !defined(MX_API_VER) || MX_API_VER < 0x07030000 typedef size_t mwSize; typedef size_t mwIndex; #endif void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { MATLAB_ASSERT( nrhs == 3, "cropRectanglesMex: Wrong number of input parameters: expected 3"); MATLAB_ASSERT( nlhs == 1, "cropRectanglesMex: Wrong number of output arguments: expected 1"); // Fix input parameter order: const mxArray *imInPtr = (nrhs >= 0) ? prhs[0] : NULL; // image const mxArray *bbInPtr = (nrhs >= 1) ? prhs[1] : NULL; // bounding boxes const mxArray *szInPtr = (nrhs >= 2) ? prhs[2] : NULL; // output image size // Fix output parameter order: mxArray **cropsOutPtr = (nlhs >= 1) ? &plhs[0] : NULL; // croped and resized patches // Get the image MATLAB_ASSERT(mxGetNumberOfDimensions(imInPtr) == 3, "cropRectanglesMex: the image is not 3-dimensional"); MATLAB_ASSERT(mxGetClassID(imInPtr) == mxSINGLE_CLASS, "cropRectanglesMex: the image should be of type SINGLE"); MATLAB_ASSERT(mxGetPi(imInPtr) == NULL, "cropRectanglesMex: image should not be complex"); const mwSize* dimensions = mxGetDimensions(imInPtr); mwSize imageHeight = dimensions[0]; mwSize imageWidth = dimensions[1]; mwSize numChannels = dimensions[2]; MATLAB_ASSERT(numChannels == 3, "cropRectanglesMex: image should contain 3 channels"); float* imageData = (float*) mxGetData(imInPtr); // get bounding boxes MATLAB_ASSERT(mxGetNumberOfDimensions(bbInPtr) == 2, "cropRectanglesMex: <boundingBoxes> input is not 2-dimensional"); MATLAB_ASSERT(mxGetClassID(bbInPtr) == mxDOUBLE_CLASS, "cropRectanglesMex: <boundingBoxes> input is not of type double"); MATLAB_ASSERT(mxGetPi(bbInPtr) == NULL, "cropRectanglesMex: <boundingBoxes> input should not be complex"); MATLAB_ASSERT(mxGetN(bbInPtr) == 4, "cropRectanglesMex: <boundingBoxes> input should be of size #boundingBoxes x 4"); mwSize numBb = mxGetM(bbInPtr); double* bbData = (double*) mxGetData(bbInPtr); // y1, x1, y2, x2 // get output size MATLAB_ASSERT(mxGetNumberOfElements(szInPtr) == 2, "cropRectanglesMex: <outputSize> input should contain 2 numbers"); MATLAB_ASSERT(mxGetClassID(szInPtr) == mxDOUBLE_CLASS, "cropRectanglesMex: <outputSize> input is not of type double"); MATLAB_ASSERT(mxGetPi(szInPtr) == NULL, "cropRectanglesMex: <outputSize> input should not be complex"); double* outputSizeData = (double*) mxGetData(szInPtr); int targetHeight = (int) (outputSizeData[0] + 0.5); int targetWidth = (int) (outputSizeData[1] + 0.5); // initialize GPU mxInitGPU(); // copy image to the GPU mxGPUArray const *inputImage; float const *d_inputImage; inputImage = mxGPUCreateFromMxArray(imInPtr); d_inputImage = (float const *)(mxGPUGetDataReadOnly(inputImage)); // allocate memory for the output mxGPUArray *outputData; float *d_outputData; const mwSize outputDimensions[4] = { targetHeight, targetWidth, numChannels, numBb }; outputData = mxGPUCreateGPUArray(4, outputDimensions, mxSINGLE_CLASS, mxREAL, MX_GPU_INITIALIZE_VALUES) ; //MX_GPU_DO_NOT_INITIALIZE); d_outputData = (float *)(mxGPUGetData(outputData)); // initialize some cropping arguments NppiSize nppiImageSize = {}; nppiImageSize.width = imageHeight; // CAUTION: NPPI thinks that the image is transposed nppiImageSize.height = imageWidth; int channelValueSize = sizeof(float); int 
imageStep = imageHeight * channelValueSize; int targetStep = targetHeight * channelValueSize; NppiRect targetRect = {}; targetRect.x = 0; targetRect.y = 0; targetRect.width = targetHeight; targetRect.height = targetWidth; // the main loop over bounding boxes for(int iBb = 0; iBb < numBb; ++iBb) { double y1 = bbData[ iBb ] - 1; double x1 = bbData[ iBb + numBb ] - 1; double y2 = bbData[ iBb + numBb * 2 ] - 1; double x2 = bbData[ iBb + numBb * 3 ] - 1; double nXFactor = double( targetHeight ) / ( y2 - y1 + 1 ); double nYFactor = double( targetWidth ) / ( x2 - x1 + 1 ); double nXShift = -nXFactor * (double(y1) + 0.5) + 0.5; double nYShift = -nYFactor * (double(x1) + 0.5) + 0.5; NppiRect sourceRect = {}; sourceRect.x = (int) floor(y1); sourceRect.y = (int) floor(x1); sourceRect.width = (int) ceil(y2 - y1 + 1); sourceRect.height = (int) ceil(x2 - x1 + 1); if (sourceRect.width <= 1) { sourceRect.width = 2; } if (sourceRect.height <= 1) { sourceRect.height = 2; } // adjust bounding box bounds if it is outside of the image if (sourceRect.x < 0) { sourceRect.width = sourceRect.width + sourceRect.x; sourceRect.x = 0.0; } if (sourceRect.y < 0) { sourceRect.height = sourceRect.height + sourceRect.y; sourceRect.y = 0.0; } if (sourceRect.width > imageHeight - sourceRect.x + 1) { sourceRect.width = imageHeight - sourceRect.x + 1; } if (sourceRect.height > imageWidth - sourceRect.y + 1) { sourceRect.height = imageWidth - sourceRect.y + 1; } float *curOutput = d_outputData + numChannels * targetHeight * targetWidth * iBb; const float *pSrc[3] = { d_inputImage, d_inputImage + imageHeight * imageWidth, d_inputImage + 2 * imageHeight * imageWidth}; float *pDst[3] = { curOutput, curOutput + targetHeight * targetWidth, curOutput + 2 * targetHeight * targetWidth}; NPP_CHECK_NPP( nppiResizeSqrPixel_32f_P3R ( pSrc, // const Npp32f *pSrc, nppiImageSize, // nppiSize oSrcSize, imageStep, // int nSrcStep, sourceRect, // NppiRect oSrcROI, pDst, // Npp8u *pDst, targetStep, // int nDstStep, targetRect, // NppiRect oDstROI, nXFactor, nYFactor, nXShift, nYShift, NPPI_INTER_CUBIC //int eInterpolation ) ); } *cropsOutPtr = mxGPUCreateMxArrayOnGPU(outputData); // do not forget to free GPU memory mxGPUDestroyGPUArray(outputData); mxGPUDestroyGPUArray(inputImage); }
67e0cc83472d2bd41a8d10f92e2e344b21af7c8c.cu
#include "mex.h" #include "gpu/mxGPUArray.h" #include <npp.h> #include <cuda_runtime.h> #include <Exceptions.h> #include <helper_cuda.h> #include <math.h> #define MATLAB_ASSERT(expr,msg) if (!(expr)) { mexErrMsgTxt(msg);} #if !defined(MX_API_VER) || MX_API_VER < 0x07030000 typedef size_t mwSize; typedef size_t mwIndex; #endif void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { MATLAB_ASSERT( nrhs == 3, "cropRectanglesMex: Wrong number of input parameters: expected 3"); MATLAB_ASSERT( nlhs == 1, "cropRectanglesMex: Wrong number of output arguments: expected 1"); // Fix input parameter order: const mxArray *imInPtr = (nrhs >= 0) ? prhs[0] : NULL; // image const mxArray *bbInPtr = (nrhs >= 1) ? prhs[1] : NULL; // bounding boxes const mxArray *szInPtr = (nrhs >= 2) ? prhs[2] : NULL; // output image size // Fix output parameter order: mxArray **cropsOutPtr = (nlhs >= 1) ? &plhs[0] : NULL; // croped and resized patches // Get the image MATLAB_ASSERT(mxGetNumberOfDimensions(imInPtr) == 3, "cropRectanglesMex: the image is not 3-dimensional"); MATLAB_ASSERT(mxGetClassID(imInPtr) == mxSINGLE_CLASS, "cropRectanglesMex: the image should be of type SINGLE"); MATLAB_ASSERT(mxGetPi(imInPtr) == NULL, "cropRectanglesMex: image should not be complex"); const mwSize* dimensions = mxGetDimensions(imInPtr); mwSize imageHeight = dimensions[0]; mwSize imageWidth = dimensions[1]; mwSize numChannels = dimensions[2]; MATLAB_ASSERT(numChannels == 3, "cropRectanglesMex: image should contain 3 channels"); float* imageData = (float*) mxGetData(imInPtr); // get bounding boxes MATLAB_ASSERT(mxGetNumberOfDimensions(bbInPtr) == 2, "cropRectanglesMex: <boundingBoxes> input is not 2-dimensional"); MATLAB_ASSERT(mxGetClassID(bbInPtr) == mxDOUBLE_CLASS, "cropRectanglesMex: <boundingBoxes> input is not of type double"); MATLAB_ASSERT(mxGetPi(bbInPtr) == NULL, "cropRectanglesMex: <boundingBoxes> input should not be complex"); MATLAB_ASSERT(mxGetN(bbInPtr) == 4, "cropRectanglesMex: <boundingBoxes> input should be of size #boundingBoxes x 4"); mwSize numBb = mxGetM(bbInPtr); double* bbData = (double*) mxGetData(bbInPtr); // y1, x1, y2, x2 // get output size MATLAB_ASSERT(mxGetNumberOfElements(szInPtr) == 2, "cropRectanglesMex: <outputSize> input should contain 2 numbers"); MATLAB_ASSERT(mxGetClassID(szInPtr) == mxDOUBLE_CLASS, "cropRectanglesMex: <outputSize> input is not of type double"); MATLAB_ASSERT(mxGetPi(szInPtr) == NULL, "cropRectanglesMex: <outputSize> input should not be complex"); double* outputSizeData = (double*) mxGetData(szInPtr); int targetHeight = (int) (outputSizeData[0] + 0.5); int targetWidth = (int) (outputSizeData[1] + 0.5); // initialize GPU mxInitGPU(); // copy image to the GPU mxGPUArray const *inputImage; float const *d_inputImage; inputImage = mxGPUCreateFromMxArray(imInPtr); d_inputImage = (float const *)(mxGPUGetDataReadOnly(inputImage)); // allocate memory for the output mxGPUArray *outputData; float *d_outputData; const mwSize outputDimensions[4] = { targetHeight, targetWidth, numChannels, numBb }; outputData = mxGPUCreateGPUArray(4, outputDimensions, mxSINGLE_CLASS, mxREAL, MX_GPU_INITIALIZE_VALUES) ; //MX_GPU_DO_NOT_INITIALIZE); d_outputData = (float *)(mxGPUGetData(outputData)); // initialize some cropping arguments NppiSize nppiImageSize = {}; nppiImageSize.width = imageHeight; // CAUTION: NPPI thinks that the image is transposed nppiImageSize.height = imageWidth; int channelValueSize = sizeof(float); int imageStep = imageHeight * channelValueSize; int targetStep = 
targetHeight * channelValueSize; NppiRect targetRect = {}; targetRect.x = 0; targetRect.y = 0; targetRect.width = targetHeight; targetRect.height = targetWidth; // the main loop over bounding boxes for(int iBb = 0; iBb < numBb; ++iBb) { double y1 = bbData[ iBb ] - 1; double x1 = bbData[ iBb + numBb ] - 1; double y2 = bbData[ iBb + numBb * 2 ] - 1; double x2 = bbData[ iBb + numBb * 3 ] - 1; double nXFactor = double( targetHeight ) / ( y2 - y1 + 1 ); double nYFactor = double( targetWidth ) / ( x2 - x1 + 1 ); double nXShift = -nXFactor * (double(y1) + 0.5) + 0.5; double nYShift = -nYFactor * (double(x1) + 0.5) + 0.5; NppiRect sourceRect = {}; sourceRect.x = (int) floor(y1); sourceRect.y = (int) floor(x1); sourceRect.width = (int) ceil(y2 - y1 + 1); sourceRect.height = (int) ceil(x2 - x1 + 1); if (sourceRect.width <= 1) { sourceRect.width = 2; } if (sourceRect.height <= 1) { sourceRect.height = 2; } // adjust bounding box bounds if it is outside of the image if (sourceRect.x < 0) { sourceRect.width = sourceRect.width + sourceRect.x; sourceRect.x = 0.0; } if (sourceRect.y < 0) { sourceRect.height = sourceRect.height + sourceRect.y; sourceRect.y = 0.0; } if (sourceRect.width > imageHeight - sourceRect.x + 1) { sourceRect.width = imageHeight - sourceRect.x + 1; } if (sourceRect.height > imageWidth - sourceRect.y + 1) { sourceRect.height = imageWidth - sourceRect.y + 1; } float *curOutput = d_outputData + numChannels * targetHeight * targetWidth * iBb; const float *pSrc[3] = { d_inputImage, d_inputImage + imageHeight * imageWidth, d_inputImage + 2 * imageHeight * imageWidth}; float *pDst[3] = { curOutput, curOutput + targetHeight * targetWidth, curOutput + 2 * targetHeight * targetWidth}; NPP_CHECK_NPP( nppiResizeSqrPixel_32f_P3R ( pSrc, // const Npp32f *pSrc, nppiImageSize, // nppiSize oSrcSize, imageStep, // int nSrcStep, sourceRect, // NppiRect oSrcROI, pDst, // Npp8u *pDst, targetStep, // int nDstStep, targetRect, // NppiRect oDstROI, nXFactor, nYFactor, nXShift, nYShift, NPPI_INTER_CUBIC //int eInterpolation ) ); } *cropsOutPtr = mxGPUCreateMxArrayOnGPU(outputData); // do not forget to free GPU memory mxGPUDestroyGPUArray(outputData); mxGPUDestroyGPUArray(inputImage); }
9bb4ce9d0b0325bb6d47e08eb2f3bc7e1d51ce6a.hip
// !!! This is a file automatically generated by hipify!!! // Copyright 2019 Google LLC. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "qtrajectory_testfixture.h" #include <rocblas.h> #include <custatevec.h> #include "gtest/gtest.h" #include "../lib/simulator_custatevec.h" namespace qsim { template <typename FP> struct Factory { using fp_type = FP; using Simulator = qsim::SimulatorCuStateVec<fp_type>; using StateSpace = typename Simulator::StateSpace; Factory() { ErrorCheck(hipblasCreate(&cublas_handle)); ErrorCheck(custatevecCreate(&custatevec_handle)); } ~Factory() { ErrorCheck(hipblasDestroy(cublas_handle)); ErrorCheck(custatevecDestroy(custatevec_handle)); } StateSpace CreateStateSpace() const { return StateSpace(cublas_handle, custatevec_handle); } Simulator CreateSimulator() const { return Simulator(custatevec_handle); } hipblasHandle_t cublas_handle; custatevecHandle_t custatevec_handle; }; TEST(QTrajectoryCuStateVecTest, BitFlip) { TestBitFlip(qsim::Factory<float>()); } TEST(QTrajectoryCuStateVecTest, GenDump) { TestGenDump(qsim::Factory<float>()); } TEST(QTrajectoryCuStateVecTest, ReusingResults) { TestReusingResults(qsim::Factory<float>()); } TEST(QTrajectoryCuStateVecTest, CollectKopStat) { TestCollectKopStat(qsim::Factory<float>()); } TEST(QTrajectoryCuStateVecTest, CleanCircuit) { TestCleanCircuit(qsim::Factory<float>()); } TEST(QTrajectoryCuStateVecTest, InitialState) { TestInitialState(qsim::Factory<float>()); } TEST(QTrajectoryCuStateVecTest, UncomputeFinalState) { TestUncomputeFinalState(qsim::Factory<float>()); } } // namespace qsim int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
9bb4ce9d0b0325bb6d47e08eb2f3bc7e1d51ce6a.cu
// Copyright 2019 Google LLC. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "qtrajectory_testfixture.h" #include <cublas_v2.h> #include <custatevec.h> #include "gtest/gtest.h" #include "../lib/simulator_custatevec.h" namespace qsim { template <typename FP> struct Factory { using fp_type = FP; using Simulator = qsim::SimulatorCuStateVec<fp_type>; using StateSpace = typename Simulator::StateSpace; Factory() { ErrorCheck(cublasCreate(&cublas_handle)); ErrorCheck(custatevecCreate(&custatevec_handle)); } ~Factory() { ErrorCheck(cublasDestroy(cublas_handle)); ErrorCheck(custatevecDestroy(custatevec_handle)); } StateSpace CreateStateSpace() const { return StateSpace(cublas_handle, custatevec_handle); } Simulator CreateSimulator() const { return Simulator(custatevec_handle); } cublasHandle_t cublas_handle; custatevecHandle_t custatevec_handle; }; TEST(QTrajectoryCuStateVecTest, BitFlip) { TestBitFlip(qsim::Factory<float>()); } TEST(QTrajectoryCuStateVecTest, GenDump) { TestGenDump(qsim::Factory<float>()); } TEST(QTrajectoryCuStateVecTest, ReusingResults) { TestReusingResults(qsim::Factory<float>()); } TEST(QTrajectoryCuStateVecTest, CollectKopStat) { TestCollectKopStat(qsim::Factory<float>()); } TEST(QTrajectoryCuStateVecTest, CleanCircuit) { TestCleanCircuit(qsim::Factory<float>()); } TEST(QTrajectoryCuStateVecTest, InitialState) { TestInitialState(qsim::Factory<float>()); } TEST(QTrajectoryCuStateVecTest, UncomputeFinalState) { TestUncomputeFinalState(qsim::Factory<float>()); } } // namespace qsim int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
d614dc29ea5d250ec79ac3bd4740c1cba20b92e2.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#include<stdint.h>
#include<stdlib.h>
#include<cuda.h>

#define WID 1024
#define HEI 1024

#pragma pack(push,1)
typedef struct tagBITMAPFILEHEADER {
    unsigned short bfType;
    uint32_t bfSize;
    unsigned short bfReserved1;
    unsigned short bfReserved2;
    uint32_t bf0ffBits;
}BITMAPFILEHEADER;
#pragma pack(pop)

typedef struct tagBITMAPINFOHEADER {
    uint32_t biSize;
    int32_t biWidth;
    int32_t biHeight;
    unsigned short biPlanes;
    unsigned short biBitCount;
    uint32_t biCompression;
    uint32_t biSizeImage;
    int32_t biXPelsPerMeter;
    int32_t biYPelsPerMeter;
    uint32_t biCirUsed;
    uint32_t biCirImportant;
}BITMAPINFOHEADER;

typedef struct tagRGBQUAD {
    unsigned char rgbBlue;
    unsigned char rgbGreen;
    unsigned char rgbRed;
    unsigned char rgbReserved;
}RGBQUAD;

typedef struct tagBITMAPINFO {
    BITMAPINFOHEADER bmiHeader;
    RGBQUAD bmiColors[1];
}BITMAPINFO;

__global__ void distance_gpu(int *x_d,int *y_d,double *z_d,double *img_buf_d,int *tensuu_d)
{
    int i,j,k;
    i=blockIdx.x*128+threadIdx.x;
    double hatyou,goukei;
    hatyou=0.0633;
    goukei=M_PI/hatyou;
    for(j=0;j<WID;j++){
        for(k=0;k<*tensuu_d;k++){
            img_buf_d[i*WID+j]=img_buf_d[i*WID+j]+cos(goukei*((j-x_d[k])*(j-x_d[k])+(i-y_d[k])*(i-y_d[k]))/z_d[k]);
        }
    }
}

int main(){
    int tensuu;
    BITMAPFILEHEADER BmpFileHeader;
    BITMAPINFOHEADER BmpInfoHeader;
    RGBQUAD RGBQuad[256];
    FILE *fp;
    int i,j;

    BmpFileHeader.bfType =19778;
    BmpFileHeader.bfSize =14+40+1024+(WID*HEI);
    BmpFileHeader.bfReserved1 =0;
    BmpFileHeader.bfReserved2 =0;
    BmpFileHeader.bf0ffBits =14+40+1024;

    BmpInfoHeader.biSize =40;
    BmpInfoHeader.biWidth =WID;
    BmpInfoHeader.biHeight =HEI;
    BmpInfoHeader.biPlanes =1;
    BmpInfoHeader.biBitCount =8; //256 gray levels
    BmpInfoHeader.biCompression =0L;
    BmpInfoHeader.biSizeImage =0L;
    BmpInfoHeader.biXPelsPerMeter =0L;
    BmpInfoHeader.biYPelsPerMeter =0L;
    BmpInfoHeader.biCirUsed =0L;
    BmpInfoHeader.biCirImportant =0L;

    for(i=0;i<256;i++){
        RGBQuad[i].rgbBlue =i;
        RGBQuad[i].rgbGreen =i;
        RGBQuad[i].rgbRed =i;
        RGBQuad[i].rgbReserved =0;
    }

    char filename[20]={};
    printf("Enter the file name : ");
    scanf("%s",filename);
    fp=fopen(filename,"rb");
    if(fp==NULL){
        printf("File open error\n");
    }
    fread(&tensuu,sizeof(int),1,fp);
    printf("The number of object points is %d\n",tensuu);

    int x[tensuu];
    int y[tensuu];
    double z[tensuu];

    int *tensuu_d;
    hipMalloc((void**)&tensuu_d,sizeof(int));
    hipMemcpy(tensuu_d,&tensuu,sizeof(int),hipMemcpyHostToDevice);

    int *x_d,*y_d;
    double *z_d;
    double *img_buf_d;

    dim3 blocks(8,1,1);
    dim3 threads(128,1,1);

    int x_buf,y_buf,z_buf;
    for(i=0;i<tensuu;i++){
        fread(&x_buf,sizeof(int),1,fp);
        fread(&y_buf,sizeof(int),1,fp);
        fread(&z_buf,sizeof(int),1,fp);
        x[i]=x_buf*40+512;
        y[i]=y_buf*40+512;
        z[i]=((double)z_buf)*40+100000.0;
    }
    fclose(fp);

    hipMalloc((void**)&x_d,tensuu*sizeof(int));
    hipMalloc((void**)&y_d,tensuu*sizeof(int));
    hipMalloc((void**)&z_d,tensuu*sizeof(double));
    hipMalloc((void**)&img_buf_d,WID*HEI*sizeof(double));

    double *img_buf;
    img_buf=(double *)malloc(sizeof(double)*WID*HEI);
    for(i=0;i<WID*HEI;i++){
        img_buf[i]=0.0;
    }

    hipMemcpy(x_d,x,tensuu*sizeof(int),hipMemcpyHostToDevice);
    hipMemcpy(y_d,y,tensuu*sizeof(int),hipMemcpyHostToDevice);
    hipMemcpy(z_d,z,tensuu*sizeof(double),hipMemcpyHostToDevice);
    hipMemcpy(img_buf_d,img_buf,WID*HEI*sizeof(double),hipMemcpyHostToDevice);

    hipLaunchKernelGGL(( distance_gpu), dim3(blocks),dim3(threads), 0, 0, x_d,y_d,z_d,img_buf_d,tensuu_d);

    hipMemcpy(img_buf,img_buf_d,WID*HEI*sizeof(double),hipMemcpyDeviceToHost);

    double min,max,mid;
    min=img_buf[0];
    max=img_buf[0];
    for(i=0;i<HEI;i++){
        for(j=0;j<WID;j++){
            if(min>img_buf[i*WID+j]){
                min=img_buf[i*WID+j];
            }
            if(max<img_buf[i*WID+j]){
                max=img_buf[i*WID+j];
            }
        }
    }
    mid=0.5*(min+max);
    printf("min = %lf max = %lf mid = %lf\n",min,max,mid);

    unsigned char *img;
    img=(unsigned char *)malloc(sizeof(unsigned char)*WID*HEI);
    for(i=0;i<WID*HEI;i++){
        if(img_buf[i]<mid){
            img[i]=0;
        }
        if(img_buf[i]>mid){
            img[i]=255;
        }
    }

    FILE *fp1;
    fp1=fopen("cgh_fresnel_gpu.bmp","wb");
    if(fp1==NULL){
        printf("File open error\n");
    }
    fwrite(&BmpFileHeader, sizeof(BmpFileHeader) , 1 ,fp1);
    fwrite(&BmpInfoHeader, sizeof(BmpInfoHeader) , 1 ,fp1);
    fwrite(&RGBQuad[0], sizeof(RGBQuad[0]) , 256 ,fp1);
    fwrite(img,sizeof(unsigned char),WID*HEI,fp1);

    free(img);
    free(img_buf);
    fclose(fp1);

    hipFree(tensuu_d);
    hipFree(x_d);
    hipFree(y_d);
    hipFree(z_d);
    hipFree(img_buf_d);

    return 0;
}
d614dc29ea5d250ec79ac3bd4740c1cba20b92e2.cu
#include <stdio.h>
#include <math.h>
#include<stdint.h>
#include<stdlib.h>
#include<cuda.h>

#define WID 1024
#define HEI 1024

#pragma pack(push,1)
typedef struct tagBITMAPFILEHEADER {
    unsigned short bfType;
    uint32_t bfSize;
    unsigned short bfReserved1;
    unsigned short bfReserved2;
    uint32_t bf0ffBits;
}BITMAPFILEHEADER;
#pragma pack(pop)

typedef struct tagBITMAPINFOHEADER {
    uint32_t biSize;
    int32_t biWidth;
    int32_t biHeight;
    unsigned short biPlanes;
    unsigned short biBitCount;
    uint32_t biCompression;
    uint32_t biSizeImage;
    int32_t biXPelsPerMeter;
    int32_t biYPelsPerMeter;
    uint32_t biCirUsed;
    uint32_t biCirImportant;
}BITMAPINFOHEADER;

typedef struct tagRGBQUAD {
    unsigned char rgbBlue;
    unsigned char rgbGreen;
    unsigned char rgbRed;
    unsigned char rgbReserved;
}RGBQUAD;

typedef struct tagBITMAPINFO {
    BITMAPINFOHEADER bmiHeader;
    RGBQUAD bmiColors[1];
}BITMAPINFO;

__global__ void distance_gpu(int *x_d,int *y_d,double *z_d,double *img_buf_d,int *tensuu_d)
{
    int i,j,k;
    i=blockIdx.x*128+threadIdx.x;
    double hatyou,goukei;
    hatyou=0.0633;
    goukei=M_PI/hatyou;
    for(j=0;j<WID;j++){
        for(k=0;k<*tensuu_d;k++){
            img_buf_d[i*WID+j]=img_buf_d[i*WID+j]+cos(goukei*((j-x_d[k])*(j-x_d[k])+(i-y_d[k])*(i-y_d[k]))/z_d[k]);
        }
    }
}

int main(){
    int tensuu;
    BITMAPFILEHEADER BmpFileHeader;
    BITMAPINFOHEADER BmpInfoHeader;
    RGBQUAD RGBQuad[256];
    FILE *fp;
    int i,j;

    BmpFileHeader.bfType =19778;
    BmpFileHeader.bfSize =14+40+1024+(WID*HEI);
    BmpFileHeader.bfReserved1 =0;
    BmpFileHeader.bfReserved2 =0;
    BmpFileHeader.bf0ffBits =14+40+1024;

    BmpInfoHeader.biSize =40;
    BmpInfoHeader.biWidth =WID;
    BmpInfoHeader.biHeight =HEI;
    BmpInfoHeader.biPlanes =1;
    BmpInfoHeader.biBitCount =8; //256 gray levels
    BmpInfoHeader.biCompression =0L;
    BmpInfoHeader.biSizeImage =0L;
    BmpInfoHeader.biXPelsPerMeter =0L;
    BmpInfoHeader.biYPelsPerMeter =0L;
    BmpInfoHeader.biCirUsed =0L;
    BmpInfoHeader.biCirImportant =0L;

    for(i=0;i<256;i++){
        RGBQuad[i].rgbBlue =i;
        RGBQuad[i].rgbGreen =i;
        RGBQuad[i].rgbRed =i;
        RGBQuad[i].rgbReserved =0;
    }

    char filename[20]={};
    printf("Enter the file name : ");
    scanf("%s",filename);
    fp=fopen(filename,"rb");
    if(fp==NULL){
        printf("File open error\n");
    }
    fread(&tensuu,sizeof(int),1,fp);
    printf("The number of object points is %d\n",tensuu);

    int x[tensuu];
    int y[tensuu];
    double z[tensuu];

    int *tensuu_d;
    cudaMalloc((void**)&tensuu_d,sizeof(int));
    cudaMemcpy(tensuu_d,&tensuu,sizeof(int),cudaMemcpyHostToDevice);

    int *x_d,*y_d;
    double *z_d;
    double *img_buf_d;

    dim3 blocks(8,1,1);
    dim3 threads(128,1,1);

    int x_buf,y_buf,z_buf;
    for(i=0;i<tensuu;i++){
        fread(&x_buf,sizeof(int),1,fp);
        fread(&y_buf,sizeof(int),1,fp);
        fread(&z_buf,sizeof(int),1,fp);
        x[i]=x_buf*40+512;
        y[i]=y_buf*40+512;
        z[i]=((double)z_buf)*40+100000.0;
    }
    fclose(fp);

    cudaMalloc((void**)&x_d,tensuu*sizeof(int));
    cudaMalloc((void**)&y_d,tensuu*sizeof(int));
    cudaMalloc((void**)&z_d,tensuu*sizeof(double));
    cudaMalloc((void**)&img_buf_d,WID*HEI*sizeof(double));

    double *img_buf;
    img_buf=(double *)malloc(sizeof(double)*WID*HEI);
    for(i=0;i<WID*HEI;i++){
        img_buf[i]=0.0;
    }

    cudaMemcpy(x_d,x,tensuu*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(y_d,y,tensuu*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(z_d,z,tensuu*sizeof(double),cudaMemcpyHostToDevice);
    cudaMemcpy(img_buf_d,img_buf,WID*HEI*sizeof(double),cudaMemcpyHostToDevice);

    distance_gpu<<<blocks,threads>>>(x_d,y_d,z_d,img_buf_d,tensuu_d);

    cudaMemcpy(img_buf,img_buf_d,WID*HEI*sizeof(double),cudaMemcpyDeviceToHost);

    double min,max,mid;
    min=img_buf[0];
    max=img_buf[0];
    for(i=0;i<HEI;i++){
        for(j=0;j<WID;j++){
            if(min>img_buf[i*WID+j]){
                min=img_buf[i*WID+j];
            }
            if(max<img_buf[i*WID+j]){
                max=img_buf[i*WID+j];
            }
        }
    }
    mid=0.5*(min+max);
    printf("min = %lf max = %lf mid = %lf\n",min,max,mid);

    unsigned char *img;
    img=(unsigned char *)malloc(sizeof(unsigned char)*WID*HEI);
    for(i=0;i<WID*HEI;i++){
        if(img_buf[i]<mid){
            img[i]=0;
        }
        if(img_buf[i]>mid){
            img[i]=255;
        }
    }

    FILE *fp1;
    fp1=fopen("cgh_fresnel_gpu.bmp","wb");
    if(fp1==NULL){
        printf("File open error\n");
    }
    fwrite(&BmpFileHeader, sizeof(BmpFileHeader) , 1 ,fp1);
    fwrite(&BmpInfoHeader, sizeof(BmpInfoHeader) , 1 ,fp1);
    fwrite(&RGBQuad[0], sizeof(RGBQuad[0]) , 256 ,fp1);
    fwrite(img,sizeof(unsigned char),WID*HEI,fp1);

    free(img);
    free(img_buf);
    fclose(fp1);

    cudaFree(tensuu_d);
    cudaFree(x_d);
    cudaFree(y_d);
    cudaFree(z_d);
    cudaFree(img_buf_d);

    return 0;
}
ee3e4c6754993b5d64116e13056115463c4164e0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "softmaxloss.h" __global__ void crossbowKernelSoftMaxLossCompute ( const int nthreads, const float* prob_data, const int* label, float* loss, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, float* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = label[n * spatial_dim + s]; if (has_ignore_label_ && label_value == ignore_label_) { loss[index] = 0; counts[index] = 0; } else { loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s], FLT_MIN)); counts[index] = 1; } } } void crossbowKernelSoftMaxLoss (void *args) { /* Kernel configuration parameters */ int ignorelabelvalue; bool ignorelabel; int outer, inner, dim, nthreads; float alpha; float *C; /* Input and output variables */ crossbowVariableP theInput, theLabels; crossbowDataBufferP input, labels; crossbowDataBufferP output; int labels_offset; /* Local variables */ crossbowDataBufferP losses, counts; int losses_length, counts_length; /* struct hipPointerAttribute_t attributes; */ crossbowStreamP s = (crossbowStreamP) args; /* checkCublasStatus(hipblasSetStream (s->cublasHandle, s->stream)); */ /* Get input variable */ theInput = (crossbowVariableP) s->op->kernel->inputs[0]; if (crossbowDataflowMostUpstream(s->dataflow, s->op)) illegalStateException(); input = crossbowStreamGetCurrentInput (s); /* Get labels */ theLabels = (crossbowVariableP) s->op->kernel->inputs[1]; labels = crossbowVariableGetDataBuffer (s->labels, &labels_offset, NULL); /* checkCudaErrors(hipPointerGetAttributes (&attributes, labels->dev)); info("labels->dev at %p: device %d device pointer %p host pointer %p managed %d\n", labels->dev, attributes.device, attributes.devicePointer, attributes.hostPointer, attributes.isManaged); */ /* Get read-write local variables */ losses = crossbowLocalVariableGetDataBuffer ((crossbowLocalVariableP) crossbowArrayListGet (s->op->kernel->variables, 0), s->deviceId, s->id, NULL, &losses_length); counts = crossbowLocalVariableGetDataBuffer ((crossbowLocalVariableP) crossbowArrayListGet (s->op->kernel->variables, 1), s->deviceId, s->id, NULL, &counts_length); /* Get an output buffer */ output = crossbowStreamGetCurrentOutput (s); /* Get kernel configuration parameters */ ignorelabelvalue = crossbowKernelConfigParamGetIntValue ((crossbowKernelConfigParamP) crossbowArrayListGet(s->op->kernel->parameters, 0)); ignorelabel = (ignorelabelvalue >= 0); nthreads = theLabels->schema->elements; outer = crossbowVariableSchemaCountElementsInRange (theInput->schema, 0, 1); inner = crossbowVariableSchemaCountElementsFrom (theInput->schema, 2); dim = theInput->schema->elements / outer; #ifndef KERNEL_NOOP hipLaunchKernelGGL(( crossbowKernelSoftMaxLossCompute), dim3(GET_BLOCKS(nthreads)), dim3(CUDA_NUM_THREADS), 0, s->stream[s->op->branch], nthreads, (float *) (input->dev), (int *) (labels->dev) + (labels_offset / 4), (float *) (losses->dev), outer, dim, inner, ignorelabel, ignorelabelvalue, (float *) (counts->dev)); #else /* Subterfuge unused parameter warnings */ UNUSED (nthreads); UNUSED (input); UNUSED (labels); UNUSED (labels_offset); UNUSED (losses); UNUSED (outer); UNUSED (dim); UNUSED (inner); UNUSED (ignorelabel); UNUSED (ignorelabelvalue); UNUSED (counts); #endif alpha = 1 / (float) nthreads; #ifndef CUBLAS_NOOP checkCublasStatus(hipblasSscal (s->cublasHandle[s->op->branch], nthreads, 
&alpha, (float *) losses->dev, 1)); #else /* Subterfuge unused parameter warnings */ UNUSED (nthreads); UNUSED (alpha); UNUSED (losses); #endif C = (float *) output->dev; /* checkCudaErrors(hipPointerGetAttributes (&attributes, output->dev)); dbg("output->dev at %p: device %d device pointer %p host pointer %p managed %d\n", output->dev, attributes.device, attributes.devicePointer, attributes.hostPointer, attributes.isManaged); checkCudaErrors(hipPointerGetAttributes (&attributes, losses->dev)); dbg("losses->dev at %p: device %d device pointer %p host pointer %p managed %d\n", losses->dev, attributes.device, attributes.devicePointer, attributes.hostPointer, attributes.isManaged); */ #ifndef CUBLAS_NOOP checkCublasStatus(hipblasSetPointerMode(s->cublasHandle[s->op->branch], HIPBLAS_POINTER_MODE_DEVICE)); checkCublasStatus(hipblasSasum (s->cublasHandle[s->op->branch], nthreads, (float *) losses->dev, 1, C)); checkCublasStatus(hipblasSetPointerMode(s->cublasHandle[s->op->branch], HIPBLAS_POINTER_MODE_HOST)); #else /* Subterfuge unused parameter warnings */ UNUSED (nthreads); UNUSED (losses); UNUSED (C); #endif /* Store output in stream */ crossbowListAppend (s->outputs[s->op->id], output); /* Return read-write local variables to kernel when the dataflow execution completes */ crossbowListAppend (s->locals[s->op->id], counts); crossbowListAppend (s->locals[s->op->id], losses); return; }
ee3e4c6754993b5d64116e13056115463c4164e0.cu
#include "softmaxloss.h" __global__ void crossbowKernelSoftMaxLossCompute ( const int nthreads, const float* prob_data, const int* label, float* loss, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, float* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = label[n * spatial_dim + s]; if (has_ignore_label_ && label_value == ignore_label_) { loss[index] = 0; counts[index] = 0; } else { loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s], FLT_MIN)); counts[index] = 1; } } } void crossbowKernelSoftMaxLoss (void *args) { /* Kernel configuration parameters */ int ignorelabelvalue; bool ignorelabel; int outer, inner, dim, nthreads; float alpha; float *C; /* Input and output variables */ crossbowVariableP theInput, theLabels; crossbowDataBufferP input, labels; crossbowDataBufferP output; int labels_offset; /* Local variables */ crossbowDataBufferP losses, counts; int losses_length, counts_length; /* struct cudaPointerAttributes attributes; */ crossbowStreamP s = (crossbowStreamP) args; /* checkCublasStatus(cublasSetStream (s->cublasHandle, s->stream)); */ /* Get input variable */ theInput = (crossbowVariableP) s->op->kernel->inputs[0]; if (crossbowDataflowMostUpstream(s->dataflow, s->op)) illegalStateException(); input = crossbowStreamGetCurrentInput (s); /* Get labels */ theLabels = (crossbowVariableP) s->op->kernel->inputs[1]; labels = crossbowVariableGetDataBuffer (s->labels, &labels_offset, NULL); /* checkCudaErrors(cudaPointerGetAttributes (&attributes, labels->dev)); info("labels->dev at %p: device %d device pointer %p host pointer %p managed %d\n", labels->dev, attributes.device, attributes.devicePointer, attributes.hostPointer, attributes.isManaged); */ /* Get read-write local variables */ losses = crossbowLocalVariableGetDataBuffer ((crossbowLocalVariableP) crossbowArrayListGet (s->op->kernel->variables, 0), s->deviceId, s->id, NULL, &losses_length); counts = crossbowLocalVariableGetDataBuffer ((crossbowLocalVariableP) crossbowArrayListGet (s->op->kernel->variables, 1), s->deviceId, s->id, NULL, &counts_length); /* Get an output buffer */ output = crossbowStreamGetCurrentOutput (s); /* Get kernel configuration parameters */ ignorelabelvalue = crossbowKernelConfigParamGetIntValue ((crossbowKernelConfigParamP) crossbowArrayListGet(s->op->kernel->parameters, 0)); ignorelabel = (ignorelabelvalue >= 0); nthreads = theLabels->schema->elements; outer = crossbowVariableSchemaCountElementsInRange (theInput->schema, 0, 1); inner = crossbowVariableSchemaCountElementsFrom (theInput->schema, 2); dim = theInput->schema->elements / outer; #ifndef KERNEL_NOOP crossbowKernelSoftMaxLossCompute<<<GET_BLOCKS(nthreads), CUDA_NUM_THREADS, 0, s->stream[s->op->branch]>>>( nthreads, (float *) (input->dev), (int *) (labels->dev) + (labels_offset / 4), (float *) (losses->dev), outer, dim, inner, ignorelabel, ignorelabelvalue, (float *) (counts->dev)); #else /* Subterfuge unused parameter warnings */ UNUSED (nthreads); UNUSED (input); UNUSED (labels); UNUSED (labels_offset); UNUSED (losses); UNUSED (outer); UNUSED (dim); UNUSED (inner); UNUSED (ignorelabel); UNUSED (ignorelabelvalue); UNUSED (counts); #endif alpha = 1 / (float) nthreads; #ifndef CUBLAS_NOOP checkCublasStatus(cublasSscal (s->cublasHandle[s->op->branch], nthreads, &alpha, (float *) losses->dev, 1)); #else /* Subterfuge unused parameter warnings */ UNUSED (nthreads); UNUSED (alpha); 
UNUSED (losses); #endif C = (float *) output->dev; /* checkCudaErrors(cudaPointerGetAttributes (&attributes, output->dev)); dbg("output->dev at %p: device %d device pointer %p host pointer %p managed %d\n", output->dev, attributes.device, attributes.devicePointer, attributes.hostPointer, attributes.isManaged); checkCudaErrors(cudaPointerGetAttributes (&attributes, losses->dev)); dbg("losses->dev at %p: device %d device pointer %p host pointer %p managed %d\n", losses->dev, attributes.device, attributes.devicePointer, attributes.hostPointer, attributes.isManaged); */ #ifndef CUBLAS_NOOP checkCublasStatus(cublasSetPointerMode(s->cublasHandle[s->op->branch], CUBLAS_POINTER_MODE_DEVICE)); checkCublasStatus(cublasSasum (s->cublasHandle[s->op->branch], nthreads, (float *) losses->dev, 1, C)); checkCublasStatus(cublasSetPointerMode(s->cublasHandle[s->op->branch], CUBLAS_POINTER_MODE_HOST)); #else /* Subterfuge unused parameter warnings */ UNUSED (nthreads); UNUSED (losses); UNUSED (C); #endif /* Store output in stream */ crossbowListAppend (s->outputs[s->op->id], output); /* Return read-write local variables to kernel when the dataflow execution completes */ crossbowListAppend (s->locals[s->op->id], counts); crossbowListAppend (s->locals[s->op->id], losses); return; }
771135d522b41f2c1b644b31224802837e0582e3.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <hip/hip_runtime.h> #include <stdio.h> #include <cassert> #include <hipcub/hipcub.hpp> // NOLINT #include <vector> #include "glog/logging.h" #include "paddle/fluid/inference/tensorrt/plugin/slice_op_plugin.h" namespace paddle { namespace inference { namespace tensorrt { namespace plugin { template <typename T> __global__ void SliceKernel(int num, int dims, const T *input, const int *offsets_info, T *output) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; extern __shared__ int shared_data[]; for (int i = threadIdx.x; i < dims * 3; i += blockDim.x) { shared_data[i] = offsets_info[i]; } __syncthreads(); if (idx < num) { int t_idx = idx; int in_idx = 0; for (int i = dims - 1; i >= 0; i--) { // output_shape auto t = t_idx % shared_data[i * 3 + 1]; // out offset auto s = t + shared_data[i * 3]; // input_seg_offset in_idx = in_idx + shared_data[i * 3 + 2] * s; t_idx = t_idx / shared_data[i * 3 + 1]; } output[idx] = input[in_idx]; } } SlicePlugin::SlicePlugin(std::vector<int> starts, std::vector<int> ends, std::vector<int> axes, bool with_fp16) : starts_(starts), ends_(ends), axes_(axes) { with_fp16_ = with_fp16; hipEventCreate(&copy_event_); hipStreamCreate(&copy_stream_); } SlicePlugin::SlicePlugin(void const *serial_data, size_t serial_length) { deserializeBase(serial_data, serial_length); DeserializeValue(&serial_data, &serial_length, &starts_); DeserializeValue(&serial_data, &serial_length, &ends_); DeserializeValue(&serial_data, &serial_length, &axes_); DeserializeValue(&serial_data, &serial_length, &with_fp16_); hipEventCreate(&copy_event_); hipStreamCreate(&copy_stream_); } SlicePlugin::~SlicePlugin() { hipStreamDestroy(copy_stream_); hipEventDestroy(copy_event_); hipFree(offset_temp_data_); } SlicePlugin *SlicePlugin::clone() const TRT_NOEXCEPT { return new SlicePlugin(starts_, ends_, axes_, with_fp16_); } bool SlicePlugin::supportsFormat( nvinfer1::DataType type, nvinfer1::PluginFormat format) const TRT_NOEXCEPT { if (with_fp16_) { return (( #if IS_TRT_VERSION_LT(8000) type == nvinfer1::DataType::kFLOAT || #endif type == nvinfer1::DataType::kHALF) && (format == nvinfer1::PluginFormat::kLINEAR)); } else { return ((type == nvinfer1::DataType::kFLOAT) && (format == nvinfer1::PluginFormat::kLINEAR)); } } nvinfer1::Dims SlicePlugin::getOutputDimensions( int index, const nvinfer1::Dims *inputs, int nb_input_dims) TRT_NOEXCEPT { auto in_dims = inputs[0]; nvinfer1::Dims out_dims = in_dims; for (size_t i = 0; i < axes_.size(); i++) { int start = starts_[i]; int end = ends_[i]; out_dims.d[axes_[i] - 1] = end - start; } return out_dims; } int SlicePlugin::enqueue(int batch_size, const void *const *inputs, #if IS_TRT_VERSION_LT(8000) void **outputs, void *workspace, hipStream_t stream) { #else void *const *outputs, void *workspace, hipStream_t stream) TRT_NOEXCEPT { #endif auto input_dims = getInputDims(0); // notice 
input dims is [C, H, W], add input batch dim here auto out_dims = getOutputDimensions(0, &input_dims, 1); input_dims.nbDims += 1; out_dims.nbDims += 1; for (auto i = input_dims.nbDims; i > 0; --i) { input_dims.d[i] = input_dims.d[i - 1]; out_dims.d[i] = out_dims.d[i - 1]; } input_dims.d[0] = batch_size; out_dims.d[0] = batch_size; auto num_dims = input_dims.nbDims; size_t out_num = ProductDim(out_dims); std::vector<int> seg_offsets; std::vector<int> offsets; std::vector<int> extends; offsets.resize(num_dims); extends.resize(num_dims); seg_offsets.resize(num_dims); seg_offsets[num_dims - 1] = 1; for (int i = num_dims - 2; i >= 0; i--) { seg_offsets[i] = input_dims.d[i + 1] * seg_offsets[i + 1]; } for (size_t i = 0; i < num_dims; ++i) { offsets[i] = 0; extends[i] = out_dims.d[i]; } for (size_t i = 0; i < axes_.size(); ++i) { offsets[axes_[i]] = starts_[i]; } std::vector<int> offset_info; for (size_t i = 0; i < num_dims; ++i) { offset_info.push_back(offsets[i]); offset_info.push_back(extends[i]); offset_info.push_back(seg_offsets[i]); } if (offset_temp_data_ == nullptr) { hipMalloc(&offset_temp_data_, 3 * num_dims * sizeof(int)); } hipMemcpyAsync(offset_temp_data_, offset_info.data(), sizeof(int) * 3 * num_dims, hipMemcpyHostToDevice, copy_stream_); hipEventRecord(copy_event_, copy_stream_); hipStreamWaitEvent(stream, copy_event_, 0); int threads = 256; int blocks = (out_num + threads - 1) / threads; auto input_type = getDataType(); if (input_type == nvinfer1::DataType::kFLOAT) { VLOG(1) << "TRT Plugin DataType selected. Slice-->fp32"; const float *input1 = static_cast<const float *>(inputs[0]); float *output = static_cast<float *>(outputs[0]); hipLaunchKernelGGL(( SliceKernel<float>), dim3(blocks), dim3(threads), 3 * num_dims * sizeof(int), stream, out_num, num_dims, input1, offset_temp_data_, output); } else if (input_type == nvinfer1::DataType::kHALF) { VLOG(1) << "TRT Plugin DataType selected. Slice-->fp16"; const half *input1 = static_cast<const half *>(inputs[0]); half *output = static_cast<half *>(outputs[0]); hipLaunchKernelGGL(( SliceKernel<half>), dim3(blocks), dim3(threads), 3 * num_dims * sizeof(int), stream, out_num, num_dims, input1, offset_temp_data_, output); } else { PADDLE_THROW(platform::errors::Fatal( "The Slice TRT Plugin's input type should be float or half.")); } return hipGetLastError() != hipSuccess; } size_t SlicePlugin::getSerializationSize() const TRT_NOEXCEPT { return getBaseSerializationSize() + SerializedSize(starts_) + SerializedSize(ends_) + SerializedSize(axes_) + SerializedSize(with_fp16_); } void SlicePlugin::serialize(void *buffer) const TRT_NOEXCEPT { serializeBase(buffer); SerializeValue(&buffer, starts_); SerializeValue(&buffer, ends_); SerializeValue(&buffer, axes_); SerializeValue(&buffer, with_fp16_); } // Dynamic Plugin below. 
#if IS_TRT_VERSION_GE(6000) SlicePluginDynamic::SlicePluginDynamic(std::vector<int> starts, std::vector<int> ends, std::vector<int> axes, bool with_fp16) : starts_(starts), ends_(ends), axes_(axes) { with_fp16_ = with_fp16; hipEventCreate(&copy_event_); hipStreamCreate(&copy_stream_); } SlicePluginDynamic::SlicePluginDynamic(void const *serialData, size_t serialLength) { DeserializeValue(&serialData, &serialLength, &starts_); DeserializeValue(&serialData, &serialLength, &ends_); DeserializeValue(&serialData, &serialLength, &axes_); DeserializeValue(&serialData, &serialLength, &with_fp16_); hipEventCreate(&copy_event_); hipStreamCreate(&copy_stream_); } void SlicePluginDynamic::destroy() TRT_NOEXCEPT { hipStreamDestroy(copy_stream_); hipEventDestroy(copy_event_); hipFree(offset_temp_data_); delete this; } int SlicePluginDynamic::initialize() TRT_NOEXCEPT { return 0; } size_t SlicePluginDynamic::getSerializationSize() const TRT_NOEXCEPT { size_t size = SerializedSize(starts_) + SerializedSize(ends_) + SerializedSize(axes_) + SerializedSize(with_fp16_); return size; } void SlicePluginDynamic::serialize(void *buffer) const TRT_NOEXCEPT { SerializeValue(&buffer, starts_); SerializeValue(&buffer, ends_); SerializeValue(&buffer, axes_); SerializeValue(&buffer, with_fp16_); } nvinfer1::DimsExprs SlicePluginDynamic::getOutputDimensions( int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs, nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT { auto in_dims = inputs[0]; nvinfer1::DimsExprs ret = in_dims; // start, ends should greater 0 for (size_t i = 0; i < axes_.size(); i++) { int start = starts_[i]; int end = ends_[i]; #if IS_TRT_VERSION_GE(7200) ret.d[axes_[i]] = expr_builder.operation( nvinfer1::DimensionOperation::kSUB, *expr_builder.operation(nvinfer1::DimensionOperation::kMIN, *expr_builder.constant(ends_[i]), *in_dims.d[axes_[i]]), *expr_builder.constant(start)); #else ret.d[axes_[i]] = expr_builder.constant(end - start); #endif } return ret; } bool SlicePluginDynamic::supportsFormatCombination( int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs, int nb_outputs) TRT_NOEXCEPT { PADDLE_ENFORCE_NOT_NULL( in_out, platform::errors::InvalidArgument( "The input of swish plugin shoule not be nullptr.")); PADDLE_ENFORCE_LT( pos, nb_inputs + nb_outputs, platform::errors::InvalidArgument("The pos(%d) should be less than the " "num(%d) of the input and the output.", pos, nb_inputs + nb_outputs)); const nvinfer1::PluginTensorDesc &in = in_out[pos]; if (pos == 0) { if (with_fp16_) { return ( #if IS_TRT_VERSION_LT(8000) in.type == nvinfer1::DataType::kFLOAT || #endif in.type == nvinfer1::DataType::kHALF) && (in.format == nvinfer1::TensorFormat::kLINEAR); } else { return (in.type == nvinfer1::DataType::kFLOAT) && (in.format == nvinfer1::TensorFormat::kLINEAR); } } const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1]; // output return in.type == prev.type && in.format == prev.format; } nvinfer1::DataType SlicePluginDynamic::getOutputDataType( int index, const nvinfer1::DataType *input_types, int nb_inputs) const TRT_NOEXCEPT { PADDLE_ENFORCE_EQ(index, 0, platform::errors::InvalidArgument( "The Slice Plugin only has one input, so the " "index value should be 0, but get %d.", index)); PADDLE_ENFORCE_EQ((input_types[0] == nvinfer1::DataType::kFLOAT || input_types[0] == nvinfer1::DataType::kHALF), true, platform::errors::InvalidArgument( "The input type should be half or float")); return input_types[0]; } int SlicePluginDynamic::enqueue(const nvinfer1::PluginTensorDesc *input_desc, 
const nvinfer1::PluginTensorDesc *output_desc, const void *const *inputs, void *const *outputs, void *workspace, hipStream_t stream) TRT_NOEXCEPT { auto input_dims = input_desc[0].dims; auto out_dims = output_desc[0].dims; auto num_dims = input_dims.nbDims; size_t out_num = ProductDim(out_dims); std::vector<int> seg_offsets; std::vector<int> offsets; std::vector<int> extends; offsets.resize(num_dims); extends.resize(num_dims); seg_offsets.resize(num_dims); seg_offsets[num_dims - 1] = 1; for (int i = num_dims - 2; i >= 0; i--) { seg_offsets[i] = input_dims.d[i + 1] * seg_offsets[i + 1]; } for (size_t i = 0; i < num_dims; ++i) { offsets[i] = 0; extends[i] = out_dims.d[i]; } for (size_t i = 0; i < axes_.size(); ++i) { offsets[axes_[i]] = starts_[i]; } std::vector<int> offset_info; for (size_t i = 0; i < num_dims; ++i) { offset_info.push_back(offsets[i]); offset_info.push_back(extends[i]); offset_info.push_back(seg_offsets[i]); } if (offset_temp_data_ == nullptr) { hipMalloc(&offset_temp_data_, 3 * num_dims * sizeof(int)); } hipMemcpyAsync(offset_temp_data_, offset_info.data(), sizeof(int) * 3 * num_dims, hipMemcpyHostToDevice, copy_stream_); hipEventRecord(copy_event_, copy_stream_); hipStreamWaitEvent(stream, copy_event_, 0); int threads = 256; int blocks = (out_num + threads - 1) / threads; auto input_type = input_desc[0].type; if (input_type == nvinfer1::DataType::kFLOAT) { VLOG(1) << "TRT Plugin DataType selected. Slice-->fp32"; const float *input1 = static_cast<const float *>(inputs[0]); float *output = static_cast<float *>(outputs[0]); hipLaunchKernelGGL(( SliceKernel<float>), dim3(blocks), dim3(threads), 3 * num_dims * sizeof(int), stream, out_num, num_dims, input1, offset_temp_data_, output); } else if (input_type == nvinfer1::DataType::kHALF) { VLOG(1) << "TRT Plugin DataType selected. Slice-->fp16"; const half *input1 = static_cast<const half *>(inputs[0]); half *output = static_cast<half *>(outputs[0]); hipLaunchKernelGGL(( SliceKernel<half>), dim3(blocks), dim3(threads), 3 * num_dims * sizeof(int), stream, out_num, num_dims, input1, offset_temp_data_, output); } else { PADDLE_THROW(platform::errors::Fatal( "The Slice TRT Plugin's input type should be float or half.")); } return hipGetLastError() != hipSuccess; } #endif } // namespace plugin } // namespace tensorrt } // namespace inference } // namespace paddle
771135d522b41f2c1b644b31224802837e0582e3.cu
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <cuda_runtime.h> #include <stdio.h> #include <cassert> #include <cub/cub.cuh> // NOLINT #include <vector> #include "glog/logging.h" #include "paddle/fluid/inference/tensorrt/plugin/slice_op_plugin.h" namespace paddle { namespace inference { namespace tensorrt { namespace plugin { template <typename T> __global__ void SliceKernel(int num, int dims, const T *input, const int *offsets_info, T *output) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; extern __shared__ int shared_data[]; for (int i = threadIdx.x; i < dims * 3; i += blockDim.x) { shared_data[i] = offsets_info[i]; } __syncthreads(); if (idx < num) { int t_idx = idx; int in_idx = 0; for (int i = dims - 1; i >= 0; i--) { // output_shape auto t = t_idx % shared_data[i * 3 + 1]; // out offset auto s = t + shared_data[i * 3]; // input_seg_offset in_idx = in_idx + shared_data[i * 3 + 2] * s; t_idx = t_idx / shared_data[i * 3 + 1]; } output[idx] = input[in_idx]; } } SlicePlugin::SlicePlugin(std::vector<int> starts, std::vector<int> ends, std::vector<int> axes, bool with_fp16) : starts_(starts), ends_(ends), axes_(axes) { with_fp16_ = with_fp16; cudaEventCreate(&copy_event_); cudaStreamCreate(&copy_stream_); } SlicePlugin::SlicePlugin(void const *serial_data, size_t serial_length) { deserializeBase(serial_data, serial_length); DeserializeValue(&serial_data, &serial_length, &starts_); DeserializeValue(&serial_data, &serial_length, &ends_); DeserializeValue(&serial_data, &serial_length, &axes_); DeserializeValue(&serial_data, &serial_length, &with_fp16_); cudaEventCreate(&copy_event_); cudaStreamCreate(&copy_stream_); } SlicePlugin::~SlicePlugin() { cudaStreamDestroy(copy_stream_); cudaEventDestroy(copy_event_); cudaFree(offset_temp_data_); } SlicePlugin *SlicePlugin::clone() const TRT_NOEXCEPT { return new SlicePlugin(starts_, ends_, axes_, with_fp16_); } bool SlicePlugin::supportsFormat( nvinfer1::DataType type, nvinfer1::PluginFormat format) const TRT_NOEXCEPT { if (with_fp16_) { return (( #if IS_TRT_VERSION_LT(8000) type == nvinfer1::DataType::kFLOAT || #endif type == nvinfer1::DataType::kHALF) && (format == nvinfer1::PluginFormat::kLINEAR)); } else { return ((type == nvinfer1::DataType::kFLOAT) && (format == nvinfer1::PluginFormat::kLINEAR)); } } nvinfer1::Dims SlicePlugin::getOutputDimensions( int index, const nvinfer1::Dims *inputs, int nb_input_dims) TRT_NOEXCEPT { auto in_dims = inputs[0]; nvinfer1::Dims out_dims = in_dims; for (size_t i = 0; i < axes_.size(); i++) { int start = starts_[i]; int end = ends_[i]; out_dims.d[axes_[i] - 1] = end - start; } return out_dims; } int SlicePlugin::enqueue(int batch_size, const void *const *inputs, #if IS_TRT_VERSION_LT(8000) void **outputs, void *workspace, cudaStream_t stream) { #else void *const *outputs, void *workspace, cudaStream_t stream) TRT_NOEXCEPT { #endif auto input_dims = getInputDims(0); // notice input dims is [C, H, W], add input batch dim here auto 
out_dims = getOutputDimensions(0, &input_dims, 1); input_dims.nbDims += 1; out_dims.nbDims += 1; for (auto i = input_dims.nbDims; i > 0; --i) { input_dims.d[i] = input_dims.d[i - 1]; out_dims.d[i] = out_dims.d[i - 1]; } input_dims.d[0] = batch_size; out_dims.d[0] = batch_size; auto num_dims = input_dims.nbDims; size_t out_num = ProductDim(out_dims); std::vector<int> seg_offsets; std::vector<int> offsets; std::vector<int> extends; offsets.resize(num_dims); extends.resize(num_dims); seg_offsets.resize(num_dims); seg_offsets[num_dims - 1] = 1; for (int i = num_dims - 2; i >= 0; i--) { seg_offsets[i] = input_dims.d[i + 1] * seg_offsets[i + 1]; } for (size_t i = 0; i < num_dims; ++i) { offsets[i] = 0; extends[i] = out_dims.d[i]; } for (size_t i = 0; i < axes_.size(); ++i) { offsets[axes_[i]] = starts_[i]; } std::vector<int> offset_info; for (size_t i = 0; i < num_dims; ++i) { offset_info.push_back(offsets[i]); offset_info.push_back(extends[i]); offset_info.push_back(seg_offsets[i]); } if (offset_temp_data_ == nullptr) { cudaMalloc(&offset_temp_data_, 3 * num_dims * sizeof(int)); } cudaMemcpyAsync(offset_temp_data_, offset_info.data(), sizeof(int) * 3 * num_dims, cudaMemcpyHostToDevice, copy_stream_); cudaEventRecord(copy_event_, copy_stream_); cudaStreamWaitEvent(stream, copy_event_, 0); int threads = 256; int blocks = (out_num + threads - 1) / threads; auto input_type = getDataType(); if (input_type == nvinfer1::DataType::kFLOAT) { VLOG(1) << "TRT Plugin DataType selected. Slice-->fp32"; const float *input1 = static_cast<const float *>(inputs[0]); float *output = static_cast<float *>(outputs[0]); SliceKernel<float><<<blocks, threads, 3 * num_dims * sizeof(int), stream>>>( out_num, num_dims, input1, offset_temp_data_, output); } else if (input_type == nvinfer1::DataType::kHALF) { VLOG(1) << "TRT Plugin DataType selected. Slice-->fp16"; const half *input1 = static_cast<const half *>(inputs[0]); half *output = static_cast<half *>(outputs[0]); SliceKernel<half><<<blocks, threads, 3 * num_dims * sizeof(int), stream>>>( out_num, num_dims, input1, offset_temp_data_, output); } else { PADDLE_THROW(platform::errors::Fatal( "The Slice TRT Plugin's input type should be float or half.")); } return cudaGetLastError() != cudaSuccess; } size_t SlicePlugin::getSerializationSize() const TRT_NOEXCEPT { return getBaseSerializationSize() + SerializedSize(starts_) + SerializedSize(ends_) + SerializedSize(axes_) + SerializedSize(with_fp16_); } void SlicePlugin::serialize(void *buffer) const TRT_NOEXCEPT { serializeBase(buffer); SerializeValue(&buffer, starts_); SerializeValue(&buffer, ends_); SerializeValue(&buffer, axes_); SerializeValue(&buffer, with_fp16_); } // Dynamic Plugin below. 
#if IS_TRT_VERSION_GE(6000) SlicePluginDynamic::SlicePluginDynamic(std::vector<int> starts, std::vector<int> ends, std::vector<int> axes, bool with_fp16) : starts_(starts), ends_(ends), axes_(axes) { with_fp16_ = with_fp16; cudaEventCreate(&copy_event_); cudaStreamCreate(&copy_stream_); } SlicePluginDynamic::SlicePluginDynamic(void const *serialData, size_t serialLength) { DeserializeValue(&serialData, &serialLength, &starts_); DeserializeValue(&serialData, &serialLength, &ends_); DeserializeValue(&serialData, &serialLength, &axes_); DeserializeValue(&serialData, &serialLength, &with_fp16_); cudaEventCreate(&copy_event_); cudaStreamCreate(&copy_stream_); } void SlicePluginDynamic::destroy() TRT_NOEXCEPT { cudaStreamDestroy(copy_stream_); cudaEventDestroy(copy_event_); cudaFree(offset_temp_data_); delete this; } int SlicePluginDynamic::initialize() TRT_NOEXCEPT { return 0; } size_t SlicePluginDynamic::getSerializationSize() const TRT_NOEXCEPT { size_t size = SerializedSize(starts_) + SerializedSize(ends_) + SerializedSize(axes_) + SerializedSize(with_fp16_); return size; } void SlicePluginDynamic::serialize(void *buffer) const TRT_NOEXCEPT { SerializeValue(&buffer, starts_); SerializeValue(&buffer, ends_); SerializeValue(&buffer, axes_); SerializeValue(&buffer, with_fp16_); } nvinfer1::DimsExprs SlicePluginDynamic::getOutputDimensions( int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs, nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT { auto in_dims = inputs[0]; nvinfer1::DimsExprs ret = in_dims; // start, ends should greater 0 for (size_t i = 0; i < axes_.size(); i++) { int start = starts_[i]; int end = ends_[i]; #if IS_TRT_VERSION_GE(7200) ret.d[axes_[i]] = expr_builder.operation( nvinfer1::DimensionOperation::kSUB, *expr_builder.operation(nvinfer1::DimensionOperation::kMIN, *expr_builder.constant(ends_[i]), *in_dims.d[axes_[i]]), *expr_builder.constant(start)); #else ret.d[axes_[i]] = expr_builder.constant(end - start); #endif } return ret; } bool SlicePluginDynamic::supportsFormatCombination( int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs, int nb_outputs) TRT_NOEXCEPT { PADDLE_ENFORCE_NOT_NULL( in_out, platform::errors::InvalidArgument( "The input of swish plugin shoule not be nullptr.")); PADDLE_ENFORCE_LT( pos, nb_inputs + nb_outputs, platform::errors::InvalidArgument("The pos(%d) should be less than the " "num(%d) of the input and the output.", pos, nb_inputs + nb_outputs)); const nvinfer1::PluginTensorDesc &in = in_out[pos]; if (pos == 0) { if (with_fp16_) { return ( #if IS_TRT_VERSION_LT(8000) in.type == nvinfer1::DataType::kFLOAT || #endif in.type == nvinfer1::DataType::kHALF) && (in.format == nvinfer1::TensorFormat::kLINEAR); } else { return (in.type == nvinfer1::DataType::kFLOAT) && (in.format == nvinfer1::TensorFormat::kLINEAR); } } const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1]; // output return in.type == prev.type && in.format == prev.format; } nvinfer1::DataType SlicePluginDynamic::getOutputDataType( int index, const nvinfer1::DataType *input_types, int nb_inputs) const TRT_NOEXCEPT { PADDLE_ENFORCE_EQ(index, 0, platform::errors::InvalidArgument( "The Slice Plugin only has one input, so the " "index value should be 0, but get %d.", index)); PADDLE_ENFORCE_EQ((input_types[0] == nvinfer1::DataType::kFLOAT || input_types[0] == nvinfer1::DataType::kHALF), true, platform::errors::InvalidArgument( "The input type should be half or float")); return input_types[0]; } int SlicePluginDynamic::enqueue(const nvinfer1::PluginTensorDesc 
*input_desc, const nvinfer1::PluginTensorDesc *output_desc, const void *const *inputs, void *const *outputs, void *workspace, cudaStream_t stream) TRT_NOEXCEPT { auto input_dims = input_desc[0].dims; auto out_dims = output_desc[0].dims; auto num_dims = input_dims.nbDims; size_t out_num = ProductDim(out_dims); std::vector<int> seg_offsets; std::vector<int> offsets; std::vector<int> extends; offsets.resize(num_dims); extends.resize(num_dims); seg_offsets.resize(num_dims); seg_offsets[num_dims - 1] = 1; for (int i = num_dims - 2; i >= 0; i--) { seg_offsets[i] = input_dims.d[i + 1] * seg_offsets[i + 1]; } for (size_t i = 0; i < num_dims; ++i) { offsets[i] = 0; extends[i] = out_dims.d[i]; } for (size_t i = 0; i < axes_.size(); ++i) { offsets[axes_[i]] = starts_[i]; } std::vector<int> offset_info; for (size_t i = 0; i < num_dims; ++i) { offset_info.push_back(offsets[i]); offset_info.push_back(extends[i]); offset_info.push_back(seg_offsets[i]); } if (offset_temp_data_ == nullptr) { cudaMalloc(&offset_temp_data_, 3 * num_dims * sizeof(int)); } cudaMemcpyAsync(offset_temp_data_, offset_info.data(), sizeof(int) * 3 * num_dims, cudaMemcpyHostToDevice, copy_stream_); cudaEventRecord(copy_event_, copy_stream_); cudaStreamWaitEvent(stream, copy_event_, 0); int threads = 256; int blocks = (out_num + threads - 1) / threads; auto input_type = input_desc[0].type; if (input_type == nvinfer1::DataType::kFLOAT) { VLOG(1) << "TRT Plugin DataType selected. Slice-->fp32"; const float *input1 = static_cast<const float *>(inputs[0]); float *output = static_cast<float *>(outputs[0]); SliceKernel<float><<<blocks, threads, 3 * num_dims * sizeof(int), stream>>>( out_num, num_dims, input1, offset_temp_data_, output); } else if (input_type == nvinfer1::DataType::kHALF) { VLOG(1) << "TRT Plugin DataType selected. Slice-->fp16"; const half *input1 = static_cast<const half *>(inputs[0]); half *output = static_cast<half *>(outputs[0]); SliceKernel<half><<<blocks, threads, 3 * num_dims * sizeof(int), stream>>>( out_num, num_dims, input1, offset_temp_data_, output); } else { PADDLE_THROW(platform::errors::Fatal( "The Slice TRT Plugin's input type should be float or half.")); } return cudaGetLastError() != cudaSuccess; } #endif } // namespace plugin } // namespace tensorrt } // namespace inference } // namespace paddle
29679305ccf806213d6fa5baba9a4cf086970759.hip
// !!! This is a file automatically generated by hipify!!!
/**
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include "WarpSelectImpl_hip.cuh"

namespace faiss {
namespace gpu {

WARP_SELECT_IMPL(float, true, 256, 4);
WARP_SELECT_IMPL(float, false, 256, 4);

}
} // namespace
29679305ccf806213d6fa5baba9a4cf086970759.cu
/**
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include "WarpSelectImpl.cuh"

namespace faiss {
namespace gpu {

WARP_SELECT_IMPL(float, true, 256, 4);
WARP_SELECT_IMPL(float, false, 256, 4);

}
} // namespace
d7e4b06f2ebfe03900a14ce7cd1f430bdc1fc1d8.hip
// !!! This is a file automatically generated by hipify!!! #ifndef MEANSHIFT_CU #define MEANSHIFT_CU #include <hip/hip_runtime.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "hip/hip_runtime_api.h" #include "vector_functions.hpp" #include "hip/hip_vector_types.h" #include "helper_math.h" #include "hip/device_functions.h" #include "commonDefines.h" #define MYASSERT(condition, ERROR) if (!(condition)) { printf("ERROR: %s \n", ERROR); return; } #define rev_sqrt_two_pi 0.3989422804 #define rev_two_pi rev_sqrt_two_pi*rev_sqrt_two_pi __device__ __host__ float gaussian_kernel(float dist2, float bandwidth) { const float rev_bandwidth = 1. / bandwidth; const float d2_frac_b2 = dist2 * rev_bandwidth * rev_bandwidth; float div = 1. / rev_two_pi * rev_bandwidth; float exp_ = div * expf(-0.5 * d2_frac_b2); return exp_; } __global__ void cuda_MeanShift_SharedMemory_2D(float* X, const float* I, const float* originalPoints, const int N, const int dim) { extern __shared__ float tile[TILE_WIDTH][2]; // for each pixel int tx = threadIdx.x; int row = blockIdx.x * blockDim.x + tx; float2 numerator = make_float2(0.0, 0.0); float denominator = 0.0; int it = row * dim; for (int tile_i = 0; tile_i < (N - 1) / TILE_WIDTH + 1; ++tile_i) { //loading phase - each thread load something into shared memory int row_t = tile_i * TILE_WIDTH + tx; int index = row_t * dim; if (row_t < N) { tile[tx][0] = originalPoints[index]; tile[tx][1] = originalPoints[index + 1]; } else { tile[tx][0] = 0.0; tile[tx][1] = 0.0; } __syncthreads(); //end of loading into shared memory if (row < N) // only the threads inside the bounds do some computation { float2 x_i = make_float2(I[it], I[it + 1]); //load input point //computing phase for (int j = 0; j < TILE_WIDTH; ++j) { float2 x_j = make_float2(tile[j][0], tile[j][1]); //from shared memory float2 sub = x_i - x_j; float distance2 = dot(sub, sub); float weight = gaussian_kernel(distance2, BW); numerator += x_j * weight; //accumulating denominator += weight; } } __syncthreads(); //end of computing phase for tile_ij } if (row < N) { //storing numerator /= denominator; X[it] = numerator.x; X[it + 1] = numerator.y; } } extern "C" void cudaMeanShift_sharedMemory_2D_wrapper(float* X, const float* I, const float* originalPoints, const int N, const int vecDim, dim3 gridDim, dim3 blockDim) { cuda_MeanShift_SharedMemory_2D << <gridDim, blockDim >> > (X, I, originalPoints, N, vecDim); } __global__ void cuda_MeanShift_2D(float* X, const float* I, const float* originalPoints, const int N, const int dim) { // for every pixel int tx = threadIdx.x; int row = blockIdx.x * blockDim.x + tx; float2 numerator = make_float2(0.0, 0.0); float denominator = 0.0; int it = row * dim; float2 y_i; if (row < N) { y_i = make_float2(I[it], I[it + 1]); //load input point //computing mean shift for (int j = 0; j < N; ++j) { float2 x_j = make_float2(originalPoints[j * dim], originalPoints[j * dim + 1]); //from central gpu memory float2 sub = y_i - x_j; float distance2 = dot(sub, sub); float weight = gaussian_kernel(distance2, BW); numerator += x_j * weight; //accumulating denominator += weight; } //storing numerator /= denominator; X[it] = 0; X[it + 1] = numerator.y; } } extern "C" void cudaMeanShift_2D_wrapper(float* X, const float* I, const float* originalPoints, const int N, const int vecDim, dim3 gridDim, dim3 blockDim) { cuda_MeanShift_2D << <gridDim, blockDim >> > (X, I, originalPoints, N, vecDim); } #endif // !MEANSHIFT_CU
d7e4b06f2ebfe03900a14ce7cd1f430bdc1fc1d8.cu
#ifndef MEANSHIFT_CU #define MEANSHIFT_CU #include <cuda.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "cuda_runtime_api.h" #include "vector_functions.hpp" #include "vector_types.h" #include "helper_math.h" #include "device_functions.h" #include "commonDefines.h" #define MYASSERT(condition, ERROR) if (!(condition)) { printf("ERROR: %s \n", ERROR); return; } #define rev_sqrt_two_pi 0.3989422804 #define rev_two_pi rev_sqrt_two_pi*rev_sqrt_two_pi __device__ __host__ float gaussian_kernel(float dist2, float bandwidth) { const float rev_bandwidth = 1. / bandwidth; const float d2_frac_b2 = dist2 * rev_bandwidth * rev_bandwidth; float div = 1. / rev_two_pi * rev_bandwidth; float exp_ = div * expf(-0.5 * d2_frac_b2); return exp_; } __global__ void cuda_MeanShift_SharedMemory_2D(float* X, const float* I, const float* originalPoints, const int N, const int dim) { extern __shared__ float tile[TILE_WIDTH][2]; // for each pixel int tx = threadIdx.x; int row = blockIdx.x * blockDim.x + tx; float2 numerator = make_float2(0.0, 0.0); float denominator = 0.0; int it = row * dim; for (int tile_i = 0; tile_i < (N - 1) / TILE_WIDTH + 1; ++tile_i) { //loading phase - each thread load something into shared memory int row_t = tile_i * TILE_WIDTH + tx; int index = row_t * dim; if (row_t < N) { tile[tx][0] = originalPoints[index]; tile[tx][1] = originalPoints[index + 1]; } else { tile[tx][0] = 0.0; tile[tx][1] = 0.0; } __syncthreads(); //end of loading into shared memory if (row < N) // only the threads inside the bounds do some computation { float2 x_i = make_float2(I[it], I[it + 1]); //load input point //computing phase for (int j = 0; j < TILE_WIDTH; ++j) { float2 x_j = make_float2(tile[j][0], tile[j][1]); //from shared memory float2 sub = x_i - x_j; float distance2 = dot(sub, sub); float weight = gaussian_kernel(distance2, BW); numerator += x_j * weight; //accumulating denominator += weight; } } __syncthreads(); //end of computing phase for tile_ij } if (row < N) { //storing numerator /= denominator; X[it] = numerator.x; X[it + 1] = numerator.y; } } extern "C" void cudaMeanShift_sharedMemory_2D_wrapper(float* X, const float* I, const float* originalPoints, const int N, const int vecDim, dim3 gridDim, dim3 blockDim) { cuda_MeanShift_SharedMemory_2D << <gridDim, blockDim >> > (X, I, originalPoints, N, vecDim); } __global__ void cuda_MeanShift_2D(float* X, const float* I, const float* originalPoints, const int N, const int dim) { // for every pixel int tx = threadIdx.x; int row = blockIdx.x * blockDim.x + tx; float2 numerator = make_float2(0.0, 0.0); float denominator = 0.0; int it = row * dim; float2 y_i; if (row < N) { y_i = make_float2(I[it], I[it + 1]); //load input point //computing mean shift for (int j = 0; j < N; ++j) { float2 x_j = make_float2(originalPoints[j * dim], originalPoints[j * dim + 1]); //from central gpu memory float2 sub = y_i - x_j; float distance2 = dot(sub, sub); float weight = gaussian_kernel(distance2, BW); numerator += x_j * weight; //accumulating denominator += weight; } //storing numerator /= denominator; X[it] = 0; X[it + 1] = numerator.y; } } extern "C" void cudaMeanShift_2D_wrapper(float* X, const float* I, const float* originalPoints, const int N, const int vecDim, dim3 gridDim, dim3 blockDim) { cuda_MeanShift_2D << <gridDim, blockDim >> > (X, I, originalPoints, N, vecDim); } #endif // !MEANSHIFT_CU
20933ef612ed07ef5e92477ca251a75593cc3a9d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*********************************************** * # Copyright 2011. Thuy Diem Nguyen & Zejun Zheng * # Contact: [email protected] or [email protected] * # * # GPL 3.0 applies. * # * ************************************************/ // Note: don't use_fast_math option #include "kmerKernel.h" texture<uint, 2, hipReadModeElementType> texRef; texture<uint, 2, hipReadModeElementType> &getTexRef(void) { return texRef; } __global__ void kmerKernel(int * pairArray, float *distArray, int maxNumTuples, int numPairs) { uint matches = 0; uint x, y, k, l; uint tuple1Length, tuple2Length; int row, col; int pairIndex = blockIdx.x * blockDim.x + threadIdx.x; if (pairIndex < numPairs) { row = pairArray[pairIndex*2]; col = pairArray[pairIndex*2+1]; tuple1Length = tex2D(texRef, maxNumTuples * (row & 63) + (maxNumTuples-1), row/64); // tex2D( texRef, width, height ) tuple2Length = tex2D(texRef, maxNumTuples * (col & 63) + (maxNumTuples-1), col/64); // tex2D( texRef, width, height ) for (k = 0, l = 0; (k < tuple1Length) && (l < tuple2Length);) { x = tex2D(texRef, maxNumTuples * (row & 63) + k, row/64); y = tex2D(texRef, maxNumTuples * (col & 63) + l, col/64); matches = matches + (uint)(x==y); k = k + (uint)(x<=y); l = l + (uint)(x>=y); } distArray[pairIndex] = 1.0f - (float) matches / min(tuple1Length, tuple2Length); } else distArray[pairIndex] = -1.0f; } void launchKmerKernel(hipStream_t stream, dim3 blocksPerGrid, dim3 threadsPerBlock, int * d_pairArray, float* d_distArray, int maxNumTuples, int numPairs) { hipLaunchKernelGGL(( kmerKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, stream, d_pairArray, d_distArray, maxNumTuples, numPairs); } // General GPU Device CUDA Initialization int gpuDeviceInit(int devID) { int deviceCount; checkCudaErrors(hipGetDeviceCount(&deviceCount)); if (deviceCount == 0) { fprintf(stderr, "gpuDeviceInit() CUDA error: no devices supporting CUDA.\n"); exit(-1); } if (devID < 0) devID = 0; if (devID > deviceCount-1) { fprintf(stderr, "\n"); fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n", deviceCount); fprintf(stderr, ">> gpuDeviceInit (-device=%d) is not a valid GPU device. <<\n", devID); fprintf(stderr, "\n"); return -devID; } hipDeviceProp_t deviceProp; checkCudaErrors( hipGetDeviceProperties(&deviceProp, devID) ); if (deviceProp.major < 1) { fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n"); exit(-1); \ } checkCudaErrors( hipSetDevice(devID) ); printf("> gpuDeviceInit() CUDA device [%d]: %s\n", devID, deviceProp.name); return devID; }
20933ef612ed07ef5e92477ca251a75593cc3a9d.cu
/*********************************************** * # Copyright 2011. Thuy Diem Nguyen & Zejun Zheng * # Contact: [email protected] or [email protected] * # * # GPL 3.0 applies. * # * ************************************************/ // Note: don't use_fast_math option #include "kmerKernel.h" texture<uint, 2, cudaReadModeElementType> texRef; texture<uint, 2, cudaReadModeElementType> &getTexRef(void) { return texRef; } __global__ void kmerKernel(int * pairArray, float *distArray, int maxNumTuples, int numPairs) { uint matches = 0; uint x, y, k, l; uint tuple1Length, tuple2Length; int row, col; int pairIndex = blockIdx.x * blockDim.x + threadIdx.x; if (pairIndex < numPairs) { row = pairArray[pairIndex*2]; col = pairArray[pairIndex*2+1]; tuple1Length = tex2D(texRef, maxNumTuples * (row & 63) + (maxNumTuples-1), row/64); // tex2D( texRef, width, height ) tuple2Length = tex2D(texRef, maxNumTuples * (col & 63) + (maxNumTuples-1), col/64); // tex2D( texRef, width, height ) for (k = 0, l = 0; (k < tuple1Length) && (l < tuple2Length);) { x = tex2D(texRef, maxNumTuples * (row & 63) + k, row/64); y = tex2D(texRef, maxNumTuples * (col & 63) + l, col/64); matches = matches + (uint)(x==y); k = k + (uint)(x<=y); l = l + (uint)(x>=y); } distArray[pairIndex] = 1.0f - (float) matches / min(tuple1Length, tuple2Length); } else distArray[pairIndex] = -1.0f; } void launchKmerKernel(cudaStream_t stream, dim3 blocksPerGrid, dim3 threadsPerBlock, int * d_pairArray, float* d_distArray, int maxNumTuples, int numPairs) { kmerKernel<<<blocksPerGrid, threadsPerBlock, 0, stream>>>(d_pairArray, d_distArray, maxNumTuples, numPairs); } // General GPU Device CUDA Initialization int gpuDeviceInit(int devID) { int deviceCount; checkCudaErrors(cudaGetDeviceCount(&deviceCount)); if (deviceCount == 0) { fprintf(stderr, "gpuDeviceInit() CUDA error: no devices supporting CUDA.\n"); exit(-1); } if (devID < 0) devID = 0; if (devID > deviceCount-1) { fprintf(stderr, "\n"); fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n", deviceCount); fprintf(stderr, ">> gpuDeviceInit (-device=%d) is not a valid GPU device. <<\n", devID); fprintf(stderr, "\n"); return -devID; } cudaDeviceProp deviceProp; checkCudaErrors( cudaGetDeviceProperties(&deviceProp, devID) ); if (deviceProp.major < 1) { fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n"); exit(-1); \ } checkCudaErrors( cudaSetDevice(devID) ); printf("> gpuDeviceInit() CUDA device [%d]: %s\n", devID, deviceProp.name); return devID; }
6d43bb3f57284d4ff1faae1c92398d2a70b86035.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void gamma_norm_kernel(float* img, int image_height, int image_width, int image_step)
{
    // The thread block has size (3,n). The first dimension of the thread block
    // corresponds to color channels.
    int channel = threadIdx.x;

    // The columns of the image are mapped to the first dimension of the block
    // grid, but to the second dimension of the thread block, as the first
    // already corresponds to color channels.
    int pixel_x = blockIdx.x * blockDim.y + threadIdx.y;

    // If current position is outside the image, stop here
    if(pixel_x >= image_width)
    {
        return;
    }

    // The columns of the image are mapped to the second dimension of the block
    // grid, but to the third dimension of the thread block.
    int pixel_y = blockIdx.y * blockDim.z + threadIdx.z;

    // If current position is outside the image, stop here
    if(pixel_y >= image_height)
    {
        return;
    }

    // Each row has image_step pixels and each pixel has three channels
    int in_pixel_idx = pixel_y * image_step + pixel_x * 3 + channel;

    // Finally perform the normalization
    img[in_pixel_idx] = sqrt(img[in_pixel_idx] / 256.0f);
}
6d43bb3f57284d4ff1faae1c92398d2a70b86035.cu
#include "includes.h" __global__ void gamma_norm_kernel(float* img, int image_height, int image_width, int image_step) { // The thread block has size (3,n). The first dimension of the thread block // corresponds to color channels. int channel = threadIdx.x; // The columns of the image are mapped to the first dimension of the block // grid, but to the second dimension of the thread block, as the first // already corresponds to color channels. int pixel_x = blockIdx.x * blockDim.y + threadIdx.y; // If current position is outside the image, stop here if(pixel_x >= image_width) { return; } // The columns of the image are mapped to the second dimension of the block // grid, but to the third dimension of the thread block. int pixel_y = blockIdx.y * blockDim.z + threadIdx.z; // If current position is outside the image, stop here if(pixel_y >= image_height) { return; } // Each row has image_step pixels and each pixel has three channels int in_pixel_idx = pixel_y * image_step + pixel_x * 3 + channel; // Finally perform the normalization img[in_pixel_idx] = sqrt(img[in_pixel_idx] / 256.0f); }
b561d7cf89692163d705f5b8bd6deae05aa73e4e.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * Copyright (c) 2019-2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "test_utils.h"

#include <algorithm>
#include <gtest/gtest.h>
#include <iostream>
#include <metrics/completeness_score.cuh>
#include <raft/cudart_utils.h>
#include <random>

namespace MLCommon {
namespace Metrics {

// parameter structure definition
struct completenessParam {
  int nElements;
  int lowerLabelRange;
  int upperLabelRange;
  bool sameArrays;
  double tolerance;
};

// test fixture class
template <typename T>
class completenessTest : public ::testing::TestWithParam<completenessParam> {
 protected:
  // the constructor
  void SetUp() override
  {
    // getting the parameters
    params = ::testing::TestWithParam<completenessParam>::GetParam();

    nElements       = params.nElements;
    lowerLabelRange = params.lowerLabelRange;
    upperLabelRange = params.upperLabelRange;

    // generating random value test input
    std::vector<int> arr1(nElements, 0);
    std::vector<int> arr2(nElements, 0);
    std::random_device rd;
    std::default_random_engine dre(rd());
    std::uniform_int_distribution<int> intGenerator(lowerLabelRange, upperLabelRange);

    std::generate(arr1.begin(), arr1.end(), [&]() { return intGenerator(dre); });
    if (params.sameArrays) {
      arr2 = arr1;
    } else {
      std::generate(arr2.begin(), arr2.end(), [&]() { return intGenerator(dre); });
    }

    // allocating and initializing memory to the GPU
    RAFT_CUDA_TRY(hipStreamCreate(&stream));

    rmm::device_uvector<T> truthClusterArray(nElements, stream);
    rmm::device_uvector<T> predClusterArray(nElements, stream);
    raft::update_device(truthClusterArray.data(), arr1.data(), (int)nElements, stream);
    raft::update_device(predClusterArray.data(), arr2.data(), (int)nElements, stream);

    // calculating the golden output
    double truthMI, truthEntropy;

    truthMI      = MLCommon::Metrics::mutual_info_score(truthClusterArray.data(),
                                                        predClusterArray.data(),
                                                        nElements,
                                                        lowerLabelRange,
                                                        upperLabelRange,
                                                        stream);
    truthEntropy = MLCommon::Metrics::entropy(
      predClusterArray.data(), nElements, lowerLabelRange, upperLabelRange, stream);

    if (truthEntropy) {
      truthCompleteness = truthMI / truthEntropy;
    } else
      truthCompleteness = 1.0;

    if (nElements == 0) truthCompleteness = 1.0;

    // calling the completeness CUDA implementation
    computedCompleteness = MLCommon::Metrics::completeness_score(truthClusterArray.data(),
                                                                 predClusterArray.data(),
                                                                 nElements,
                                                                 lowerLabelRange,
                                                                 upperLabelRange,
                                                                 stream);
  }

  // the destructor
  void TearDown() override { RAFT_CUDA_TRY(hipStreamDestroy(stream)); }

  // declaring the data values
  completenessParam params;
  T lowerLabelRange, upperLabelRange;
  int nElements               = 0;
  double truthCompleteness    = 0;
  double computedCompleteness = 0;
  hipStream_t stream          = 0;
};

// setting test parameter values
const std::vector<completenessParam> inputs = {{199, 1, 10, false, 0.000001},
                                               {200, 15, 100, false, 0.000001},
                                               {100, 1, 20, false, 0.000001},
                                               {10, 1, 10, false, 0.000001},
                                               {198, 1, 100, false, 0.000001},
                                               {300, 3, 99, false, 0.000001},
                                               {199, 1, 10, true, 0.000001},
                                               {200, 15, 100, true, 0.000001},
                                               {100, 1, 20, true, 0.000001},
                                               {10, 1, 10, true, 0.000001},
                                               {198, 1, 100, true, 0.000001},
                                               {300, 3, 99, true, 0.000001}};

// writing the test suite
typedef completenessTest<int> completenessTestClass;
TEST_P(completenessTestClass, Result)
{
  ASSERT_NEAR(computedCompleteness, truthCompleteness, params.tolerance);
}
INSTANTIATE_TEST_CASE_P(completeness, completenessTestClass, ::testing::ValuesIn(inputs));

}  // end namespace Metrics
}  // end namespace MLCommon
b561d7cf89692163d705f5b8bd6deae05aa73e4e.cu
/*
 * Copyright (c) 2019-2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "test_utils.h"

#include <algorithm>
#include <gtest/gtest.h>
#include <iostream>
#include <metrics/completeness_score.cuh>
#include <raft/cudart_utils.h>
#include <random>

namespace MLCommon {
namespace Metrics {

// parameter structure definition
struct completenessParam {
  int nElements;
  int lowerLabelRange;
  int upperLabelRange;
  bool sameArrays;
  double tolerance;
};

// test fixture class
template <typename T>
class completenessTest : public ::testing::TestWithParam<completenessParam> {
 protected:
  // the constructor
  void SetUp() override
  {
    // getting the parameters
    params = ::testing::TestWithParam<completenessParam>::GetParam();

    nElements       = params.nElements;
    lowerLabelRange = params.lowerLabelRange;
    upperLabelRange = params.upperLabelRange;

    // generating random value test input
    std::vector<int> arr1(nElements, 0);
    std::vector<int> arr2(nElements, 0);
    std::random_device rd;
    std::default_random_engine dre(rd());
    std::uniform_int_distribution<int> intGenerator(lowerLabelRange, upperLabelRange);

    std::generate(arr1.begin(), arr1.end(), [&]() { return intGenerator(dre); });
    if (params.sameArrays) {
      arr2 = arr1;
    } else {
      std::generate(arr2.begin(), arr2.end(), [&]() { return intGenerator(dre); });
    }

    // allocating and initializing memory to the GPU
    RAFT_CUDA_TRY(cudaStreamCreate(&stream));

    rmm::device_uvector<T> truthClusterArray(nElements, stream);
    rmm::device_uvector<T> predClusterArray(nElements, stream);
    raft::update_device(truthClusterArray.data(), arr1.data(), (int)nElements, stream);
    raft::update_device(predClusterArray.data(), arr2.data(), (int)nElements, stream);

    // calculating the golden output
    double truthMI, truthEntropy;

    truthMI      = MLCommon::Metrics::mutual_info_score(truthClusterArray.data(),
                                                        predClusterArray.data(),
                                                        nElements,
                                                        lowerLabelRange,
                                                        upperLabelRange,
                                                        stream);
    truthEntropy = MLCommon::Metrics::entropy(
      predClusterArray.data(), nElements, lowerLabelRange, upperLabelRange, stream);

    if (truthEntropy) {
      truthCompleteness = truthMI / truthEntropy;
    } else
      truthCompleteness = 1.0;

    if (nElements == 0) truthCompleteness = 1.0;

    // calling the completeness CUDA implementation
    computedCompleteness = MLCommon::Metrics::completeness_score(truthClusterArray.data(),
                                                                 predClusterArray.data(),
                                                                 nElements,
                                                                 lowerLabelRange,
                                                                 upperLabelRange,
                                                                 stream);
  }

  // the destructor
  void TearDown() override { RAFT_CUDA_TRY(cudaStreamDestroy(stream)); }

  // declaring the data values
  completenessParam params;
  T lowerLabelRange, upperLabelRange;
  int nElements               = 0;
  double truthCompleteness    = 0;
  double computedCompleteness = 0;
  cudaStream_t stream         = 0;
};

// setting test parameter values
const std::vector<completenessParam> inputs = {{199, 1, 10, false, 0.000001},
                                               {200, 15, 100, false, 0.000001},
                                               {100, 1, 20, false, 0.000001},
                                               {10, 1, 10, false, 0.000001},
                                               {198, 1, 100, false, 0.000001},
                                               {300, 3, 99, false, 0.000001},
                                               {199, 1, 10, true, 0.000001},
                                               {200, 15, 100, true, 0.000001},
                                               {100, 1, 20, true, 0.000001},
                                               {10, 1, 10, true, 0.000001},
                                               {198, 1, 100, true, 0.000001},
                                               {300, 3, 99, true, 0.000001}};

// writing the test suite
typedef completenessTest<int> completenessTestClass;
TEST_P(completenessTestClass, Result)
{
  ASSERT_NEAR(computedCompleteness, truthCompleteness, params.tolerance);
}
INSTANTIATE_TEST_CASE_P(completeness, completenessTestClass, ::testing::ValuesIn(inputs));

}  // end namespace Metrics
}  // end namespace MLCommon
0f41b6cb360b41aba560c561af25b90c40b20c01.hip
// !!! This is a file automatically generated by hipify!!! /** * @brief Utilities for creating FSAs. * * Note that serializations are done in Python. * * @copyright * Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu) * Mobvoi Inc. (authors: Fangjun Kuang) * Guoguo Chen * * @copyright * See LICENSE for clarification regarding multiple authors */ #include <algorithm> #include <limits> #include <sstream> #include <utility> #include <vector> #include "k2/csrc/array.h" #include "k2/csrc/context.h" #include "k2/csrc/fsa.h" #include "k2/csrc/fsa_utils.h" #include "k2/csrc/math.h" #include "k2/csrc/ragged.h" namespace k2 { // field separator within a line for a text form FSA static constexpr const char *kDelim = " \t"; // Convert a string to an integer. Abort the program on failure. static int32_t StringToInt(const std::string &s) { NVTX_RANGE(K2_FUNC); K2_CHECK(!s.empty()); bool ok = false; char *p = nullptr; // std::strtol requires a `long` type long n = std::strtol(s.c_str(), &p, 10); // NOLINT if (*p == '\0') ok = true; auto res = static_cast<int32_t>(n); if (n != res) ok = false; // out of range K2_CHECK(ok) << "Failed to convert " << s << " to an integer"; return res; } // Convert a string to a float. Abort the program on failure. // TODO(guoguo): We may run into locale problems, with comma vs. period for // decimals. We have to test if the C code will behave the same // w.r.t. locale as Python does. static float StringToFloat(const std::string &s) { NVTX_RANGE(K2_FUNC); K2_CHECK(!s.empty()); char *p = nullptr; float f = std::strtof(s.c_str(), &p); if (*p != '\0') K2_LOG(FATAL) << "Failed to convert " << s << " to a float"; return f; } // Trim leading and trailing spaces of a string. static void TrimString(std::string *s) { NVTX_RANGE(K2_FUNC); K2_CHECK_NE(s, nullptr); auto not_space = [](int32_t c) -> bool { return std::isspace(c) == 0; }; s->erase(s->begin(), std::find_if(s->begin(), s->end(), not_space)); s->erase(std::find_if(s->rbegin(), s->rend(), not_space).base(), s->end()); } /* Split a string to a vector of strings using a set of delimiters. Example usage: @code std::string in = "1 2 3"; const char *delim = " \t"; std::vector<std::string> out; SplitStringToVector(in, delim, &out); @endcode @param [in] in The input string to be split. @param [in] delim A string of delimiters. @param [out] out It saves the split result. */ static void SplitStringToVector(const std::string &in, const char *delim, std::vector<std::string> *out) { NVTX_RANGE(K2_FUNC); K2_CHECK_NE(delim, nullptr); K2_CHECK_NE(out, nullptr); out->clear(); std::size_t start = 0; while (true) { auto pos = in.find_first_of(delim, start); if (pos == std::string::npos) break; auto sub = in.substr(start, pos - start); start = pos + 1; TrimString(&sub); if (!sub.empty()) out->emplace_back(std::move(sub)); } if (start < in.size()) { auto sub = in.substr(start); TrimString(&sub); if (!sub.empty()) out->emplace_back(std::move(sub)); } } /* Create an acceptor from a stream, assuming the acceptor is in the k2 format: src_state1 dest_state1 label1 score1 src_state2 dest_state2 label2 score2 ... ... final_state The source states will be in non-descending order, and the final state does not bear a cost/score -- we put the cost/score on the arc that connects to the final state and set its label to -1. @param [in] is The input stream that contains the acceptor. @return It returns an Fsa on CPU. 
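
     A minimal illustrative example (hypothetical input, not taken from the k2
     sources): a three-state acceptor whose last arc carries label -1 into the
     final state, parsed via FsaFromString with its default arguments (which
     dispatch to this function):

       @code
       std::string s = "0 1 10 0.1\n"
                       "1 2 -1 0.5\n"
                       "2";
       Fsa fsa = FsaFromString(s);  // openfst == false, aux_labels == nullptr
       @endcode
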
*/ static Fsa K2AcceptorFromStream(std::istringstream &is) { NVTX_RANGE(K2_FUNC); std::vector<Arc> arcs; std::vector<std::string> splits; std::string line; bool finished = false; // when the final state is read, set it to true. while (std::getline(is, line)) { SplitStringToVector(line, kDelim, &splits); // splits is cleared in the function if (splits.empty()) continue; // this is an empty line K2_CHECK_EQ(finished, false); auto num_fields = splits.size(); if (num_fields == 4u) { // 0 1 2 3 // src_state dest_state label score int32_t src_state = StringToInt(splits[0]); int32_t dest_state = StringToInt(splits[1]); int32_t symbol = StringToInt(splits[2]); float score = StringToFloat(splits[3]); arcs.emplace_back(src_state, dest_state, symbol, score); } else if (num_fields == 1u) { // 0 // final_state (void)StringToInt(splits[0]); // this is a final state finished = true; // set finish } else { K2_LOG(FATAL) << "Invalid line: " << line << "\nk2 acceptor expects a line with 1 (final_state) or " "4 (src_state dest_state label score) fields"; } } K2_CHECK_EQ(finished, true) << "The last line should be the final state"; bool error = true; Array1<Arc> array(GetCpuContext(), arcs); auto fsa = FsaFromArray1(array, &error); K2_CHECK_EQ(error, false); return fsa; } /* Create a transducer from a stream, assuming the transducer is in the K2 format: src_state1 dest_state1 label1 aux_label1 score1 src_state2 dest_state2 label2 aux_label2 score2 ... ... final_state The source states will be in non-descending order, and the final state does not bear a cost/score -- we put the cost/score on the arc that connects to the final state and set its label to -1. @param [in] is The input stream that contains the transducer. @return It returns an Fsa on CPU. */ static Fsa K2TransducerFromStream(std::istringstream &is, Array1<int32_t> *aux_labels) { NVTX_RANGE(K2_FUNC); K2_CHECK(aux_labels != nullptr); std::vector<int32_t> aux_labels_internal; std::vector<Arc> arcs; std::vector<std::string> splits; std::string line; bool finished = false; // when the final state is read, set it to true. while (std::getline(is, line)) { SplitStringToVector(line, kDelim, &splits); // splits is cleared in the function if (splits.empty()) continue; // this is an empty line K2_CHECK_EQ(finished, false); auto num_fields = splits.size(); if (num_fields == 5u) { // 0 1 2 3 4 // src_state dest_state label aux_label score int32_t src_state = StringToInt(splits[0]); int32_t dest_state = StringToInt(splits[1]); int32_t symbol = StringToInt(splits[2]); int32_t aux_label = StringToInt(splits[3]); float score = StringToFloat(splits[4]); arcs.emplace_back(src_state, dest_state, symbol, score); aux_labels_internal.push_back(aux_label); } else if (num_fields == 1u) { // 0 // final_state (void)StringToInt(splits[0]); finished = true; // set finish } else { K2_LOG(FATAL) << "Invalid line: " << line << "\nk2 transducer expects a line with 1 (final_state) or " "5 (src_state dest_state label aux_label score) fields"; } } K2_CHECK_EQ(finished, true) << "The last line should be the final state"; auto cpu_context = GetCpuContext(); *aux_labels = Array1<int32_t>(cpu_context, aux_labels_internal); Array1<Arc> array(cpu_context, arcs); bool error = true; auto fsa = FsaFromArray1(array, &error); K2_CHECK_EQ(error, false); return fsa; } /* Create an acceptor from a stream, assuming the acceptor is in the OpenFST format: src_state1 dest_state1 label1 score1 src_state2 dest_state2 label2 score2 ... ... 
final_state final_score We will negate the cost/score when we read them in. Also note, OpenFST may omit the cost/score if it is 0.0. We always create the super final state. If there are final state(s) in the original FSA, then we add arc(s) from the original final state(s) to the super final state, with the (negated) old final state cost/score as its cost/score, and -1 as its label. @param [in] is The input stream that contains the acceptor. @return It returns an Fsa on CPU. */ static Fsa OpenFstAcceptorFromStream(std::istringstream &is) { NVTX_RANGE(K2_FUNC); std::vector<Arc> arcs; std::vector<std::vector<Arc>> state_to_arcs; // indexed by states std::vector<std::string> splits; std::string line; int32_t max_state = -1; int32_t num_arcs = 0; std::vector<int32_t> original_final_states; std::vector<float> original_final_weights; while (std::getline(is, line)) { SplitStringToVector(line, kDelim, &splits); // splits is cleared in the function if (splits.empty()) continue; // this is an empty line auto num_fields = splits.size(); if (num_fields == 3u || num_fields == 4u) { // 0 1 2 // src_state dest_state label // // or // // 0 1 2 3 // src_state dest_state label score int32_t src_state = StringToInt(splits[0]); int32_t dest_state = StringToInt(splits[1]); int32_t symbol = StringToInt(splits[2]); float score = 0.0f; if (num_fields == 4u) score = -1.0f * StringToFloat(splits[3]); // Add the arc to "state_to_arcs". ++num_arcs; max_state = ::max(max_state, ::max(src_state, dest_state)); if (static_cast<int32_t>(state_to_arcs.size()) <= src_state) state_to_arcs.resize(src_state + 1); state_to_arcs[src_state].emplace_back(src_state, dest_state, symbol, score); } else if (num_fields == 1u || num_fields == 2u) { // 0 1 // final_state score float score = 0.0f; if (num_fields == 2u) score = -1.0f * StringToFloat(splits[1]); original_final_states.push_back(StringToInt(splits[0])); original_final_weights.push_back(score); max_state = ::max(max_state, original_final_states.back()); } else { K2_LOG(FATAL) << "Invalid line: " << line << "\nOpenFST acceptor expects a line with 1 (final_state)," " 2 (final_state score), 3 (src_state dest_state label) " "or 4 (src_state dest_state label score) fields."; } } K2_CHECK(is.eof()); // Post processing on final states. If there are final state(s) in the // original FSA, we add the super final state as well as arc(s) from original // final state(s) to the super final state. Otherwise, the super final state // will be added by FsaFromArray1 (since there's no arc with label // kFinalSymbol). if (original_final_states.size() > 0) { K2_CHECK_EQ(original_final_states.size(), original_final_weights.size()); int32_t super_final_state = max_state + 1; state_to_arcs.resize(super_final_state); for (std::size_t i = 0; i != original_final_states.size(); ++i) { state_to_arcs[original_final_states[i]].emplace_back( original_final_states[i], super_final_state, -1, // kFinalSymbol original_final_weights[i]); ++num_arcs; } } // Move arcs from "state_to_arcs" to "arcs". int32_t arc_index = 0; arcs.resize(num_arcs); for (std::size_t s = 0; s < state_to_arcs.size(); ++s) { for (std::size_t a = 0; a < state_to_arcs[s].size(); ++a) { K2_CHECK_GT(num_arcs, arc_index); arcs[arc_index] = state_to_arcs[s][a]; ++arc_index; } } K2_CHECK_EQ(num_arcs, arc_index); bool error = true; Array1<Arc> array(GetCpuContext(), arcs); // FsaFromArray1 will add a super final state if the original FSA doesn't have // a final state. 
auto fsa = FsaFromArray1(array, &error); K2_CHECK_EQ(error, false); return fsa; } /* Create a transducer from a stream, assuming the transducer is in the OpenFST format: src_state1 dest_state1 label1 aux_label1 score1 src_state2 dest_state2 label2 aux_label2 score2 ... ... final_state final_score We will negate the cost/score when we read them in. Also note, OpenFST may omit the cost/score if it is 0.0. We always create the super final state. If there are final state(s) in the original FST, then we add arc(s) from the original final state(s) to the super final state, with the (negated) old final state cost/score as its cost/score, -1 as its label and -1 as its aux_label. @param [in] is The input stream that contains the transducer. @return It returns an Fsa on CPU. */ static Fsa OpenFstTransducerFromStream(std::istringstream &is, Array1<int32_t> *aux_labels) { NVTX_RANGE(K2_FUNC); K2_CHECK(aux_labels != nullptr); std::vector<std::vector<int32_t>> state_to_aux_labels; // indexed by states std::vector<std::vector<Arc>> state_to_arcs; // indexed by states std::vector<int32_t> aux_labels_internal; std::vector<Arc> arcs; std::vector<std::string> splits; std::string line; int32_t max_state = -1; int32_t num_arcs = 0; std::vector<int32_t> original_final_states; std::vector<float> original_final_weights; while (std::getline(is, line)) { SplitStringToVector(line, kDelim, &splits); // splits is cleared in the function if (splits.empty()) continue; // this is an empty line auto num_fields = splits.size(); if (num_fields == 4u || num_fields == 5u) { // 0 1 2 3 // src_state dest_state label aux_label // // or // // 0 1 2 3 4 // src_state dest_state label aux_label score int32_t src_state = StringToInt(splits[0]); int32_t dest_state = StringToInt(splits[1]); int32_t symbol = StringToInt(splits[2]); int32_t aux_label = StringToInt(splits[3]); float score = 0.0f; if (num_fields == 5u) score = -1.0f * StringToFloat(splits[4]); // Add the arc to "state_to_arcs", and aux_label to "state_to_aux_labels" ++num_arcs; max_state = ::max(max_state, ::max(src_state, dest_state)); if (static_cast<int32_t>(state_to_arcs.size()) <= src_state) { state_to_arcs.resize(src_state + 1); state_to_aux_labels.resize(src_state + 1); } state_to_arcs[src_state].emplace_back(src_state, dest_state, symbol, score); state_to_aux_labels[src_state].push_back(aux_label); } else if (num_fields == 1u || num_fields == 2u) { // 0 // final_state // // or // // 0 1 // final_state score // There could be multiple final states, so we first have to collect all // the final states, and then work out the super final state. float score = 0.0f; if (num_fields == 2u) score = -1.0f * StringToFloat(splits[1]); original_final_states.push_back(StringToInt(splits[0])); original_final_weights.push_back(score); max_state = ::max(max_state, original_final_states.back()); } else { K2_LOG(FATAL) << "Invalid line: " << line << "\nOpenFST transducer expects a line with " "1 (final_state), 2 (final_state score), " "4 (src_state dest_state label aux_label) or " "5 (src_state dest_state label aux_label score) fields."; } } K2_CHECK(is.eof()); // Post processing on final states. If there are final state(s) in the // original FST, we add the super final state as well as arc(s) from original // final state(s) to the super final state. Otherwise, the super final state // will be added by FsaFromArray1 (since there's no arc with label // kFinalSymbol). 
if (original_final_states.size() > 0) { K2_CHECK_EQ(original_final_states.size(), original_final_weights.size()); int32_t super_final_state = max_state + 1; state_to_arcs.resize(super_final_state); state_to_aux_labels.resize(super_final_state); for (std::size_t i = 0; i != original_final_states.size(); ++i) { state_to_arcs[original_final_states[i]].emplace_back( original_final_states[i], super_final_state, -1, // kFinalSymbol original_final_weights[i]); state_to_aux_labels[original_final_states[i]].push_back( -1); // kFinalSymbol ++num_arcs; } } // Move arcs from "state_to_arcs" to "arcs", and aux_labels from // "state_to_aux_labels" to "aux_labels_internal" int32_t arc_index = 0; arcs.resize(num_arcs); aux_labels_internal.resize(num_arcs); K2_CHECK_EQ(state_to_arcs.size(), state_to_aux_labels.size()); for (std::size_t s = 0; s < state_to_arcs.size(); ++s) { K2_CHECK_EQ(state_to_arcs[s].size(), state_to_aux_labels[s].size()); for (std::size_t a = 0; a < state_to_arcs[s].size(); ++a) { K2_CHECK_GT(num_arcs, arc_index); arcs[arc_index] = state_to_arcs[s][a]; aux_labels_internal[arc_index] = state_to_aux_labels[s][a]; ++arc_index; } } K2_CHECK_EQ(num_arcs, arc_index); auto cpu_context = GetCpuContext(); *aux_labels = Array1<int32_t>(cpu_context, aux_labels_internal); Array1<Arc> array(cpu_context, arcs); bool error = true; // FsaFromArray1 will add a super final state if the original FSA doesn't have // a final state. auto fsa = FsaFromArray1(array, &error); K2_CHECK_EQ(error, false); return fsa; } Fsa FsaFromString(const std::string &s, bool openfst /*= false*/, Array1<int32_t> *aux_labels /*= nullptr*/) { NVTX_RANGE(K2_FUNC); std::istringstream is(s); K2_CHECK(is); if (openfst == false && aux_labels == nullptr) return K2AcceptorFromStream(is); else if (openfst == false && aux_labels != nullptr) return K2TransducerFromStream(is, aux_labels); else if (openfst == true && aux_labels == nullptr) return OpenFstAcceptorFromStream(is); else if (openfst == true && aux_labels != nullptr) return OpenFstTransducerFromStream(is, aux_labels); return Fsa(); // unreachable code } std::string FsaToString(const Fsa &fsa, bool openfst /*= false*/, const Array1<int32_t> *aux_labels /*= nullptr*/) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(fsa.NumAxes(), 2); if (fsa.Context()->GetDeviceType() != kCpu) { Fsa _fsa = fsa.To(GetCpuContext()); Array1<int32_t> _aux_labels; if (aux_labels) _aux_labels = aux_labels->To(_fsa.Context()); return FsaToString(_fsa, openfst, aux_labels ? 
&_aux_labels : nullptr); } K2_CHECK_EQ(fsa.Context()->GetDeviceType(), kCpu); const Array1<int32_t> &row_splits = fsa.shape.RowSplits(1); const Array1<Arc> &arcs = fsa.values; const int32_t *p = nullptr; if (aux_labels != nullptr) { K2_CHECK(IsCompatible(fsa, *aux_labels)); K2_CHECK_EQ(aux_labels->Dim(), arcs.Dim()); p = aux_labels->Data(); } float scale = 1; if (openfst) scale = -1; std::ostringstream os; int32_t n = arcs.Dim(); char sep = ' '; char line_sep = '\n'; for (int32_t i = 0; i != n; ++i) { const auto &arc = arcs[i]; os << arc.src_state << sep << arc.dest_state << sep << arc.label << sep; if (p != nullptr) os << p[i] << sep; os << (scale * arc.score) << line_sep; } os << (fsa.shape.Dim0() - 1) << line_sep; return os.str(); } Array1<int32_t> GetDestStates(FsaVec &fsas, bool as_idx01) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(fsas.NumAxes(), 3); ContextPtr &c = fsas.Context(); int32_t num_arcs = fsas.NumElements(); Array1<int32_t> ans(c, num_arcs); const Arc *arcs_data = fsas.values.Data(); int32_t *ans_data = ans.Data(); if (!as_idx01) { K2_EVAL( c, num_arcs, lambda_set_dest_states1, (int32_t arc_idx012) { ans_data[arc_idx012] = arcs_data[arc_idx012].dest_state; }); } else { const int32_t *row_ids2 = fsas.RowIds(2).Data(); K2_EVAL( c, num_arcs, lambda_set_dest_states01, (int32_t arc_idx012) { int32_t src_state = arcs_data[arc_idx012].src_state, dest_state = arcs_data[arc_idx012].dest_state; // (row_ids2[arc_idx012] - src_state) is the same as // row_splits1[row_ids1[row_ids2[arc_idx012]]]; it's the idx01 of the // 1st state in this FSA. ans_data[arc_idx012] = dest_state + (row_ids2[arc_idx012] - src_state); }); } return ans; } Ragged<int32_t> GetStateBatches(FsaVec &fsas, bool transpose) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(fsas.NumAxes(), 3); ContextPtr &c = fsas.Context(); Array1<int32_t> arc_dest_states = GetDestStates(fsas, true); MonotonicLowerBound(arc_dest_states, &arc_dest_states); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); // We can tune `log_power` as a tradeoff between work done and clock time on // GPU. int32_t log_power = (c->GetDeviceType() == kCpu ? 0 : 4); int32_t max_num_states = fsas.shape.MaxSize(1); // the following avoids doing too much extra work accumulating powers // of 'dest_states' for very small problem sizes. while (log_power > 0 && (1 << (1 + log_power)) > max_num_states) log_power--; // Ignoring edge effects: `dest_states_powers[0]` is just an array indexed by // state_idx01, that gives us the dest_state_idx01 that would be the beginning // of the next batch if state_idx01 were the beginning of the current batch. // So if we follow this chain forward from the start of one of the FSAs until // it passes the end of this FSA, we get the beginnings of the batches // we want. The natural algorithm to find the beginnings of the batches // is sequential. 
Array2<int32_t> dest_states_powers(c, log_power + 1, num_states); const int32_t *arc_dest_states_data = arc_dest_states.Data(), *fsas_row_splits2_data = fsas.RowSplits(2).Data(); int32_t *dest_states_power_data = dest_states_powers.Data(); // only process Row[0] below const int32_t int_max = std::numeric_limits<int32_t>::max(); K2_EVAL( c, num_states, lambda_set_dest_states, (int32_t state_idx01)->void { int32_t arc_idx01x = fsas_row_splits2_data[state_idx01]; // If this state has arcs, let its `dest_state` be the smallest // `dest_state` of any of its arcs (which is the first element of those // arcs' dest states in `arc_dest_states_data`); otherwise, take the // `dest_state` from the 1st arc of the next state, which is the largest // value we can take (if the definition is: the highest-numbered state s // for which neither this state nor any later-numbered state has an arc // to a state lower than s). // if this state has arcs, // arc_idx01x is the first arc index of this state, we get the // smallest dest state of this state's arcs using // arc_dest_states_data[arc_idx01x] // else // arc_idx01x is the first arc index of the next state, then // arc_dest_states_data[arc_idx01x] is the largest value we can take, // which is also the smallest dest state in the next state. int32_t dest_state = (arc_idx01x < num_arcs ? arc_dest_states_data[arc_idx01x] : int_max); dest_states_power_data[state_idx01] = dest_state; // if the following fails, it's either a code error or the input FSA had // cycles. K2_CHECK_GT(dest_state, state_idx01); }); // `num_batches_per_fsa` will be set to the number of batches of states that // we'll use for each FSA... it corresponds to the number of times we have // to follow links forward in the dest_states array till we pass the // end of the array for this fSA. Array1<int32_t> num_batches_per_fsa(c, num_fsas + 1, 0); // `batch_starts` will contain the locations of the first state_idx01 for each // batch, but in an 'un-consolidated' format. Specifically, for FSA with // index i, the batch_starts for that FSA begin at element fsa.RowSplits(1)[i] // of `batch_starts`. This is just a convenient layout because we know there // can't be more batches than there are states. We'll later consolidate the // information into a single array. Array1<int32_t> batch_starts(c, num_states + 1); int32_t *num_batches_per_fsa_data = num_batches_per_fsa.Data(), *batch_starts_data = batch_starts.Data(); const int32_t *fsas_row_splits1_data = fsas.RowSplits(1).Data(); #if 0 // This is a simple version of the kernel that demonstrates what we're trying // to do with the more complex code. K2_EVAL( c, num_fsas, lambda_set_batch_info_simple, (int32_t fsa_idx) { int32_t begin_state_idx01 = fsas_row_splits1_data[fsa_idx], end_state_idx01 = fsas_row_splits1_data[fsa_idx + 1]; int32_t i = 0, cur_state_idx01 = begin_state_idx01; while (cur_state_idx01 < end_state_idx01) { batch_starts_data[begin_state_idx01 + i] = cur_state_idx01; cur_state_idx01 = dest_states_power_data[cur_state_idx01]; ++i; } num_batches_per_fsa_data[fsa_idx] = i; }); #else int32_t stride = dest_states_powers.ElemStride0(); for (int32_t power = 1; power <= log_power; power++) { const int32_t *src_data = dest_states_powers.Data() + (power - 1) * stride; int32_t *dest_data = dest_states_powers.Data() + power * stride; K2_EVAL( c, num_states, lambda_square_array, (int32_t state_idx01)->void { int32_t dest_state = src_data[state_idx01], dest_state_sq = (dest_state < num_states ? 
src_data[dest_state] : int_max); dest_data[state_idx01] = dest_state_sq; }); } // jobs_per_fsa tells us how many separate chains of states we'll follow for // each FSA. // jobs_multiple is a kind of trick to ensure any given warp doesn't // issue more memory requests than it can handle at a time (we drop // some threads). int32_t jobs_per_fsa = (1 << log_power), jobs_multiple = (c->GetDeviceType() == kCuda ? 8 : 1); while (jobs_multiple > 1 && jobs_per_fsa * jobs_multiple * num_fsas > 10000) jobs_multiple /= 2; // Likely won't get here. Just reduce multiple if // num-jobs is ridiculous. auto dest_states_powers_acc = dest_states_powers.Accessor(); K2_EVAL2( c, num_fsas, jobs_per_fsa * jobs_multiple, lambda_set_batch_info, (int32_t fsa_idx, int32_t j) { if (j % jobs_multiple != 0) return; // a trick to avoid too much random // memory access for any given warp int32_t task_idx = j / jobs_multiple; // Now 0 <= task_idx < jobs_per_fsa. // The task indexed `task_idx` is responsible for batches numbered // task_idx, task_idx + jobs_per_fsa, task_index + 2 * job_per_fsa and // so on, for the FSA numbered `fsa_idx`. Comparing this code to // `lambda_set_batch_info_simple`, this task is responsible for the // assignment to batch_starts_data for all i such that i % jobs_per_fsas // == task_idx, together with the assignment to // num_batchess_per_fsa_data if // i % jobs_per_fsas == task_idx (here referring to the i value finally // assigned to that location). int32_t begin_state_idx01 = fsas_row_splits1_data[fsa_idx], end_state_idx01 = fsas_row_splits1_data[fsa_idx + 1]; int32_t num_states_this_fsa = end_state_idx01 - begin_state_idx01; int32_t i = 0, cur_state_idx01 = begin_state_idx01; if (task_idx >= num_states_this_fsa) return; // The next loop advances `cur_state_idx01` by // a number of steps equal to `task_idx`. for (int32_t m = 0; m < log_power; ++m) { int32_t n = 1 << m; if ((task_idx & n) != 0) { i += n; int32_t next = dest_states_powers_acc(m, cur_state_idx01); if (next >= end_state_idx01) return; cur_state_idx01 = next; } } K2_CHECK_EQ(i, task_idx); while (1) { if (i >= num_states_this_fsa) return; batch_starts_data[begin_state_idx01 + i] = cur_state_idx01; int32_t next_state_idx01 = dest_states_powers_acc( log_power, cur_state_idx01); // advance jobs_per_fsa = (1 << log_power) // steps if (next_state_idx01 >= end_state_idx01) { // if exactly one step would also be enough to take us past the // boundary... 
if (dest_states_powers_acc(0, cur_state_idx01) >= end_state_idx01) { num_batches_per_fsa_data[fsa_idx] = i + 1; } return; } else { i += jobs_per_fsa; cur_state_idx01 = next_state_idx01; } } }); #endif ExclusiveSum(num_batches_per_fsa, &num_batches_per_fsa); Array1<int32_t> &ans_row_splits1 = num_batches_per_fsa; int32_t num_batches = num_batches_per_fsa[num_fsas]; Array1<int32_t> ans_row_ids1(c, num_batches); RowSplitsToRowIds(ans_row_splits1, &ans_row_ids1); Array1<int32_t> ans_row_splits2(c, num_batches + 1); const int32_t *ans_row_splits1_data = ans_row_splits1.Data(), *ans_row_ids1_data = ans_row_ids1.Data(); int32_t *ans_row_splits2_data = ans_row_splits2.Data(); ans_row_splits2.Range(num_batches, 1) = num_states; // The kernel below won't // set this last element K2_EVAL( c, num_batches, lambda_set_ans_row_splits2, (int32_t idx01)->void { int32_t idx0 = ans_row_ids1_data[idx01], // Fsa index idx0x = ans_row_splits1_data[idx0], idx1 = idx01 - idx0x, fsas_idx0x = fsas_row_splits1_data[idx0]; // 1st state-idx (idx01) // in fsas_, for this FSA int32_t fsas_idx01 = fsas_idx0x + idx1; // the idx1 is actually the // batch-index, this statement // reflects the 'un-consolidated' // format of `batch_starts`. int32_t this_batch_start = batch_starts_data[fsas_idx01]; ans_row_splits2_data[idx01] = this_batch_start; }); RaggedShape ans_shape = RaggedShape3(&ans_row_splits1, &ans_row_ids1, num_batches, &ans_row_splits2, nullptr, num_states); Array1<int32_t> ans_value = Range(c, num_states, 0); if (transpose) { ans_shape = MakeTransposable(ans_shape); Ragged<int32_t> ans(ans_shape, ans_value); return Transpose(ans); } else { return Ragged<int32_t>(ans_shape, ans_value); } } Ragged<int32_t> GetIncomingArcs(FsaVec &fsas, const Array1<int32_t> &dest_states) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(fsas.NumAxes(), 3); K2_CHECK(IsCompatible(fsas, dest_states)); ContextPtr &c = fsas.Context(); Ragged<int32_t> dest_states_tensor(fsas.shape, dest_states); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); Array1<int32_t> incoming_arcs_order = GetTransposeReordering(dest_states_tensor, num_states), ans_row_ids2 = dest_states[incoming_arcs_order]; // Note: incoming_arcs_row_ids2 will be monotonically increasing Array1<int32_t> ans_row_splits2(c, num_states + 1); RowIdsToRowSplits(ans_row_ids2, &ans_row_splits2); // Axis 1 corresponds to FSA states, so the row-ids and row-splits for axis // 1 are the same as for `fsas`. 
Array1<int32_t> ans_row_ids1 = fsas.RowIds(1), ans_row_splits1 = fsas.RowSplits(1); return Ragged<int32_t>( RaggedShape3(&ans_row_splits1, &ans_row_ids1, num_states, &ans_row_splits2, &ans_row_ids2, num_arcs), incoming_arcs_order); } Ragged<int32_t> GetLeavingArcIndexBatches(FsaVec &fsas, Ragged<int32_t> &state_batches) { NVTX_RANGE(K2_FUNC); K2_CHECK(IsCompatible(fsas, state_batches)); K2_CHECK_EQ(fsas.NumAxes(), 3); K2_CHECK_EQ(state_batches.NumAxes(), 3); ContextPtr &c = fsas.Context(); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); int32_t num_batches = state_batches.Dim0(); K2_DCHECK(state_batches.TotSize(1) == num_fsas * num_batches); K2_DCHECK_EQ(state_batches.NumElements(), num_states); // get ans_shape Array1<int32_t> ans_row_splits3(c, num_states + 1); int32_t *ans_row_splits3_data = ans_row_splits3.Data(); const int32_t *fsa_states_row_splits_data = fsas.RowSplits(2).Data(); const int32_t *batch_states_data = state_batches.values.Data(); K2_EVAL( c, num_states, lambda_set_ans_row_splits3, (int32_t idx) { int32_t state_idx = batch_states_data[idx]; ans_row_splits3_data[idx] = fsa_states_row_splits_data[state_idx + 1] - fsa_states_row_splits_data[state_idx]; }); ExclusiveSum(ans_row_splits3, &ans_row_splits3); Array1<int32_t> ans_row_ids3(c, num_arcs); RowSplitsToRowIds(ans_row_splits3, &ans_row_ids3); RaggedShape ans_shape = ComposeRaggedShapes( state_batches.shape, RaggedShape2(&ans_row_splits3, &ans_row_ids3, num_arcs)); // get ans_values Array1<int32_t> ans_values(c, num_arcs); int32_t *ans_values_data = ans_values.Data(); const int32_t *ans_row_ids3_data = ans_row_ids3.Data(); K2_EVAL( c, num_arcs, lambda_set_ans_values, (int32_t idx0123) { int32_t ans_idx012 = ans_row_ids3_data[idx0123]; int32_t state_idx = batch_states_data[ans_idx012]; // state_idx is idx01 in fsas int32_t fsa_idx01x = fsa_states_row_splits_data[state_idx]; // ans_idx3 is fsas_idx2, i.e. 
the arc idx in a state int32_t ans_idx3 = idx0123 - ans_row_splits3_data[ans_idx012]; ans_values_data[idx0123] = fsa_idx01x + ans_idx3; }); return Ragged<int32_t>(ans_shape, ans_values); } Ragged<int32_t> GetEnteringArcIndexBatches(FsaVec &fsas, Ragged<int32_t> &incoming_arcs, Ragged<int32_t> &state_batches) { NVTX_RANGE(K2_FUNC); K2_CHECK(IsCompatible(fsas, state_batches)); K2_CHECK(IsCompatible(fsas, incoming_arcs)); K2_CHECK_EQ(fsas.NumAxes(), 3); K2_CHECK_EQ(incoming_arcs.NumAxes(), 3); K2_CHECK_EQ(state_batches.NumAxes(), 3); ContextPtr &c = fsas.Context(); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); int32_t num_batches = state_batches.Dim0(); // just using DCHECK below to save time in production code K2_DCHECK(state_batches.TotSize(1) == num_fsas * num_batches); K2_DCHECK_EQ(state_batches.NumElements(), num_states); K2_DCHECK_EQ(incoming_arcs.Dim0(), num_fsas); K2_DCHECK_EQ(incoming_arcs.TotSize(1), num_states); K2_DCHECK_EQ(incoming_arcs.NumElements(), num_arcs); // get ans_shape Array1<int32_t> ans_row_splits3(c, num_states + 1); int32_t *ans_row_splits3_data = ans_row_splits3.Data(); const int32_t *incoming_arcs_row_splits_data = incoming_arcs.RowSplits(2).Data(); const int32_t *batch_states_data = state_batches.values.Data(); K2_EVAL( c, num_states, lambda_set_ans_row_splits3, (int32_t idx) { int32_t state_idx = batch_states_data[idx]; ans_row_splits3_data[idx] = incoming_arcs_row_splits_data[state_idx + 1] - incoming_arcs_row_splits_data[state_idx]; }); ExclusiveSum(ans_row_splits3, &ans_row_splits3); Array1<int32_t> ans_row_ids3(c, num_arcs); RowSplitsToRowIds(ans_row_splits3, &ans_row_ids3); RaggedShape ans_shape = ComposeRaggedShapes( state_batches.shape, RaggedShape2(&ans_row_splits3, &ans_row_ids3, num_arcs)); // get ans_values Array1<int32_t> ans_values(c, num_arcs); int32_t *ans_values_data = ans_values.Data(); const int32_t *ans_row_ids3_data = ans_row_ids3.Data(); const int32_t *incoming_arcs_data = incoming_arcs.values.Data(); K2_EVAL( c, num_arcs, lambda_set_ans_values, (int32_t idx0123) { int32_t ans_idx012 = ans_row_ids3_data[idx0123]; int32_t state_idx = batch_states_data[ans_idx012]; // state_idx is idx01 in // incoming_arcs int32_t incoming_arcs_idx01x = incoming_arcs_row_splits_data[state_idx]; // ans_idx3 is incoming_arcs_idx2, i.e. the entering arc idx for a state int32_t ans_idx3 = idx0123 - ans_row_splits3_data[ans_idx012]; int32_t incoming_arcs_idx012 = incoming_arcs_idx01x + ans_idx3; ans_values_data[idx0123] = incoming_arcs_data[incoming_arcs_idx012]; }); return Ragged<int32_t>(ans_shape, ans_values); } FsaVec ConvertDenseToFsaVec(DenseFsaVec &src) { NVTX_RANGE(K2_FUNC); ContextPtr &c = src.shape.Context(); // caution: 'num_symbols' is the number of symbols excluding the final-symbol // -1. int32_t num_fsas = src.shape.Dim0(), num_symbols = src.scores.Dim1() - 1; // the "1" is the extra state per FSA we need in the FsaVec format, // for the final-state. RaggedShape fsa2state = ChangeSublistSize(src.shape, 1); // again, the "+num_fsas" below is the extra state per FSA we need in the // FsaVec format, for the final-state. int32_t num_states = src.shape.NumElements() + num_fsas; // The explanation num-arcs below is as follows: // Firstly, all rows of src.scores (==all elements of src.shape) correspond // to states with arcs leaving them. 
Most of them have `num_symbols` arcs, // but the final one for each FSA has 1 arc (with symbol -1) int32_t num_arcs = src.shape.NumElements() * num_symbols - (num_symbols - 1) * num_fsas; Array1<int32_t> row_splits2(c, num_states + 1), row_ids2(c, num_arcs); const int32_t *row_ids1_data = fsa2state.RowIds(1).Data(), *src_row_ids1_data = src.shape.RowIds(1).Data(), *src_row_splits1_data = src.shape.RowSplits(1).Data(); Array1<Arc> arcs(c, num_arcs); Arc *arcs_data = arcs.Data(); auto scores_acc = src.scores.Accessor(); int32_t *row_splits2_data = row_splits2.Data(), *row_ids2_data = row_ids2.Data(); // 0 <= s < num_symbols; note, `num_symbols` excludes the final-symbol (-1). // note: `src` means: w.r.t. the numbering in the original DenseFsaVec. K2_EVAL2( c, src.shape.NumElements(), num_symbols, lambda_set_arcs_etc, (int32_t src_state_idx01, int32_t s)->void { int32_t fsa_idx0 = src_row_ids1_data[src_state_idx01], src_state_idx0x = src_row_splits1_data[fsa_idx0], state_idx1 = src_state_idx01 - src_state_idx0x, src_next_state_idx0x = src_row_splits1_data[fsa_idx0 + 1], src_num_states1 = src_next_state_idx0x - src_state_idx0x, ans_state_idx01 = src_state_idx01 + fsa_idx0; // we add one final-state per FSA.. // "+ fsa_idx0" gives the // difference from old->new // numbering. // arc_idx0xx is the 1st arc-index of the FSA we are creating.. each // source state has `num_symbols` arcs leaving it except the last one of // each FSA, which has 1 arc leaving it (to the final-state). int32_t arc_idx0xx = (src_state_idx0x * num_symbols) - fsa_idx0 * (num_symbols - 1), arc_idx01x = arc_idx0xx + (state_idx1 * num_symbols), arc_idx012 = arc_idx01x + s; int32_t symbol_offset; if (state_idx1 + 1 == src_num_states1) { symbol_offset = -1; if (s > 0) return; // we just need the arc with -1. // if this is the state before the final state of this FSA. it has the // responsibility to write the row_splits2 value for the final state. // It's arc_idx012 + 1; the "+1" corresponds to the single arc with // the final-symbol on it. row_splits2_data[ans_state_idx01 + 1] = arc_idx012 + 1; } else { symbol_offset = 0; } // the "+ 1" is because index 0 in `scores` is for the final-symbol -1, // then 0, 1, etc. int32_t symbol_index_in_scores = s + symbol_offset + 1; arcs_data[arc_idx012] = Arc(state_idx1, state_idx1 + 1, s + symbol_offset, scores_acc(src_state_idx01, symbol_index_in_scores)); row_ids2_data[arc_idx012] = ans_state_idx01; if (s == 0) { // 1st arc for this state. 
row_splits2_data[ans_state_idx01] = arc_idx012; K2_CHECK(row_ids1_data[ans_state_idx01] == fsa_idx0); if (src_state_idx01 == 0) row_splits2_data[num_states] = num_arcs; } }); RaggedShape state2arc = RaggedShape2(&row_splits2, &row_ids2, num_arcs); return Ragged<Arc>(ComposeRaggedShapes(fsa2state, state2arc), arcs); } template <typename FloatType> Array1<FloatType> GetForwardScores(FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &entering_arc_batches, bool log_semiring, Array1<int32_t> *entering_arcs) { NVTX_RANGE(K2_FUNC); K2_STATIC_ASSERT((std::is_same<float, FloatType>::value || std::is_same<double, FloatType>::value)); K2_CHECK(IsCompatible(fsas, state_batches)); K2_CHECK(IsCompatible(fsas, entering_arc_batches)); K2_CHECK_EQ(fsas.NumAxes(), 3); K2_CHECK_EQ(state_batches.NumAxes(), 3); K2_CHECK_EQ(entering_arc_batches.NumAxes(), 4); ContextPtr &c = fsas.Context(); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); int32_t num_batches = state_batches.Dim0(); // just using DCHECK below to save time in production code K2_DCHECK(state_batches.TotSize(1) == num_fsas * num_batches); K2_DCHECK_EQ(state_batches.NumElements(), num_states); K2_DCHECK_EQ(entering_arc_batches.Dim0(), num_batches); K2_DCHECK_EQ(entering_arc_batches.TotSize(1), state_batches.TotSize(1)); K2_DCHECK_EQ(entering_arc_batches.TotSize(2), num_states); K2_DCHECK_EQ(entering_arc_batches.NumElements(), num_arcs); FloatType negative_infinity = -std::numeric_limits<FloatType>::infinity(); Array1<FloatType> state_scores(c, num_states, negative_infinity); FloatType *state_scores_data = state_scores.Data(); // set the score of start state in each fsa to be 0 const int32_t *fsa_row_splits1 = fsas.RowSplits(1).Data(); K2_EVAL( c, num_fsas, lambda_set_start_state_score, (int32_t fsa_idx) { int32_t start_state = fsa_row_splits1[fsa_idx], start_state_next_fsa = fsa_row_splits1[fsa_idx + 1]; if (start_state_next_fsa - start_state > 0) state_scores_data[start_state] = 0; }); // get the 1st entering arc index in each batch, +1 so we can get the number // of entering arcs in each batch by taking the difference of adjacent // elements Array1<int32_t> entering_arc_start_index(c, num_batches + 1); int32_t *entering_arc_start_index_data = entering_arc_start_index.Data(); const int32_t *arc_batches_row_splits1 = entering_arc_batches.RowSplits(1).Data(); const int32_t *arc_batches_row_splits2 = entering_arc_batches.RowSplits(2).Data(); const int32_t *arc_batches_row_splits3 = entering_arc_batches.RowSplits(3).Data(); K2_EVAL( c, num_batches, lambda_set_entering_arc_start_index, (int32_t batch_idx) { int32_t this_state_idx0xx = arc_batches_row_splits2[batch_idx * num_fsas]; int32_t this_arc_idx0xxx = arc_batches_row_splits3[this_state_idx0xx]; entering_arc_start_index_data[batch_idx] = this_arc_idx0xxx; if (batch_idx == num_batches - 1) { // process the last element int32_t next_state_idx0xx = arc_batches_row_splits2[num_batches * num_fsas]; int32_t next_arc_idx0xxx = arc_batches_row_splits3[next_state_idx0xx]; entering_arc_start_index_data[num_batches] = next_arc_idx0xxx; } }); const int32_t *arc_batches_row_ids1 = entering_arc_batches.RowIds(1).Data(); const int32_t *arc_batches_row_ids2 = entering_arc_batches.RowIds(2).Data(); const int32_t *arc_batches_row_ids3 = entering_arc_batches.RowIds(3).Data(); const int32_t *entering_arc_ids = entering_arc_batches.values.Data(); const int32_t *states_data = state_batches.values.Data(); const Arc *arcs = fsas.values.Data(); Array1<FloatType> 
entering_arc_score_values( c, num_arcs); // entering arc_scores in batches FloatType *arc_scores_data = entering_arc_score_values.Data(); // copy entering_arc_start_index to cpu as we will access its elements in // below Eval function for `lambda_set_entering_arc_scores` Array1<int32_t> cpu_entering_arc_start_index = entering_arc_start_index.To(GetCpuContext()); const int32_t *cpu_entering_arc_start = cpu_entering_arc_start_index.Data(); // copy the index of start state in each fsa to CPU Array1<int32_t> &arc_batches_row_splits1_array = entering_arc_batches.RowSplits(1); Array1<int32_t> arc_batches_row_splits12_cpu = entering_arc_batches.RowSplits(2)[arc_batches_row_splits1_array].To( GetCpuContext()); K2_CHECK_EQ(arc_batches_row_splits12_cpu.Dim(), num_batches + 1); const int32_t *arc_batches_row_splits12_cpu_data = arc_batches_row_splits12_cpu.Data(); Array1<int32_t> arc_row_splits_mem(c, num_states + 1); Array1<FloatType> score_cache(c, num_states + 1); int32_t *entering_arcs_data = nullptr; if (entering_arcs) { K2_CHECK_EQ(log_semiring, false) << " entering_arcs supplied"; *entering_arcs = Array1<int32_t>(c, num_states, -1); entering_arcs_data = entering_arcs->Data(); } // process batch sequentially. for (int32_t i = 0; i < num_batches; ++i) { // get the range we would call Max/LogSum per sub list int32_t this_state_idx0xx = arc_batches_row_splits12_cpu_data[i], next_state_idx0xx = arc_batches_row_splits12_cpu_data[i + 1]; K2_CHECK_LT(this_state_idx0xx, num_states); K2_CHECK_LE(next_state_idx0xx, num_states); int32_t num_states_this_batch = next_state_idx0xx - this_state_idx0xx; K2_CHECK_LT(num_states_this_batch, arc_row_splits_mem.Dim()); // we always use the first `num_states_this_batch` elements in // arc_row_splits_mem. Array1<int32_t> arc_row_splits_part = arc_row_splits_mem.Range( 0, num_states_this_batch + 1); // +1 for the last element int32_t num_arcs_this_batch = cpu_entering_arc_start[i + 1] - cpu_entering_arc_start[i]; { ParallelRunner pr(c); // get entering arc scores { With w(pr.NewStream()); K2_EVAL( c, num_arcs_this_batch, lambda_set_entering_arc_score, (int32_t idx123) { // all idx** in below code are the indexes to entering_arc_batches int32_t idx0123 = entering_arc_start_index_data[i] + idx123; int32_t idx012 = arc_batches_row_ids3[idx0123]; int32_t idx01 = arc_batches_row_ids2[idx012]; K2_CHECK_EQ(idx01 / num_fsas, i); // idx01/num_fsas is batch_id int32_t fsa_id = idx01 % num_fsas; int32_t entering_arc_id = entering_arc_ids[idx0123]; float curr_arc_score = arcs[entering_arc_id].score; int32_t src_state_idx1 = arcs[entering_arc_id].src_state; int32_t src_state_idx01 = fsa_row_splits1[fsa_id] + src_state_idx1; arc_scores_data[idx0123] = state_scores_data[src_state_idx01] + curr_arc_score; }); } { With w(pr.NewStream()); // make entering arc row splits info in each batch starting from zero, // we will use it to call MaxPerSublist or LogSumPerSubList int32_t *sum_splits_data = arc_row_splits_part.Data(); K2_EVAL( c, num_states_this_batch + 1, lambda_set_row_splits_for_sum, (int32_t idx) { sum_splits_data[idx] = arc_batches_row_splits3[idx + this_state_idx0xx] - arc_batches_row_splits3[this_state_idx0xx]; }); } } int32_t this_arc_idx0xxx = cpu_entering_arc_start[i]; Array1<FloatType> sub_scores_values = entering_arc_score_values.Range(this_arc_idx0xxx, num_arcs_this_batch); RaggedShape sub_scores_shape = RaggedShape2(&arc_row_splits_part, nullptr, sub_scores_values.Dim()); Ragged<FloatType> sub_scores(sub_scores_shape, sub_scores_values); // we always use the first 
num_rows elements in score_cache. Array1<FloatType> sub_state_scores = score_cache.Range(0, num_states_this_batch); // get scores per state in this batch if (log_semiring) { LogSumPerSublist(sub_scores, negative_infinity, &sub_state_scores); } else { MaxPerSublist(sub_scores, negative_infinity, &sub_state_scores); if (entering_arcs_data != nullptr) { FloatType *sub_state_scores_data = sub_state_scores.Data(), *sub_scores_data = sub_scores.values.Data(); int32_t *sub_scores_row_ids_data = sub_scores.RowIds(1).Data(); const int32_t *sub_state_ids_data = states_data + this_state_idx0xx, *sub_entering_arc_ids_data = entering_arc_ids + this_arc_idx0xxx; // arc_idx01 below is an index into sub_scores, it is also an arc_idx123 // into entering_arc_batches. K2_EVAL( c, sub_scores.NumElements(), lambda_set_entering_arcs, (int32_t arc_idx01) { // state_idx0 below is idx0 into `sub_scores`, also an index into // `sub_scores`. int32_t state_idx0 = sub_scores_row_ids_data[arc_idx01]; if (sub_scores_data[arc_idx01] == sub_state_scores_data[state_idx0]) { int32_t fsas_state_idx01 = sub_state_ids_data[state_idx0], fsas_entering_arc_idx012 = sub_entering_arc_ids_data[arc_idx01]; // The following statement has a race condition if there is a // tie on scores, but this is OK and by design. It makes the // choice of traceback non-deterministic in these cases. entering_arcs_data[fsas_state_idx01] = fsas_entering_arc_idx012; } }); } } const FloatType *sub_state_scores_data = sub_state_scores.Data(); // Copy those scores to corresponding state in state_scores. // `state_idx12` is an idx12 w.r.t. state_batches and entering_arc_batches, // but an idx1 w.r.t. sub_scores and an index into the array // sub_state_scores. K2_EVAL( c, num_states_this_batch, lambda_copy_state_scores, (int32_t state_idx12) { int32_t batches_idx012 = this_state_idx0xx + state_idx12; int32_t fsas_state_idx01 = states_data[batches_idx012]; int32_t batches_idx01 = arc_batches_row_ids2[batches_idx012]; int32_t fsa_idx0 = batches_idx01 % num_fsas; int32_t start_state_idx01 = fsa_row_splits1[fsa_idx0]; // don't override score 0 in the start state in each fsa. 
if (fsas_state_idx01 != start_state_idx01) state_scores_data[fsas_state_idx01] = sub_state_scores_data[state_idx12]; }); } return state_scores; } template <typename FloatType> Array1<FloatType> GetBackwardScores( FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &leaving_arc_batches, const Array1<FloatType> *tot_scores /*= nullptr*/, bool log_semiring /*= true*/) { NVTX_RANGE(K2_FUNC); K2_CHECK(IsCompatible(fsas, state_batches)); K2_CHECK(IsCompatible(fsas, leaving_arc_batches)); K2_CHECK_EQ(fsas.NumAxes(), 3); K2_CHECK_EQ(state_batches.NumAxes(), 3); K2_CHECK_EQ(leaving_arc_batches.NumAxes(), 4); ContextPtr &c = fsas.Context(); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); int32_t num_batches = state_batches.Dim0(); K2_DCHECK(state_batches.TotSize(1) == num_fsas * num_batches); // just using DCHECK below to save time in production code K2_DCHECK_EQ(state_batches.NumElements(), num_states); K2_DCHECK_EQ(leaving_arc_batches.Dim0(), num_batches); K2_DCHECK_EQ(leaving_arc_batches.TotSize(1), state_batches.TotSize(1)); K2_DCHECK_EQ(leaving_arc_batches.TotSize(2), num_states); K2_DCHECK_EQ(leaving_arc_batches.NumElements(), num_arcs); FloatType negative_infinity = -std::numeric_limits<FloatType>::infinity(); Array1<FloatType> state_scores(c, num_states, negative_infinity); FloatType *state_scores_data = state_scores.Data(); const int32_t *fsa_row_splits1 = fsas.RowSplits(1).Data(); if (tot_scores != nullptr) { K2_CHECK(IsCompatible(fsas, *tot_scores)); K2_CHECK_EQ(tot_scores->Dim(), num_fsas); const FloatType *tot_scores_data = tot_scores->Data(); // set the score of final state in fsa i to be negative of tot_scores[i] K2_EVAL( c, num_fsas, lambda_set_final_state_score, (int32_t fsa_idx) { int32_t start_state = fsa_row_splits1[fsa_idx], start_state_next_fsa = fsa_row_splits1[fsa_idx + 1]; if (start_state_next_fsa - start_state > 0) { // We never set the score of a state to positive_infinity, otherwise // we may get NaN when add it with negative_infinity. But this // usually would not happen for a connected FSA. 
if (tot_scores_data[fsa_idx] != negative_infinity) { state_scores_data[start_state_next_fsa - 1] = -tot_scores_data[fsa_idx]; } else { state_scores_data[start_state_next_fsa - 1] = negative_infinity; } } }); } else { // set the score of final state in each fsa to be 0 K2_EVAL( c, num_fsas, lambda_set_final_state_score, (int32_t fsa_idx) { int32_t start_state = fsa_row_splits1[fsa_idx], start_state_next_fsa = fsa_row_splits1[fsa_idx + 1]; if (start_state_next_fsa - start_state > 0) state_scores_data[start_state_next_fsa - 1] = 0; }); } // get the 1st leaving arc index in each batch, +1 so we can get the number of // leaving arcs in each batch by taking the difference of adjacent elements Array1<int32_t> leaving_arc_start_index(c, num_batches + 1); int32_t *leaving_arc_start_index_data = leaving_arc_start_index.Data(); const int32_t *arc_batches_row_splits1 = leaving_arc_batches.RowSplits(1).Data(); const int32_t *arc_batches_row_splits2 = leaving_arc_batches.RowSplits(2).Data(); const int32_t *arc_batches_row_splits3 = leaving_arc_batches.RowSplits(3).Data(); K2_EVAL( c, num_batches, lambda_set_leaving_arc_start_index, (int32_t batch_idx) { int32_t this_state_idx0xx = arc_batches_row_splits2[batch_idx * num_fsas]; int32_t this_arc_idx0xxx = arc_batches_row_splits3[this_state_idx0xx]; leaving_arc_start_index_data[batch_idx] = this_arc_idx0xxx; if (batch_idx == num_batches - 1) { // process the last element int32_t next_state_idx0xx = arc_batches_row_splits2[num_batches * num_fsas]; int32_t next_arc_idx0xxx = arc_batches_row_splits3[next_state_idx0xx]; leaving_arc_start_index_data[num_batches] = next_arc_idx0xxx; } }); const int32_t *arc_batches_row_ids1 = leaving_arc_batches.RowIds(1).Data(); const int32_t *arc_batches_row_ids2 = leaving_arc_batches.RowIds(2).Data(); const int32_t *arc_batches_row_ids3 = leaving_arc_batches.RowIds(3).Data(); const int32_t *leaving_arc_ids = leaving_arc_batches.values.Data(); const int32_t *states_data = state_batches.values.Data(); const Arc *arcs = fsas.values.Data(); Array1<FloatType> leaving_arc_score_values( c, num_arcs); // leaving arc_scores in batches FloatType *arc_scores_data = leaving_arc_score_values.Data(); // copy leaving_arc_start_index to cpu as we will access its elements in below // Eval function for `lambda_set_leaving_arc_scores` Array1<int32_t> cpu_leaving_arc_start_index = leaving_arc_start_index.To(GetCpuContext()); const int32_t *cpu_leaving_arc_start = cpu_leaving_arc_start_index.Data(); // copy the index of start state in each fsa to CPU Array1<int32_t> arc_batches_row_splits1_array = leaving_arc_batches.RowSplits(1); Array1<int32_t> arc_batches_row_splits12_cpu = leaving_arc_batches.RowSplits(2)[arc_batches_row_splits1_array].To( GetCpuContext()); K2_CHECK_EQ(arc_batches_row_splits12_cpu.Dim(), num_batches + 1); const int32_t *arc_batches_row_splits12_cpu_data = arc_batches_row_splits12_cpu.Data(); Array1<int32_t> arc_row_splits_mem(c, num_states + 1); Array1<FloatType> score_cache(c, num_states + 1); // process batch sequentially. 
for (int32_t i = num_batches - 1; i >= 0; --i) { // get the range we would call Max/LogSum per sub list int32_t this_state_idx0xx = arc_batches_row_splits12_cpu_data[i]; int32_t next_state_idx0xx = arc_batches_row_splits12_cpu_data[i + 1]; // the 1st state idx in the // next batch K2_CHECK_LT(this_state_idx0xx, num_states); K2_CHECK_LE(next_state_idx0xx, num_states); int32_t num_states_this_batch = next_state_idx0xx - this_state_idx0xx; K2_CHECK_LT(num_states_this_batch, arc_row_splits_mem.Dim()); // we always use the first `num_states_this_batch` elements in // arc_row_splits_mem. Array1<int32_t> arc_row_splits_part = arc_row_splits_mem.Range( 0, num_states_this_batch + 1); // +1 for the last element int32_t num_arcs_this_batch = cpu_leaving_arc_start[i + 1] - cpu_leaving_arc_start[i]; { ParallelRunner pr(c); // get leaving arc scores { With w(pr.NewStream()); K2_EVAL( c, num_arcs_this_batch, lambda_set_leaving_arc_score, (int32_t idx123) { // all idx** in below code are the indexes to leaving_arc_batches int32_t idx0123 = leaving_arc_start_index_data[i] + idx123; int32_t idx012 = arc_batches_row_ids3[idx0123]; int32_t idx01 = arc_batches_row_ids2[idx012]; K2_CHECK_EQ(idx01 / num_fsas, i); // idx01/num_fsas is batch_id int32_t fsa_id = idx01 % num_fsas; int32_t leaving_arc_id = leaving_arc_ids[idx0123]; float curr_arc_score = arcs[leaving_arc_id].score; int32_t dest_state_idx1 = arcs[leaving_arc_id].dest_state; int32_t dest_state_idx01 = fsa_row_splits1[fsa_id] + dest_state_idx1; arc_scores_data[idx0123] = state_scores_data[dest_state_idx01] + curr_arc_score; }); } { With w(pr.NewStream()); // make leaving arc row splits info in each batch starting from zero, // we will use it to call MaxPerSublist or LogSumPerSubList int32_t *sum_splits_data = arc_row_splits_part.Data(); K2_EVAL( c, num_states_this_batch + 1, lambda_set_row_splits_for_sum, (int32_t idx) { sum_splits_data[idx] = arc_batches_row_splits3[idx + this_state_idx0xx] - arc_batches_row_splits3[this_state_idx0xx]; }); } } int32_t this_arc_idx0xxx = cpu_leaving_arc_start[i]; Array1<FloatType> sub_scores_values = leaving_arc_score_values.Range(this_arc_idx0xxx, num_arcs_this_batch); RaggedShape sub_scores_shape = RaggedShape2(&arc_row_splits_part, nullptr, sub_scores_values.Dim()); Ragged<FloatType> sub_scores(sub_scores_shape, sub_scores_values); // we always use the first num_rows elements in score_cache. Array1<FloatType> sub_state_scores = score_cache.Range(0, num_states_this_batch); // get scores per state in this batch if (log_semiring) LogSumPerSublist(sub_scores, negative_infinity, &sub_state_scores); else MaxPerSublist(sub_scores, negative_infinity, &sub_state_scores); const FloatType *sub_state_scores_data = sub_state_scores.Data(); // copy those scores to corresponding state in state_scores K2_EVAL( c, num_states_this_batch, lambda_copy_state_scores, (int32_t idx2) { int32_t idx012 = this_state_idx0xx + idx2; int32_t state_idx012 = states_data[idx012]; int32_t idx01 = arc_batches_row_ids2[idx012]; int32_t fsa_id = idx01 % num_fsas; int32_t start_state = fsa_row_splits1[fsa_id], start_state_next_fsa = fsa_row_splits1[fsa_id + 1]; if (start_state_next_fsa - start_state > 0) { // non-empty fsa int32_t final_state_idx = start_state_next_fsa - 1; // don't override score in the final state in each fsa. 
if (state_idx012 != final_state_idx) state_scores_data[state_idx012] = sub_state_scores_data[idx2]; } }); } return state_scores; } template <typename FloatType> Array1<FloatType> GetTotScores(FsaVec &fsas, const Array1<FloatType> &forward_scores) { NVTX_RANGE(K2_FUNC); K2_CHECK(IsCompatible(fsas, forward_scores)); K2_CHECK_EQ(fsas.NumAxes(), 3); ContextPtr &c = fsas.Context(); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1); K2_CHECK_EQ(num_states, forward_scores.Dim()); FloatType negative_infinity = -std::numeric_limits<FloatType>::infinity(); Array1<FloatType> tot_scores(c, num_fsas, negative_infinity); FloatType *tot_scores_data = tot_scores.Data(); const int32_t *fsa_row_splits1 = fsas.RowSplits(1).Data(); const FloatType *forward_scores_data = forward_scores.Data(); K2_EVAL( c, num_fsas, lambda_copy_tot_scores, (int32_t fsa_idx) { int32_t start_state = fsa_row_splits1[fsa_idx], start_state_next_fsa = fsa_row_splits1[fsa_idx + 1]; if (start_state_next_fsa > start_state) { // non-empty fsa int32_t final_state_idx = start_state_next_fsa - 1; tot_scores_data[fsa_idx] = forward_scores_data[final_state_idx]; } }); return tot_scores; } template <typename FloatType> Array1<FloatType> GetArcScores(FsaVec &fsas, const Array1<FloatType> &forward_scores, const Array1<FloatType> &backward_scores) { NVTX_RANGE(K2_FUNC); K2_CHECK(IsCompatible(fsas, forward_scores)); K2_CHECK(IsCompatible(fsas, backward_scores)); K2_CHECK_EQ(fsas.NumAxes(), 3); ContextPtr &c = fsas.Context(); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); K2_CHECK_EQ(num_states, forward_scores.Dim()); K2_CHECK_EQ(num_states, backward_scores.Dim()); Array1<FloatType> arc_scores(c, num_arcs); FloatType *arc_scores_data = arc_scores.Data(); const int32_t *fsa_row_splits1 = fsas.RowSplits(1).Data(); const int32_t *fsa_row_ids1 = fsas.RowIds(1).Data(); const int32_t *fsa_row_ids2 = fsas.RowIds(2).Data(); const Arc *arcs = fsas.values.Data(); const FloatType *forward_scores_data = forward_scores.Data(); const FloatType *backward_scores_data = backward_scores.Data(); K2_EVAL( c, num_arcs, lambda_get_arc_scores, (int32_t arc_idx012) { int32_t src_state_idx1 = arcs[arc_idx012].src_state; int32_t dest_state_idx1 = arcs[arc_idx012].dest_state; float arc_score = arcs[arc_idx012].score; int32_t idx01 = fsa_row_ids2[arc_idx012]; int32_t idx0 = fsa_row_ids1[idx01]; int32_t idx0x = fsa_row_splits1[idx0]; int32_t src_state_idx01 = idx0x + src_state_idx1; int32_t dest_state_idx01 = idx0x + dest_state_idx1; arc_scores_data[arc_idx012] = arc_score + forward_scores_data[src_state_idx01] + backward_scores_data[dest_state_idx01]; }); return arc_scores; } // explicit instantiation for those score computation functions above template Array1<float> GetForwardScores(FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &entering_arc_batches, bool log_semiring, Array1<int32_t> *entering_arcs); template Array1<double> GetForwardScores(FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &entering_arc_batches, bool log_semiring, Array1<int32_t> *entering_arcs); template Array1<float> GetBackwardScores(FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &leaving_arc_batches, const Array1<float> *tot_scores, bool log_semiring); template Array1<double> GetBackwardScores(FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &leaving_arc_batches, const Array1<double> *tot_scores, bool log_semiring); template Array1<float> GetArcScores(FsaVec &fsas, const Array1<float> 
&forward_scores, const Array1<float> &backward_scores); template Array1<double> GetArcScores(FsaVec &fsas, const Array1<double> &forward_scores, const Array1<double> &backward_scores); template Array1<float> GetTotScores(FsaVec &fsas, const Array1<float> &forward_scores); template Array1<double> GetTotScores(FsaVec &fsas, const Array1<double> &forward_scores); Fsa RandomFsa(bool acyclic /*=true*/, int32_t max_symbol /*=50*/, int32_t min_num_arcs /*=0*/, int32_t max_num_arcs /*=1000*/) { NVTX_RANGE(K2_FUNC); ContextPtr c = GetCpuContext(); K2_CHECK_GE(min_num_arcs, 0); K2_CHECK_GE(max_num_arcs, min_num_arcs); K2_CHECK_GE(max_symbol, 0); RaggedShape shape = RandomRaggedShape(false, 2, 2, min_num_arcs, max_num_arcs); int32_t dim0 = shape.Dim0(); // empty Fsa if (dim0 == 0) return Fsa(shape, Array1<Arc>(c, std::vector<Arc>{})); // as there should be no arcs leaving the final_state, we always push back an // empty row here. Array1<int32_t> ans_row_splits1(c, dim0 + 2); Array1<int32_t> sub_range = ans_row_splits1.Range(0, dim0 + 1); sub_range.CopyFrom(shape.RowSplits(1)); int32_t *ans_row_splits1_data = ans_row_splits1.Data(); ans_row_splits1_data[dim0 + 1] = ans_row_splits1_data[dim0]; // create returned shape RaggedShapeLayer ans_shape_dim; ans_shape_dim.row_splits = ans_row_splits1; ans_shape_dim.cached_tot_size = shape.TotSize(1); RaggedShape ans_shape(std::vector<RaggedShapeLayer>{ans_shape_dim}, true); ans_shape.Populate(); // will be used to generate scores on arcs. std::random_device rd; std::mt19937 gen(rd()); // TODO(haowen): let the users set the range of scores? it's fine to use it // for now as we just use it to test. std::uniform_real_distribution<float> dis_score(0, 10); // create arcs int32_t *row_ids1 = ans_shape.RowIds(1).Data(); int32_t num_states = ans_shape.Dim0(), num_arcs = ans_shape.TotSize(1); int32_t start_state = 0, final_state = num_states - 1; std::vector<Arc> arcs(num_arcs); for (int32_t i = 0; i != num_arcs; ++i) { int32_t curr_state = row_ids1[i]; int32_t dest_state = acyclic ? RandInt(curr_state + 1, final_state) : RandInt(start_state, final_state); int32_t symbol = dest_state == final_state ? -1 : RandInt(0, max_symbol); float score = dis_score(gen); arcs[i] = Arc(curr_state, dest_state, symbol, score); } return Fsa(ans_shape, Array1<Arc>(c, arcs)); } FsaVec RandomFsaVec(int32_t min_num_fsas /*=1*/, int32_t max_num_fsas /*=1000*/, bool acyclic /*=true*/, int32_t max_symbol /*=50*/, int32_t min_num_arcs /*=0*/, int32_t max_num_arcs /*=1000*/) { NVTX_RANGE(K2_FUNC); K2_CHECK_GE(min_num_fsas, 0); K2_CHECK_GE(max_num_fsas, min_num_fsas); int32_t num_fsas = RandInt(min_num_fsas, max_num_fsas); std::vector<Fsa> fsas(num_fsas); for (int32_t i = 0; i != num_fsas; ++i) { fsas[i] = RandomFsa(acyclic, max_symbol, min_num_arcs, max_num_arcs); } return Stack(0, num_fsas, fsas.data()); } DenseFsaVec RandomDenseFsaVec(int32_t min_num_fsas, int32_t max_num_fsas, int32_t min_frames, int32_t max_frames, int32_t min_symbols, int32_t max_symbols, float scores_scale) { NVTX_RANGE(K2_FUNC); ContextPtr c = GetCpuContext(); int32_t num_fsas = RandInt(min_num_fsas, max_num_fsas); // num_symbols includes epsilon but not final-symbol -1. int32_t num_symbols = RandInt(min_symbols, max_symbols); // `num_frames` includes the extra 1 frame for the final-symbol. 
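  // For illustration: with, say, 2 "real" frames for a sequence and
  // num_symbols == 3, the rows written for that sequence further below look
  // like
  //   frame 0: [ -inf  s00  s01  s02 ]   (column 0 is the final-symbol -1)
  //   frame 1: [ -inf  s10  s11  s12 ]
  //   frame 2: [  f   -inf -inf -inf ]   (extra frame: only -1 is allowed)
  // where s?? and f stand for random values scaled by `scores_scale`.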
std::vector<int32_t> num_frames(num_fsas + 1); int32_t tot_frames = 0; for (int32_t i = 0; i < num_fsas; i++) { num_frames[i] = RandInt(min_frames, max_frames) + 1; tot_frames += num_frames[i]; } Array2<float> scores(c, tot_frames, num_symbols + 1); auto scores_acc = scores.Accessor(); std::vector<int32_t> row_splits_vec(num_fsas + 1); row_splits_vec[0] = 0; int32_t cur_start_frame = 0; RandIntGenerator gen; for (int32_t i = 0; i < num_fsas; i++) { int32_t this_num_frames = num_frames[i], end_frame = cur_start_frame + this_num_frames; for (int32_t f = cur_start_frame; f + 1 < end_frame; f++) { scores_acc(f, 0) = -std::numeric_limits<float>::infinity(); for (int32_t j = 0; j < num_symbols; j++) scores_acc(f, j + 1) = scores_scale * gen(-50, 50) * 0.01; } // on the last frame the placement of infinity vs. finite is reversed: // -1 gets finite value, others get infinity. int32_t f = end_frame - 1; scores_acc(f, 0) = scores_scale * gen(-50, 50) * 0.01; for (int32_t j = 0; j < num_symbols; j++) scores_acc(f, j + 1) = -std::numeric_limits<float>::infinity(); row_splits_vec[i + 1] = cur_start_frame = end_frame; } Array1<int32_t> row_splits(c, row_splits_vec); return DenseFsaVec(RaggedShape2(&row_splits, nullptr, tot_frames), scores); } Ragged<int32_t> GetStartStates(FsaVec &src) { NVTX_RANGE(K2_FUNC); ContextPtr c = src.Context(); K2_CHECK(src.NumAxes() == 3); int32_t num_fsas = src.Dim0(); const int32_t *src_row_splits1_data = src.RowSplits(1).Data(); Array1<int32_t> ans_row_splits(c, num_fsas + 1); // will first set the elements of ans_row_splits to the number of states kept // from this FSA (either 0 or 1). int32_t *num_states_data = ans_row_splits.Data(); K2_EVAL( c, num_fsas, lambda_set_num_states, (int32_t fsa_idx0)->void { // 1 if the FSA is not empty, 0 if empty. num_states_data[fsa_idx0] = (src_row_splits1_data[fsa_idx0 + 1] > src_row_splits1_data[fsa_idx0]); }); ExclusiveSum(ans_row_splits, &ans_row_splits); int32_t ans_dim = ans_row_splits.Back(); Ragged<int32_t> ans(RaggedShape2(&ans_row_splits, nullptr, ans_dim), Array1<int32_t>(c, ans_dim)); const int32_t *ans_row_ids1_data = ans.shape.RowIds(1).Data(); int32_t *ans_values_data = ans.values.Data(); K2_EVAL( c, ans_dim, lambda_set_ans_values, (int32_t ans_idx01)->void { int32_t idx0 = ans_row_ids1_data[ans_idx01]; int32_t src_start_state_idx01 = src_row_splits1_data[idx0]; K2_CHECK_GT(src_row_splits1_data[idx0 + 1], src_row_splits1_data[idx0]); ans_values_data[ans_idx01] = src_start_state_idx01; }); return ans; } FsaVec FsaVecFromArcIndexes(FsaVec &fsas, Ragged<int32_t> &best_arc_indexes) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(fsas.NumAxes(), 3); K2_CHECK_EQ(best_arc_indexes.NumAxes(), 2); K2_CHECK(IsCompatible(fsas, best_arc_indexes)); K2_CHECK_EQ(fsas.Dim0(), best_arc_indexes.Dim0()); // if there are n arcs (for n > 0), there are n + 1 states; if there are 0 // arcs, there are 0 states (that FSA will have no arcs or states). 
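  // For illustration: if one FSA's entry in `best_arc_indexes` holds 3 arcs,
  // the FSA built for it below is the linear chain 0 -> 1 -> 2 -> 3 (4
  // states), the i'th new arc re-using the label and score of the i'th best
  // arc; an empty entry yields an FSA with no states or arcs.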
RaggedShape states_shape = ChangeSublistSizePinned(best_arc_indexes.shape, 1); const int32_t *states_shape_row_splits1_data = states_shape.RowSplits(1).Data(); int32_t num_fsas = fsas.Dim0(); int32_t num_states = states_shape.NumElements(); int32_t num_arcs = best_arc_indexes.shape.NumElements(); ContextPtr &context = fsas.Context(); if (num_arcs == 0) { RaggedShape shape_a = RegularRaggedShape(context, num_fsas, 0), shape_b = RegularRaggedShape(context, 0, 0); return FsaVec(ComposeRaggedShapes(shape_a, shape_b), Array1<Arc>(context, 0)); } Array1<int32_t> row_splits2(context, num_states + 1); Array1<int32_t> row_ids2(context, num_arcs); int32_t *row_splits2_data = row_splits2.Data(); int32_t *row_ids2_data = row_ids2.Data(); Array1<Arc> arcs(context, num_arcs); Arc *arcs_data = arcs.Data(); const int32_t *best_arc_indexes_row_splits1_data = best_arc_indexes.RowSplits(1).Data(); const int32_t *best_arc_indexes_row_ids1_data = best_arc_indexes.RowIds(1).Data(); const int32_t *best_arc_indexes_data = best_arc_indexes.values.Data(); const Arc *fsas_values_data = fsas.values.Data(); K2_EVAL( context, num_arcs, lambda_set_arcs, (int32_t best_arc_idx01) { int32_t fsas_idx0 = best_arc_indexes_row_ids1_data[best_arc_idx01]; int32_t best_arc_idx0x = best_arc_indexes_row_splits1_data[fsas_idx0]; int32_t best_arc_idx0x_next = best_arc_indexes_row_splits1_data[fsas_idx0 + 1]; int32_t num_best_arcs = best_arc_idx0x_next - best_arc_idx0x; int32_t best_arc_idx1 = best_arc_idx01 - best_arc_idx0x; int32_t state_offset = states_shape_row_splits1_data[fsas_idx0]; const Arc &arc = fsas_values_data[best_arc_indexes_data[best_arc_idx01]]; int32_t src_state = best_arc_idx1; int32_t dest_state = src_state + 1; int32_t label = arc.label; float score = arc.score; arcs_data[best_arc_idx01] = Arc(src_state, dest_state, label, score); int32_t state_idx01 = state_offset + src_state; row_ids2_data[best_arc_idx01] = state_idx01; row_splits2_data[state_idx01 + 1] = best_arc_idx01 + 1; if (best_arc_idx01 == 0) row_splits2_data[0] = 0; if (best_arc_idx1 + 1 == num_best_arcs) row_splits2_data[state_idx01 + 2] = best_arc_idx01 + 1; }); RaggedShape shape = RaggedShape3(&states_shape.RowSplits(1), &states_shape.RowIds(1), num_states, &row_splits2, &row_ids2, num_arcs); Ragged<Arc> ans(shape, arcs); return ans; } FsaVec GetIncomingFsaVec(FsaVec &fsas) { Array1<int32_t> dest_states = GetDestStates(fsas, true); Ragged<int32_t> arc_indexes = GetIncomingArcs(fsas, dest_states); return FsaVec(arc_indexes.shape, fsas.values[arc_indexes.values]); } Ragged<int32_t> ComposeArcMaps(Ragged<int32_t> &step1_arc_map, Ragged<int32_t> &step2_arc_map) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(step1_arc_map.NumAxes(), 2); K2_CHECK_EQ(step2_arc_map.NumAxes(), 2); return Index(step1_arc_map, step2_arc_map, true); } } // namespace k2
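
// A usage sketch (illustration only; the exact call sites in k2 may differ)
// showing how the score utilities above are typically chained together:
//
//   FsaVec fsas = ...;  // a 3-axis, top-sorted FsaVec
//   Ragged<int32_t> state_batches = GetStateBatches(fsas, /*transpose*/true);
//   Array1<int32_t> dest_states = GetDestStates(fsas, /*as_idx01*/true);
//   Ragged<int32_t> incoming_arcs = GetIncomingArcs(fsas, dest_states);
//   Ragged<int32_t> entering_arc_batches =
//       GetEnteringArcIndexBatches(fsas, incoming_arcs, state_batches);
//   Ragged<int32_t> leaving_arc_batches =
//       GetLeavingArcIndexBatches(fsas, state_batches);
//   Array1<float> forward = GetForwardScores<float>(
//       fsas, state_batches, entering_arc_batches, /*log_semiring*/true,
//       /*entering_arcs*/nullptr);
//   Array1<float> tot = GetTotScores(fsas, forward);
//   Array1<float> backward = GetBackwardScores<float>(
//       fsas, state_batches, leaving_arc_batches, &tot, /*log_semiring*/true);
//   Array1<float> arc_post = GetArcScores(fsas, forward, backward);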
0f41b6cb360b41aba560c561af25b90c40b20c01.cu
/** * @brief Utilities for creating FSAs. * * Note that serializations are done in Python. * * @copyright * Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu) * Mobvoi Inc. (authors: Fangjun Kuang) * Guoguo Chen * * @copyright * See LICENSE for clarification regarding multiple authors */ #include <algorithm> #include <limits> #include <sstream> #include <utility> #include <vector> #include "k2/csrc/array.h" #include "k2/csrc/context.h" #include "k2/csrc/fsa.h" #include "k2/csrc/fsa_utils.h" #include "k2/csrc/math.h" #include "k2/csrc/ragged.h" namespace k2 { // field separator within a line for a text form FSA static constexpr const char *kDelim = " \t"; // Convert a string to an integer. Abort the program on failure. static int32_t StringToInt(const std::string &s) { NVTX_RANGE(K2_FUNC); K2_CHECK(!s.empty()); bool ok = false; char *p = nullptr; // std::strtol requires a `long` type long n = std::strtol(s.c_str(), &p, 10); // NOLINT if (*p == '\0') ok = true; auto res = static_cast<int32_t>(n); if (n != res) ok = false; // out of range K2_CHECK(ok) << "Failed to convert " << s << " to an integer"; return res; } // Convert a string to a float. Abort the program on failure. // TODO(guoguo): We may run into locale problems, with comma vs. period for // decimals. We have to test if the C code will behave the same // w.r.t. locale as Python does. static float StringToFloat(const std::string &s) { NVTX_RANGE(K2_FUNC); K2_CHECK(!s.empty()); char *p = nullptr; float f = std::strtof(s.c_str(), &p); if (*p != '\0') K2_LOG(FATAL) << "Failed to convert " << s << " to a float"; return f; } // Trim leading and trailing spaces of a string. static void TrimString(std::string *s) { NVTX_RANGE(K2_FUNC); K2_CHECK_NE(s, nullptr); auto not_space = [](int32_t c) -> bool { return std::isspace(c) == 0; }; s->erase(s->begin(), std::find_if(s->begin(), s->end(), not_space)); s->erase(std::find_if(s->rbegin(), s->rend(), not_space).base(), s->end()); } /* Split a string to a vector of strings using a set of delimiters. Example usage: @code std::string in = "1 2 3"; const char *delim = " \t"; std::vector<std::string> out; SplitStringToVector(in, delim, &out); @endcode @param [in] in The input string to be split. @param [in] delim A string of delimiters. @param [out] out It saves the split result. */ static void SplitStringToVector(const std::string &in, const char *delim, std::vector<std::string> *out) { NVTX_RANGE(K2_FUNC); K2_CHECK_NE(delim, nullptr); K2_CHECK_NE(out, nullptr); out->clear(); std::size_t start = 0; while (true) { auto pos = in.find_first_of(delim, start); if (pos == std::string::npos) break; auto sub = in.substr(start, pos - start); start = pos + 1; TrimString(&sub); if (!sub.empty()) out->emplace_back(std::move(sub)); } if (start < in.size()) { auto sub = in.substr(start); TrimString(&sub); if (!sub.empty()) out->emplace_back(std::move(sub)); } } /* Create an acceptor from a stream, assuming the acceptor is in the k2 format: src_state1 dest_state1 label1 score1 src_state2 dest_state2 label2 score2 ... ... final_state The source states will be in non-descending order, and the final state does not bear a cost/score -- we put the cost/score on the arc that connects to the final state and set its label to -1. @param [in] is The input stream that contains the acceptor. @return It returns an Fsa on CPU. 
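     For illustration, a hypothetical 3-state acceptor in this format:

       0 1 2 0.1
       0 1 5 0.3
       1 2 -1 0.5
       2

     Here state 2 is the final state; the arc entering it carries label -1
     and the score.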
*/ static Fsa K2AcceptorFromStream(std::istringstream &is) { NVTX_RANGE(K2_FUNC); std::vector<Arc> arcs; std::vector<std::string> splits; std::string line; bool finished = false; // when the final state is read, set it to true. while (std::getline(is, line)) { SplitStringToVector(line, kDelim, &splits); // splits is cleared in the function if (splits.empty()) continue; // this is an empty line K2_CHECK_EQ(finished, false); auto num_fields = splits.size(); if (num_fields == 4u) { // 0 1 2 3 // src_state dest_state label score int32_t src_state = StringToInt(splits[0]); int32_t dest_state = StringToInt(splits[1]); int32_t symbol = StringToInt(splits[2]); float score = StringToFloat(splits[3]); arcs.emplace_back(src_state, dest_state, symbol, score); } else if (num_fields == 1u) { // 0 // final_state (void)StringToInt(splits[0]); // this is a final state finished = true; // set finish } else { K2_LOG(FATAL) << "Invalid line: " << line << "\nk2 acceptor expects a line with 1 (final_state) or " "4 (src_state dest_state label score) fields"; } } K2_CHECK_EQ(finished, true) << "The last line should be the final state"; bool error = true; Array1<Arc> array(GetCpuContext(), arcs); auto fsa = FsaFromArray1(array, &error); K2_CHECK_EQ(error, false); return fsa; } /* Create a transducer from a stream, assuming the transducer is in the K2 format: src_state1 dest_state1 label1 aux_label1 score1 src_state2 dest_state2 label2 aux_label2 score2 ... ... final_state The source states will be in non-descending order, and the final state does not bear a cost/score -- we put the cost/score on the arc that connects to the final state and set its label to -1. @param [in] is The input stream that contains the transducer. @return It returns an Fsa on CPU. */ static Fsa K2TransducerFromStream(std::istringstream &is, Array1<int32_t> *aux_labels) { NVTX_RANGE(K2_FUNC); K2_CHECK(aux_labels != nullptr); std::vector<int32_t> aux_labels_internal; std::vector<Arc> arcs; std::vector<std::string> splits; std::string line; bool finished = false; // when the final state is read, set it to true. while (std::getline(is, line)) { SplitStringToVector(line, kDelim, &splits); // splits is cleared in the function if (splits.empty()) continue; // this is an empty line K2_CHECK_EQ(finished, false); auto num_fields = splits.size(); if (num_fields == 5u) { // 0 1 2 3 4 // src_state dest_state label aux_label score int32_t src_state = StringToInt(splits[0]); int32_t dest_state = StringToInt(splits[1]); int32_t symbol = StringToInt(splits[2]); int32_t aux_label = StringToInt(splits[3]); float score = StringToFloat(splits[4]); arcs.emplace_back(src_state, dest_state, symbol, score); aux_labels_internal.push_back(aux_label); } else if (num_fields == 1u) { // 0 // final_state (void)StringToInt(splits[0]); finished = true; // set finish } else { K2_LOG(FATAL) << "Invalid line: " << line << "\nk2 transducer expects a line with 1 (final_state) or " "5 (src_state dest_state label aux_label score) fields"; } } K2_CHECK_EQ(finished, true) << "The last line should be the final state"; auto cpu_context = GetCpuContext(); *aux_labels = Array1<int32_t>(cpu_context, aux_labels_internal); Array1<Arc> array(cpu_context, arcs); bool error = true; auto fsa = FsaFromArray1(array, &error); K2_CHECK_EQ(error, false); return fsa; } /* Create an acceptor from a stream, assuming the acceptor is in the OpenFST format: src_state1 dest_state1 label1 score1 src_state2 dest_state2 label2 score2 ... ... 
final_state final_score We will negate the cost/score when we read them in. Also note, OpenFST may omit the cost/score if it is 0.0. We always create the super final state. If there are final state(s) in the original FSA, then we add arc(s) from the original final state(s) to the super final state, with the (negated) old final state cost/score as its cost/score, and -1 as its label. @param [in] is The input stream that contains the acceptor. @return It returns an Fsa on CPU. */ static Fsa OpenFstAcceptorFromStream(std::istringstream &is) { NVTX_RANGE(K2_FUNC); std::vector<Arc> arcs; std::vector<std::vector<Arc>> state_to_arcs; // indexed by states std::vector<std::string> splits; std::string line; int32_t max_state = -1; int32_t num_arcs = 0; std::vector<int32_t> original_final_states; std::vector<float> original_final_weights; while (std::getline(is, line)) { SplitStringToVector(line, kDelim, &splits); // splits is cleared in the function if (splits.empty()) continue; // this is an empty line auto num_fields = splits.size(); if (num_fields == 3u || num_fields == 4u) { // 0 1 2 // src_state dest_state label // // or // // 0 1 2 3 // src_state dest_state label score int32_t src_state = StringToInt(splits[0]); int32_t dest_state = StringToInt(splits[1]); int32_t symbol = StringToInt(splits[2]); float score = 0.0f; if (num_fields == 4u) score = -1.0f * StringToFloat(splits[3]); // Add the arc to "state_to_arcs". ++num_arcs; max_state = std::max(max_state, std::max(src_state, dest_state)); if (static_cast<int32_t>(state_to_arcs.size()) <= src_state) state_to_arcs.resize(src_state + 1); state_to_arcs[src_state].emplace_back(src_state, dest_state, symbol, score); } else if (num_fields == 1u || num_fields == 2u) { // 0 1 // final_state score float score = 0.0f; if (num_fields == 2u) score = -1.0f * StringToFloat(splits[1]); original_final_states.push_back(StringToInt(splits[0])); original_final_weights.push_back(score); max_state = std::max(max_state, original_final_states.back()); } else { K2_LOG(FATAL) << "Invalid line: " << line << "\nOpenFST acceptor expects a line with 1 (final_state)," " 2 (final_state score), 3 (src_state dest_state label) " "or 4 (src_state dest_state label score) fields."; } } K2_CHECK(is.eof()); // Post processing on final states. If there are final state(s) in the // original FSA, we add the super final state as well as arc(s) from original // final state(s) to the super final state. Otherwise, the super final state // will be added by FsaFromArray1 (since there's no arc with label // kFinalSymbol). if (original_final_states.size() > 0) { K2_CHECK_EQ(original_final_states.size(), original_final_weights.size()); int32_t super_final_state = max_state + 1; state_to_arcs.resize(super_final_state); for (std::size_t i = 0; i != original_final_states.size(); ++i) { state_to_arcs[original_final_states[i]].emplace_back( original_final_states[i], super_final_state, -1, // kFinalSymbol original_final_weights[i]); ++num_arcs; } } // Move arcs from "state_to_arcs" to "arcs". int32_t arc_index = 0; arcs.resize(num_arcs); for (std::size_t s = 0; s < state_to_arcs.size(); ++s) { for (std::size_t a = 0; a < state_to_arcs[s].size(); ++a) { K2_CHECK_GT(num_arcs, arc_index); arcs[arc_index] = state_to_arcs[s][a]; ++arc_index; } } K2_CHECK_EQ(num_arcs, arc_index); bool error = true; Array1<Arc> array(GetCpuContext(), arcs); // FsaFromArray1 will add a super final state if the original FSA doesn't have // a final state. 
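  // For illustration: a hypothetical OpenFST line "2 0.5" (state 2 is final
  // with weight 0.5) has, by this point, been turned into the arc
  //   Arc(2, super_final_state, /*label*/-1, /*score*/-0.5)
  // because OpenFST weights are negated when read in.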
auto fsa = FsaFromArray1(array, &error); K2_CHECK_EQ(error, false); return fsa; } /* Create a transducer from a stream, assuming the transducer is in the OpenFST format: src_state1 dest_state1 label1 aux_label1 score1 src_state2 dest_state2 label2 aux_label2 score2 ... ... final_state final_score We will negate the cost/score when we read them in. Also note, OpenFST may omit the cost/score if it is 0.0. We always create the super final state. If there are final state(s) in the original FST, then we add arc(s) from the original final state(s) to the super final state, with the (negated) old final state cost/score as its cost/score, -1 as its label and -1 as its aux_label. @param [in] is The input stream that contains the transducer. @return It returns an Fsa on CPU. */ static Fsa OpenFstTransducerFromStream(std::istringstream &is, Array1<int32_t> *aux_labels) { NVTX_RANGE(K2_FUNC); K2_CHECK(aux_labels != nullptr); std::vector<std::vector<int32_t>> state_to_aux_labels; // indexed by states std::vector<std::vector<Arc>> state_to_arcs; // indexed by states std::vector<int32_t> aux_labels_internal; std::vector<Arc> arcs; std::vector<std::string> splits; std::string line; int32_t max_state = -1; int32_t num_arcs = 0; std::vector<int32_t> original_final_states; std::vector<float> original_final_weights; while (std::getline(is, line)) { SplitStringToVector(line, kDelim, &splits); // splits is cleared in the function if (splits.empty()) continue; // this is an empty line auto num_fields = splits.size(); if (num_fields == 4u || num_fields == 5u) { // 0 1 2 3 // src_state dest_state label aux_label // // or // // 0 1 2 3 4 // src_state dest_state label aux_label score int32_t src_state = StringToInt(splits[0]); int32_t dest_state = StringToInt(splits[1]); int32_t symbol = StringToInt(splits[2]); int32_t aux_label = StringToInt(splits[3]); float score = 0.0f; if (num_fields == 5u) score = -1.0f * StringToFloat(splits[4]); // Add the arc to "state_to_arcs", and aux_label to "state_to_aux_labels" ++num_arcs; max_state = std::max(max_state, std::max(src_state, dest_state)); if (static_cast<int32_t>(state_to_arcs.size()) <= src_state) { state_to_arcs.resize(src_state + 1); state_to_aux_labels.resize(src_state + 1); } state_to_arcs[src_state].emplace_back(src_state, dest_state, symbol, score); state_to_aux_labels[src_state].push_back(aux_label); } else if (num_fields == 1u || num_fields == 2u) { // 0 // final_state // // or // // 0 1 // final_state score // There could be multiple final states, so we first have to collect all // the final states, and then work out the super final state. float score = 0.0f; if (num_fields == 2u) score = -1.0f * StringToFloat(splits[1]); original_final_states.push_back(StringToInt(splits[0])); original_final_weights.push_back(score); max_state = std::max(max_state, original_final_states.back()); } else { K2_LOG(FATAL) << "Invalid line: " << line << "\nOpenFST transducer expects a line with " "1 (final_state), 2 (final_state score), " "4 (src_state dest_state label aux_label) or " "5 (src_state dest_state label aux_label score) fields."; } } K2_CHECK(is.eof()); // Post processing on final states. If there are final state(s) in the // original FST, we add the super final state as well as arc(s) from original // final state(s) to the super final state. Otherwise, the super final state // will be added by FsaFromArray1 (since there's no arc with label // kFinalSymbol). 
if (original_final_states.size() > 0) { K2_CHECK_EQ(original_final_states.size(), original_final_weights.size()); int32_t super_final_state = max_state + 1; state_to_arcs.resize(super_final_state); state_to_aux_labels.resize(super_final_state); for (std::size_t i = 0; i != original_final_states.size(); ++i) { state_to_arcs[original_final_states[i]].emplace_back( original_final_states[i], super_final_state, -1, // kFinalSymbol original_final_weights[i]); state_to_aux_labels[original_final_states[i]].push_back( -1); // kFinalSymbol ++num_arcs; } } // Move arcs from "state_to_arcs" to "arcs", and aux_labels from // "state_to_aux_labels" to "aux_labels_internal" int32_t arc_index = 0; arcs.resize(num_arcs); aux_labels_internal.resize(num_arcs); K2_CHECK_EQ(state_to_arcs.size(), state_to_aux_labels.size()); for (std::size_t s = 0; s < state_to_arcs.size(); ++s) { K2_CHECK_EQ(state_to_arcs[s].size(), state_to_aux_labels[s].size()); for (std::size_t a = 0; a < state_to_arcs[s].size(); ++a) { K2_CHECK_GT(num_arcs, arc_index); arcs[arc_index] = state_to_arcs[s][a]; aux_labels_internal[arc_index] = state_to_aux_labels[s][a]; ++arc_index; } } K2_CHECK_EQ(num_arcs, arc_index); auto cpu_context = GetCpuContext(); *aux_labels = Array1<int32_t>(cpu_context, aux_labels_internal); Array1<Arc> array(cpu_context, arcs); bool error = true; // FsaFromArray1 will add a super final state if the original FSA doesn't have // a final state. auto fsa = FsaFromArray1(array, &error); K2_CHECK_EQ(error, false); return fsa; } Fsa FsaFromString(const std::string &s, bool openfst /*= false*/, Array1<int32_t> *aux_labels /*= nullptr*/) { NVTX_RANGE(K2_FUNC); std::istringstream is(s); K2_CHECK(is); if (openfst == false && aux_labels == nullptr) return K2AcceptorFromStream(is); else if (openfst == false && aux_labels != nullptr) return K2TransducerFromStream(is, aux_labels); else if (openfst == true && aux_labels == nullptr) return OpenFstAcceptorFromStream(is); else if (openfst == true && aux_labels != nullptr) return OpenFstTransducerFromStream(is, aux_labels); return Fsa(); // unreachable code } std::string FsaToString(const Fsa &fsa, bool openfst /*= false*/, const Array1<int32_t> *aux_labels /*= nullptr*/) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(fsa.NumAxes(), 2); if (fsa.Context()->GetDeviceType() != kCpu) { Fsa _fsa = fsa.To(GetCpuContext()); Array1<int32_t> _aux_labels; if (aux_labels) _aux_labels = aux_labels->To(_fsa.Context()); return FsaToString(_fsa, openfst, aux_labels ? 
&_aux_labels : nullptr); } K2_CHECK_EQ(fsa.Context()->GetDeviceType(), kCpu); const Array1<int32_t> &row_splits = fsa.shape.RowSplits(1); const Array1<Arc> &arcs = fsa.values; const int32_t *p = nullptr; if (aux_labels != nullptr) { K2_CHECK(IsCompatible(fsa, *aux_labels)); K2_CHECK_EQ(aux_labels->Dim(), arcs.Dim()); p = aux_labels->Data(); } float scale = 1; if (openfst) scale = -1; std::ostringstream os; int32_t n = arcs.Dim(); char sep = ' '; char line_sep = '\n'; for (int32_t i = 0; i != n; ++i) { const auto &arc = arcs[i]; os << arc.src_state << sep << arc.dest_state << sep << arc.label << sep; if (p != nullptr) os << p[i] << sep; os << (scale * arc.score) << line_sep; } os << (fsa.shape.Dim0() - 1) << line_sep; return os.str(); } Array1<int32_t> GetDestStates(FsaVec &fsas, bool as_idx01) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(fsas.NumAxes(), 3); ContextPtr &c = fsas.Context(); int32_t num_arcs = fsas.NumElements(); Array1<int32_t> ans(c, num_arcs); const Arc *arcs_data = fsas.values.Data(); int32_t *ans_data = ans.Data(); if (!as_idx01) { K2_EVAL( c, num_arcs, lambda_set_dest_states1, (int32_t arc_idx012) { ans_data[arc_idx012] = arcs_data[arc_idx012].dest_state; }); } else { const int32_t *row_ids2 = fsas.RowIds(2).Data(); K2_EVAL( c, num_arcs, lambda_set_dest_states01, (int32_t arc_idx012) { int32_t src_state = arcs_data[arc_idx012].src_state, dest_state = arcs_data[arc_idx012].dest_state; // (row_ids2[arc_idx012] - src_state) is the same as // row_splits1[row_ids1[row_ids2[arc_idx012]]]; it's the idx01 of the // 1st state in this FSA. ans_data[arc_idx012] = dest_state + (row_ids2[arc_idx012] - src_state); }); } return ans; } Ragged<int32_t> GetStateBatches(FsaVec &fsas, bool transpose) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(fsas.NumAxes(), 3); ContextPtr &c = fsas.Context(); Array1<int32_t> arc_dest_states = GetDestStates(fsas, true); MonotonicLowerBound(arc_dest_states, &arc_dest_states); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); // We can tune `log_power` as a tradeoff between work done and clock time on // GPU. int32_t log_power = (c->GetDeviceType() == kCpu ? 0 : 4); int32_t max_num_states = fsas.shape.MaxSize(1); // the following avoids doing too much extra work accumulating powers // of 'dest_states' for very small problem sizes. while (log_power > 0 && (1 << (1 + log_power)) > max_num_states) log_power--; // Ignoring edge effects: `dest_states_powers[0]` is just an array indexed by // state_idx01, that gives us the dest_state_idx01 that would be the beginning // of the next batch if state_idx01 were the beginning of the current batch. // So if we follow this chain forward from the start of one of the FSAs until // it passes the end of this FSA, we get the beginnings of the batches // we want. The natural algorithm to find the beginnings of the batches // is sequential. 
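  // For illustration: `dest_states_powers` acts as a pointer-doubling table.
  // Row 0 stores f(s) = "the state that would start the next batch if s
  // started the current one", and row k stores f iterated 2^k times, i.e.
  //   dest_states_powers(k, s) == dest_states_powers(k - 1,
  //                                   dest_states_powers(k - 1, s))
  // with values past the end clamped to int_max.  This is what lets each job
  // further below advance 2^log_power batches per step instead of one.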
Array2<int32_t> dest_states_powers(c, log_power + 1, num_states); const int32_t *arc_dest_states_data = arc_dest_states.Data(), *fsas_row_splits2_data = fsas.RowSplits(2).Data(); int32_t *dest_states_power_data = dest_states_powers.Data(); // only process Row[0] below const int32_t int_max = std::numeric_limits<int32_t>::max(); K2_EVAL( c, num_states, lambda_set_dest_states, (int32_t state_idx01)->void { int32_t arc_idx01x = fsas_row_splits2_data[state_idx01]; // If this state has arcs, let its `dest_state` be the smallest // `dest_state` of any of its arcs (which is the first element of those // arcs' dest states in `arc_dest_states_data`); otherwise, take the // `dest_state` from the 1st arc of the next state, which is the largest // value we can take (if the definition is: the highest-numbered state s // for which neither this state nor any later-numbered state has an arc // to a state lower than s). // if this state has arcs, // arc_idx01x is the first arc index of this state, we get the // smallest dest state of this state's arcs using // arc_dest_states_data[arc_idx01x] // else // arc_idx01x is the first arc index of the next state, then // arc_dest_states_data[arc_idx01x] is the largest value we can take, // which is also the smallest dest state in the next state. int32_t dest_state = (arc_idx01x < num_arcs ? arc_dest_states_data[arc_idx01x] : int_max); dest_states_power_data[state_idx01] = dest_state; // if the following fails, it's either a code error or the input FSA had // cycles. K2_CHECK_GT(dest_state, state_idx01); }); // `num_batches_per_fsa` will be set to the number of batches of states that // we'll use for each FSA... it corresponds to the number of times we have // to follow links forward in the dest_states array till we pass the // end of the array for this fSA. Array1<int32_t> num_batches_per_fsa(c, num_fsas + 1, 0); // `batch_starts` will contain the locations of the first state_idx01 for each // batch, but in an 'un-consolidated' format. Specifically, for FSA with // index i, the batch_starts for that FSA begin at element fsa.RowSplits(1)[i] // of `batch_starts`. This is just a convenient layout because we know there // can't be more batches than there are states. We'll later consolidate the // information into a single array. Array1<int32_t> batch_starts(c, num_states + 1); int32_t *num_batches_per_fsa_data = num_batches_per_fsa.Data(), *batch_starts_data = batch_starts.Data(); const int32_t *fsas_row_splits1_data = fsas.RowSplits(1).Data(); #if 0 // This is a simple version of the kernel that demonstrates what we're trying // to do with the more complex code. K2_EVAL( c, num_fsas, lambda_set_batch_info_simple, (int32_t fsa_idx) { int32_t begin_state_idx01 = fsas_row_splits1_data[fsa_idx], end_state_idx01 = fsas_row_splits1_data[fsa_idx + 1]; int32_t i = 0, cur_state_idx01 = begin_state_idx01; while (cur_state_idx01 < end_state_idx01) { batch_starts_data[begin_state_idx01 + i] = cur_state_idx01; cur_state_idx01 = dest_states_power_data[cur_state_idx01]; ++i; } num_batches_per_fsa_data[fsa_idx] = i; }); #else int32_t stride = dest_states_powers.ElemStride0(); for (int32_t power = 1; power <= log_power; power++) { const int32_t *src_data = dest_states_powers.Data() + (power - 1) * stride; int32_t *dest_data = dest_states_powers.Data() + power * stride; K2_EVAL( c, num_states, lambda_square_array, (int32_t state_idx01)->void { int32_t dest_state = src_data[state_idx01], dest_state_sq = (dest_state < num_states ? 
src_data[dest_state] : int_max); dest_data[state_idx01] = dest_state_sq; }); } // jobs_per_fsa tells us how many separate chains of states we'll follow for // each FSA. // jobs_multiple is a kind of trick to ensure any given warp doesn't // issue more memory requests than it can handle at a time (we drop // some threads). int32_t jobs_per_fsa = (1 << log_power), jobs_multiple = (c->GetDeviceType() == kCuda ? 8 : 1); while (jobs_multiple > 1 && jobs_per_fsa * jobs_multiple * num_fsas > 10000) jobs_multiple /= 2; // Likely won't get here. Just reduce multiple if // num-jobs is ridiculous. auto dest_states_powers_acc = dest_states_powers.Accessor(); K2_EVAL2( c, num_fsas, jobs_per_fsa * jobs_multiple, lambda_set_batch_info, (int32_t fsa_idx, int32_t j) { if (j % jobs_multiple != 0) return; // a trick to avoid too much random // memory access for any given warp int32_t task_idx = j / jobs_multiple; // Now 0 <= task_idx < jobs_per_fsa. // The task indexed `task_idx` is responsible for batches numbered // task_idx, task_idx + jobs_per_fsa, task_index + 2 * job_per_fsa and // so on, for the FSA numbered `fsa_idx`. Comparing this code to // `lambda_set_batch_info_simple`, this task is responsible for the // assignment to batch_starts_data for all i such that i % jobs_per_fsas // == task_idx, together with the assignment to // num_batchess_per_fsa_data if // i % jobs_per_fsas == task_idx (here referring to the i value finally // assigned to that location). int32_t begin_state_idx01 = fsas_row_splits1_data[fsa_idx], end_state_idx01 = fsas_row_splits1_data[fsa_idx + 1]; int32_t num_states_this_fsa = end_state_idx01 - begin_state_idx01; int32_t i = 0, cur_state_idx01 = begin_state_idx01; if (task_idx >= num_states_this_fsa) return; // The next loop advances `cur_state_idx01` by // a number of steps equal to `task_idx`. for (int32_t m = 0; m < log_power; ++m) { int32_t n = 1 << m; if ((task_idx & n) != 0) { i += n; int32_t next = dest_states_powers_acc(m, cur_state_idx01); if (next >= end_state_idx01) return; cur_state_idx01 = next; } } K2_CHECK_EQ(i, task_idx); while (1) { if (i >= num_states_this_fsa) return; batch_starts_data[begin_state_idx01 + i] = cur_state_idx01; int32_t next_state_idx01 = dest_states_powers_acc( log_power, cur_state_idx01); // advance jobs_per_fsa = (1 << log_power) // steps if (next_state_idx01 >= end_state_idx01) { // if exactly one step would also be enough to take us past the // boundary... 
if (dest_states_powers_acc(0, cur_state_idx01) >= end_state_idx01) { num_batches_per_fsa_data[fsa_idx] = i + 1; } return; } else { i += jobs_per_fsa; cur_state_idx01 = next_state_idx01; } } }); #endif ExclusiveSum(num_batches_per_fsa, &num_batches_per_fsa); Array1<int32_t> &ans_row_splits1 = num_batches_per_fsa; int32_t num_batches = num_batches_per_fsa[num_fsas]; Array1<int32_t> ans_row_ids1(c, num_batches); RowSplitsToRowIds(ans_row_splits1, &ans_row_ids1); Array1<int32_t> ans_row_splits2(c, num_batches + 1); const int32_t *ans_row_splits1_data = ans_row_splits1.Data(), *ans_row_ids1_data = ans_row_ids1.Data(); int32_t *ans_row_splits2_data = ans_row_splits2.Data(); ans_row_splits2.Range(num_batches, 1) = num_states; // The kernel below won't // set this last element K2_EVAL( c, num_batches, lambda_set_ans_row_splits2, (int32_t idx01)->void { int32_t idx0 = ans_row_ids1_data[idx01], // Fsa index idx0x = ans_row_splits1_data[idx0], idx1 = idx01 - idx0x, fsas_idx0x = fsas_row_splits1_data[idx0]; // 1st state-idx (idx01) // in fsas_, for this FSA int32_t fsas_idx01 = fsas_idx0x + idx1; // the idx1 is actually the // batch-index, this statement // reflects the 'un-consolidated' // format of `batch_starts`. int32_t this_batch_start = batch_starts_data[fsas_idx01]; ans_row_splits2_data[idx01] = this_batch_start; }); RaggedShape ans_shape = RaggedShape3(&ans_row_splits1, &ans_row_ids1, num_batches, &ans_row_splits2, nullptr, num_states); Array1<int32_t> ans_value = Range(c, num_states, 0); if (transpose) { ans_shape = MakeTransposable(ans_shape); Ragged<int32_t> ans(ans_shape, ans_value); return Transpose(ans); } else { return Ragged<int32_t>(ans_shape, ans_value); } } Ragged<int32_t> GetIncomingArcs(FsaVec &fsas, const Array1<int32_t> &dest_states) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(fsas.NumAxes(), 3); K2_CHECK(IsCompatible(fsas, dest_states)); ContextPtr &c = fsas.Context(); Ragged<int32_t> dest_states_tensor(fsas.shape, dest_states); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); Array1<int32_t> incoming_arcs_order = GetTransposeReordering(dest_states_tensor, num_states), ans_row_ids2 = dest_states[incoming_arcs_order]; // Note: incoming_arcs_row_ids2 will be monotonically increasing Array1<int32_t> ans_row_splits2(c, num_states + 1); RowIdsToRowSplits(ans_row_ids2, &ans_row_splits2); // Axis 1 corresponds to FSA states, so the row-ids and row-splits for axis // 1 are the same as for `fsas`. 
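  // For illustration: the values of the returned ragged tensor are the
  // original arc_idx012's grouped by destination state; e.g. if arcs 3 and 7
  // are the only arcs entering state 5 (a state idx01), then row 5 of the
  // result is {3, 7}, in the order chosen by GetTransposeReordering().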
Array1<int32_t> ans_row_ids1 = fsas.RowIds(1), ans_row_splits1 = fsas.RowSplits(1); return Ragged<int32_t>( RaggedShape3(&ans_row_splits1, &ans_row_ids1, num_states, &ans_row_splits2, &ans_row_ids2, num_arcs), incoming_arcs_order); } Ragged<int32_t> GetLeavingArcIndexBatches(FsaVec &fsas, Ragged<int32_t> &state_batches) { NVTX_RANGE(K2_FUNC); K2_CHECK(IsCompatible(fsas, state_batches)); K2_CHECK_EQ(fsas.NumAxes(), 3); K2_CHECK_EQ(state_batches.NumAxes(), 3); ContextPtr &c = fsas.Context(); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); int32_t num_batches = state_batches.Dim0(); K2_DCHECK(state_batches.TotSize(1) == num_fsas * num_batches); K2_DCHECK_EQ(state_batches.NumElements(), num_states); // get ans_shape Array1<int32_t> ans_row_splits3(c, num_states + 1); int32_t *ans_row_splits3_data = ans_row_splits3.Data(); const int32_t *fsa_states_row_splits_data = fsas.RowSplits(2).Data(); const int32_t *batch_states_data = state_batches.values.Data(); K2_EVAL( c, num_states, lambda_set_ans_row_splits3, (int32_t idx) { int32_t state_idx = batch_states_data[idx]; ans_row_splits3_data[idx] = fsa_states_row_splits_data[state_idx + 1] - fsa_states_row_splits_data[state_idx]; }); ExclusiveSum(ans_row_splits3, &ans_row_splits3); Array1<int32_t> ans_row_ids3(c, num_arcs); RowSplitsToRowIds(ans_row_splits3, &ans_row_ids3); RaggedShape ans_shape = ComposeRaggedShapes( state_batches.shape, RaggedShape2(&ans_row_splits3, &ans_row_ids3, num_arcs)); // get ans_values Array1<int32_t> ans_values(c, num_arcs); int32_t *ans_values_data = ans_values.Data(); const int32_t *ans_row_ids3_data = ans_row_ids3.Data(); K2_EVAL( c, num_arcs, lambda_set_ans_values, (int32_t idx0123) { int32_t ans_idx012 = ans_row_ids3_data[idx0123]; int32_t state_idx = batch_states_data[ans_idx012]; // state_idx is idx01 in fsas int32_t fsa_idx01x = fsa_states_row_splits_data[state_idx]; // ans_idx3 is fsas_idx2, i.e. 
the arc idx in a state int32_t ans_idx3 = idx0123 - ans_row_splits3_data[ans_idx012]; ans_values_data[idx0123] = fsa_idx01x + ans_idx3; }); return Ragged<int32_t>(ans_shape, ans_values); } Ragged<int32_t> GetEnteringArcIndexBatches(FsaVec &fsas, Ragged<int32_t> &incoming_arcs, Ragged<int32_t> &state_batches) { NVTX_RANGE(K2_FUNC); K2_CHECK(IsCompatible(fsas, state_batches)); K2_CHECK(IsCompatible(fsas, incoming_arcs)); K2_CHECK_EQ(fsas.NumAxes(), 3); K2_CHECK_EQ(incoming_arcs.NumAxes(), 3); K2_CHECK_EQ(state_batches.NumAxes(), 3); ContextPtr &c = fsas.Context(); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); int32_t num_batches = state_batches.Dim0(); // just using DCHECK below to save time in production code K2_DCHECK(state_batches.TotSize(1) == num_fsas * num_batches); K2_DCHECK_EQ(state_batches.NumElements(), num_states); K2_DCHECK_EQ(incoming_arcs.Dim0(), num_fsas); K2_DCHECK_EQ(incoming_arcs.TotSize(1), num_states); K2_DCHECK_EQ(incoming_arcs.NumElements(), num_arcs); // get ans_shape Array1<int32_t> ans_row_splits3(c, num_states + 1); int32_t *ans_row_splits3_data = ans_row_splits3.Data(); const int32_t *incoming_arcs_row_splits_data = incoming_arcs.RowSplits(2).Data(); const int32_t *batch_states_data = state_batches.values.Data(); K2_EVAL( c, num_states, lambda_set_ans_row_splits3, (int32_t idx) { int32_t state_idx = batch_states_data[idx]; ans_row_splits3_data[idx] = incoming_arcs_row_splits_data[state_idx + 1] - incoming_arcs_row_splits_data[state_idx]; }); ExclusiveSum(ans_row_splits3, &ans_row_splits3); Array1<int32_t> ans_row_ids3(c, num_arcs); RowSplitsToRowIds(ans_row_splits3, &ans_row_ids3); RaggedShape ans_shape = ComposeRaggedShapes( state_batches.shape, RaggedShape2(&ans_row_splits3, &ans_row_ids3, num_arcs)); // get ans_values Array1<int32_t> ans_values(c, num_arcs); int32_t *ans_values_data = ans_values.Data(); const int32_t *ans_row_ids3_data = ans_row_ids3.Data(); const int32_t *incoming_arcs_data = incoming_arcs.values.Data(); K2_EVAL( c, num_arcs, lambda_set_ans_values, (int32_t idx0123) { int32_t ans_idx012 = ans_row_ids3_data[idx0123]; int32_t state_idx = batch_states_data[ans_idx012]; // state_idx is idx01 in // incoming_arcs int32_t incoming_arcs_idx01x = incoming_arcs_row_splits_data[state_idx]; // ans_idx3 is incoming_arcs_idx2, i.e. the entering arc idx for a state int32_t ans_idx3 = idx0123 - ans_row_splits3_data[ans_idx012]; int32_t incoming_arcs_idx012 = incoming_arcs_idx01x + ans_idx3; ans_values_data[idx0123] = incoming_arcs_data[incoming_arcs_idx012]; }); return Ragged<int32_t>(ans_shape, ans_values); } FsaVec ConvertDenseToFsaVec(DenseFsaVec &src) { NVTX_RANGE(K2_FUNC); ContextPtr &c = src.shape.Context(); // caution: 'num_symbols' is the number of symbols excluding the final-symbol // -1. int32_t num_fsas = src.shape.Dim0(), num_symbols = src.scores.Dim1() - 1; // the "1" is the extra state per FSA we need in the FsaVec format, // for the final-state. RaggedShape fsa2state = ChangeSublistSize(src.shape, 1); // again, the "+num_fsas" below is the extra state per FSA we need in the // FsaVec format, for the final-state. int32_t num_states = src.shape.NumElements() + num_fsas; // The explanation num-arcs below is as follows: // Firstly, all rows of src.scores (==all elements of src.shape) correspond // to states with arcs leaving them. 
Most of them have `num_symbols` arcs, // but the final one for each FSA has 1 arc (with symbol -1) int32_t num_arcs = src.shape.NumElements() * num_symbols - (num_symbols - 1) * num_fsas; Array1<int32_t> row_splits2(c, num_states + 1), row_ids2(c, num_arcs); const int32_t *row_ids1_data = fsa2state.RowIds(1).Data(), *src_row_ids1_data = src.shape.RowIds(1).Data(), *src_row_splits1_data = src.shape.RowSplits(1).Data(); Array1<Arc> arcs(c, num_arcs); Arc *arcs_data = arcs.Data(); auto scores_acc = src.scores.Accessor(); int32_t *row_splits2_data = row_splits2.Data(), *row_ids2_data = row_ids2.Data(); // 0 <= s < num_symbols; note, `num_symbols` excludes the final-symbol (-1). // note: `src` means: w.r.t. the numbering in the original DenseFsaVec. K2_EVAL2( c, src.shape.NumElements(), num_symbols, lambda_set_arcs_etc, (int32_t src_state_idx01, int32_t s)->void { int32_t fsa_idx0 = src_row_ids1_data[src_state_idx01], src_state_idx0x = src_row_splits1_data[fsa_idx0], state_idx1 = src_state_idx01 - src_state_idx0x, src_next_state_idx0x = src_row_splits1_data[fsa_idx0 + 1], src_num_states1 = src_next_state_idx0x - src_state_idx0x, ans_state_idx01 = src_state_idx01 + fsa_idx0; // we add one final-state per FSA.. // "+ fsa_idx0" gives the // difference from old->new // numbering. // arc_idx0xx is the 1st arc-index of the FSA we are creating.. each // source state has `num_symbols` arcs leaving it except the last one of // each FSA, which has 1 arc leaving it (to the final-state). int32_t arc_idx0xx = (src_state_idx0x * num_symbols) - fsa_idx0 * (num_symbols - 1), arc_idx01x = arc_idx0xx + (state_idx1 * num_symbols), arc_idx012 = arc_idx01x + s; int32_t symbol_offset; if (state_idx1 + 1 == src_num_states1) { symbol_offset = -1; if (s > 0) return; // we just need the arc with -1. // if this is the state before the final state of this FSA. it has the // responsibility to write the row_splits2 value for the final state. // It's arc_idx012 + 1; the "+1" corresponds to the single arc with // the final-symbol on it. row_splits2_data[ans_state_idx01 + 1] = arc_idx012 + 1; } else { symbol_offset = 0; } // the "+ 1" is because index 0 in `scores` is for the final-symbol -1, // then 0, 1, etc. int32_t symbol_index_in_scores = s + symbol_offset + 1; arcs_data[arc_idx012] = Arc(state_idx1, state_idx1 + 1, s + symbol_offset, scores_acc(src_state_idx01, symbol_index_in_scores)); row_ids2_data[arc_idx012] = ans_state_idx01; if (s == 0) { // 1st arc for this state. 
row_splits2_data[ans_state_idx01] = arc_idx012; K2_CHECK(row_ids1_data[ans_state_idx01] == fsa_idx0); if (src_state_idx01 == 0) row_splits2_data[num_states] = num_arcs; } }); RaggedShape state2arc = RaggedShape2(&row_splits2, &row_ids2, num_arcs); return Ragged<Arc>(ComposeRaggedShapes(fsa2state, state2arc), arcs); } template <typename FloatType> Array1<FloatType> GetForwardScores(FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &entering_arc_batches, bool log_semiring, Array1<int32_t> *entering_arcs) { NVTX_RANGE(K2_FUNC); K2_STATIC_ASSERT((std::is_same<float, FloatType>::value || std::is_same<double, FloatType>::value)); K2_CHECK(IsCompatible(fsas, state_batches)); K2_CHECK(IsCompatible(fsas, entering_arc_batches)); K2_CHECK_EQ(fsas.NumAxes(), 3); K2_CHECK_EQ(state_batches.NumAxes(), 3); K2_CHECK_EQ(entering_arc_batches.NumAxes(), 4); ContextPtr &c = fsas.Context(); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); int32_t num_batches = state_batches.Dim0(); // just using DCHECK below to save time in production code K2_DCHECK(state_batches.TotSize(1) == num_fsas * num_batches); K2_DCHECK_EQ(state_batches.NumElements(), num_states); K2_DCHECK_EQ(entering_arc_batches.Dim0(), num_batches); K2_DCHECK_EQ(entering_arc_batches.TotSize(1), state_batches.TotSize(1)); K2_DCHECK_EQ(entering_arc_batches.TotSize(2), num_states); K2_DCHECK_EQ(entering_arc_batches.NumElements(), num_arcs); FloatType negative_infinity = -std::numeric_limits<FloatType>::infinity(); Array1<FloatType> state_scores(c, num_states, negative_infinity); FloatType *state_scores_data = state_scores.Data(); // set the score of start state in each fsa to be 0 const int32_t *fsa_row_splits1 = fsas.RowSplits(1).Data(); K2_EVAL( c, num_fsas, lambda_set_start_state_score, (int32_t fsa_idx) { int32_t start_state = fsa_row_splits1[fsa_idx], start_state_next_fsa = fsa_row_splits1[fsa_idx + 1]; if (start_state_next_fsa - start_state > 0) state_scores_data[start_state] = 0; }); // get the 1st entering arc index in each batch, +1 so we can get the number // of entering arcs in each batch by taking the difference of adjacent // elements Array1<int32_t> entering_arc_start_index(c, num_batches + 1); int32_t *entering_arc_start_index_data = entering_arc_start_index.Data(); const int32_t *arc_batches_row_splits1 = entering_arc_batches.RowSplits(1).Data(); const int32_t *arc_batches_row_splits2 = entering_arc_batches.RowSplits(2).Data(); const int32_t *arc_batches_row_splits3 = entering_arc_batches.RowSplits(3).Data(); K2_EVAL( c, num_batches, lambda_set_entering_arc_start_index, (int32_t batch_idx) { int32_t this_state_idx0xx = arc_batches_row_splits2[batch_idx * num_fsas]; int32_t this_arc_idx0xxx = arc_batches_row_splits3[this_state_idx0xx]; entering_arc_start_index_data[batch_idx] = this_arc_idx0xxx; if (batch_idx == num_batches - 1) { // process the last element int32_t next_state_idx0xx = arc_batches_row_splits2[num_batches * num_fsas]; int32_t next_arc_idx0xxx = arc_batches_row_splits3[next_state_idx0xx]; entering_arc_start_index_data[num_batches] = next_arc_idx0xxx; } }); const int32_t *arc_batches_row_ids1 = entering_arc_batches.RowIds(1).Data(); const int32_t *arc_batches_row_ids2 = entering_arc_batches.RowIds(2).Data(); const int32_t *arc_batches_row_ids3 = entering_arc_batches.RowIds(3).Data(); const int32_t *entering_arc_ids = entering_arc_batches.values.Data(); const int32_t *states_data = state_batches.values.Data(); const Arc *arcs = fsas.values.Data(); Array1<FloatType> 
entering_arc_score_values( c, num_arcs); // entering arc_scores in batches FloatType *arc_scores_data = entering_arc_score_values.Data(); // copy entering_arc_start_index to cpu as we will access its elements in // below Eval function for `lambda_set_entering_arc_scores` Array1<int32_t> cpu_entering_arc_start_index = entering_arc_start_index.To(GetCpuContext()); const int32_t *cpu_entering_arc_start = cpu_entering_arc_start_index.Data(); // copy the index of start state in each fsa to CPU Array1<int32_t> &arc_batches_row_splits1_array = entering_arc_batches.RowSplits(1); Array1<int32_t> arc_batches_row_splits12_cpu = entering_arc_batches.RowSplits(2)[arc_batches_row_splits1_array].To( GetCpuContext()); K2_CHECK_EQ(arc_batches_row_splits12_cpu.Dim(), num_batches + 1); const int32_t *arc_batches_row_splits12_cpu_data = arc_batches_row_splits12_cpu.Data(); Array1<int32_t> arc_row_splits_mem(c, num_states + 1); Array1<FloatType> score_cache(c, num_states + 1); int32_t *entering_arcs_data = nullptr; if (entering_arcs) { K2_CHECK_EQ(log_semiring, false) << " entering_arcs supplied"; *entering_arcs = Array1<int32_t>(c, num_states, -1); entering_arcs_data = entering_arcs->Data(); } // process batch sequentially. for (int32_t i = 0; i < num_batches; ++i) { // get the range we would call Max/LogSum per sub list int32_t this_state_idx0xx = arc_batches_row_splits12_cpu_data[i], next_state_idx0xx = arc_batches_row_splits12_cpu_data[i + 1]; K2_CHECK_LT(this_state_idx0xx, num_states); K2_CHECK_LE(next_state_idx0xx, num_states); int32_t num_states_this_batch = next_state_idx0xx - this_state_idx0xx; K2_CHECK_LT(num_states_this_batch, arc_row_splits_mem.Dim()); // we always use the first `num_states_this_batch` elements in // arc_row_splits_mem. Array1<int32_t> arc_row_splits_part = arc_row_splits_mem.Range( 0, num_states_this_batch + 1); // +1 for the last element int32_t num_arcs_this_batch = cpu_entering_arc_start[i + 1] - cpu_entering_arc_start[i]; { ParallelRunner pr(c); // get entering arc scores { With w(pr.NewStream()); K2_EVAL( c, num_arcs_this_batch, lambda_set_entering_arc_score, (int32_t idx123) { // all idx** in below code are the indexes to entering_arc_batches int32_t idx0123 = entering_arc_start_index_data[i] + idx123; int32_t idx012 = arc_batches_row_ids3[idx0123]; int32_t idx01 = arc_batches_row_ids2[idx012]; K2_CHECK_EQ(idx01 / num_fsas, i); // idx01/num_fsas is batch_id int32_t fsa_id = idx01 % num_fsas; int32_t entering_arc_id = entering_arc_ids[idx0123]; float curr_arc_score = arcs[entering_arc_id].score; int32_t src_state_idx1 = arcs[entering_arc_id].src_state; int32_t src_state_idx01 = fsa_row_splits1[fsa_id] + src_state_idx1; arc_scores_data[idx0123] = state_scores_data[src_state_idx01] + curr_arc_score; }); } { With w(pr.NewStream()); // make entering arc row splits info in each batch starting from zero, // we will use it to call MaxPerSublist or LogSumPerSubList int32_t *sum_splits_data = arc_row_splits_part.Data(); K2_EVAL( c, num_states_this_batch + 1, lambda_set_row_splits_for_sum, (int32_t idx) { sum_splits_data[idx] = arc_batches_row_splits3[idx + this_state_idx0xx] - arc_batches_row_splits3[this_state_idx0xx]; }); } } int32_t this_arc_idx0xxx = cpu_entering_arc_start[i]; Array1<FloatType> sub_scores_values = entering_arc_score_values.Range(this_arc_idx0xxx, num_arcs_this_batch); RaggedShape sub_scores_shape = RaggedShape2(&arc_row_splits_part, nullptr, sub_scores_values.Dim()); Ragged<FloatType> sub_scores(sub_scores_shape, sub_scores_values); // we always use the first 
num_rows elements in score_cache. Array1<FloatType> sub_state_scores = score_cache.Range(0, num_states_this_batch); // get scores per state in this batch if (log_semiring) { LogSumPerSublist(sub_scores, negative_infinity, &sub_state_scores); } else { MaxPerSublist(sub_scores, negative_infinity, &sub_state_scores); if (entering_arcs_data != nullptr) { FloatType *sub_state_scores_data = sub_state_scores.Data(), *sub_scores_data = sub_scores.values.Data(); int32_t *sub_scores_row_ids_data = sub_scores.RowIds(1).Data(); const int32_t *sub_state_ids_data = states_data + this_state_idx0xx, *sub_entering_arc_ids_data = entering_arc_ids + this_arc_idx0xxx; // arc_idx01 below is an index into sub_scores, it is also an arc_idx123 // into entering_arc_batches. K2_EVAL( c, sub_scores.NumElements(), lambda_set_entering_arcs, (int32_t arc_idx01) { // state_idx0 below is idx0 into `sub_scores`, also an index into // `sub_scores`. int32_t state_idx0 = sub_scores_row_ids_data[arc_idx01]; if (sub_scores_data[arc_idx01] == sub_state_scores_data[state_idx0]) { int32_t fsas_state_idx01 = sub_state_ids_data[state_idx0], fsas_entering_arc_idx012 = sub_entering_arc_ids_data[arc_idx01]; // The following statement has a race condition if there is a // tie on scores, but this is OK and by design. It makes the // choice of traceback non-deterministic in these cases. entering_arcs_data[fsas_state_idx01] = fsas_entering_arc_idx012; } }); } } const FloatType *sub_state_scores_data = sub_state_scores.Data(); // Copy those scores to corresponding state in state_scores. // `state_idx12` is an idx12 w.r.t. state_batches and entering_arc_batches, // but an idx1 w.r.t. sub_scores and an index into the array // sub_state_scores. K2_EVAL( c, num_states_this_batch, lambda_copy_state_scores, (int32_t state_idx12) { int32_t batches_idx012 = this_state_idx0xx + state_idx12; int32_t fsas_state_idx01 = states_data[batches_idx012]; int32_t batches_idx01 = arc_batches_row_ids2[batches_idx012]; int32_t fsa_idx0 = batches_idx01 % num_fsas; int32_t start_state_idx01 = fsa_row_splits1[fsa_idx0]; // don't override score 0 in the start state in each fsa. 
if (fsas_state_idx01 != start_state_idx01) state_scores_data[fsas_state_idx01] = sub_state_scores_data[state_idx12]; }); } return state_scores; } template <typename FloatType> Array1<FloatType> GetBackwardScores( FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &leaving_arc_batches, const Array1<FloatType> *tot_scores /*= nullptr*/, bool log_semiring /*= true*/) { NVTX_RANGE(K2_FUNC); K2_CHECK(IsCompatible(fsas, state_batches)); K2_CHECK(IsCompatible(fsas, leaving_arc_batches)); K2_CHECK_EQ(fsas.NumAxes(), 3); K2_CHECK_EQ(state_batches.NumAxes(), 3); K2_CHECK_EQ(leaving_arc_batches.NumAxes(), 4); ContextPtr &c = fsas.Context(); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); int32_t num_batches = state_batches.Dim0(); K2_DCHECK(state_batches.TotSize(1) == num_fsas * num_batches); // just using DCHECK below to save time in production code K2_DCHECK_EQ(state_batches.NumElements(), num_states); K2_DCHECK_EQ(leaving_arc_batches.Dim0(), num_batches); K2_DCHECK_EQ(leaving_arc_batches.TotSize(1), state_batches.TotSize(1)); K2_DCHECK_EQ(leaving_arc_batches.TotSize(2), num_states); K2_DCHECK_EQ(leaving_arc_batches.NumElements(), num_arcs); FloatType negative_infinity = -std::numeric_limits<FloatType>::infinity(); Array1<FloatType> state_scores(c, num_states, negative_infinity); FloatType *state_scores_data = state_scores.Data(); const int32_t *fsa_row_splits1 = fsas.RowSplits(1).Data(); if (tot_scores != nullptr) { K2_CHECK(IsCompatible(fsas, *tot_scores)); K2_CHECK_EQ(tot_scores->Dim(), num_fsas); const FloatType *tot_scores_data = tot_scores->Data(); // set the score of final state in fsa i to be negative of tot_scores[i] K2_EVAL( c, num_fsas, lambda_set_final_state_score, (int32_t fsa_idx) { int32_t start_state = fsa_row_splits1[fsa_idx], start_state_next_fsa = fsa_row_splits1[fsa_idx + 1]; if (start_state_next_fsa - start_state > 0) { // We never set the score of a state to positive_infinity, otherwise // we may get NaN when add it with negative_infinity. But this // usually would not happen for a connected FSA. 
if (tot_scores_data[fsa_idx] != negative_infinity) { state_scores_data[start_state_next_fsa - 1] = -tot_scores_data[fsa_idx]; } else { state_scores_data[start_state_next_fsa - 1] = negative_infinity; } } }); } else { // set the score of final state in each fsa to be 0 K2_EVAL( c, num_fsas, lambda_set_final_state_score, (int32_t fsa_idx) { int32_t start_state = fsa_row_splits1[fsa_idx], start_state_next_fsa = fsa_row_splits1[fsa_idx + 1]; if (start_state_next_fsa - start_state > 0) state_scores_data[start_state_next_fsa - 1] = 0; }); } // get the 1st leaving arc index in each batch, +1 so we can get the number of // leaving arcs in each batch by taking the difference of adjacent elements Array1<int32_t> leaving_arc_start_index(c, num_batches + 1); int32_t *leaving_arc_start_index_data = leaving_arc_start_index.Data(); const int32_t *arc_batches_row_splits1 = leaving_arc_batches.RowSplits(1).Data(); const int32_t *arc_batches_row_splits2 = leaving_arc_batches.RowSplits(2).Data(); const int32_t *arc_batches_row_splits3 = leaving_arc_batches.RowSplits(3).Data(); K2_EVAL( c, num_batches, lambda_set_leaving_arc_start_index, (int32_t batch_idx) { int32_t this_state_idx0xx = arc_batches_row_splits2[batch_idx * num_fsas]; int32_t this_arc_idx0xxx = arc_batches_row_splits3[this_state_idx0xx]; leaving_arc_start_index_data[batch_idx] = this_arc_idx0xxx; if (batch_idx == num_batches - 1) { // process the last element int32_t next_state_idx0xx = arc_batches_row_splits2[num_batches * num_fsas]; int32_t next_arc_idx0xxx = arc_batches_row_splits3[next_state_idx0xx]; leaving_arc_start_index_data[num_batches] = next_arc_idx0xxx; } }); const int32_t *arc_batches_row_ids1 = leaving_arc_batches.RowIds(1).Data(); const int32_t *arc_batches_row_ids2 = leaving_arc_batches.RowIds(2).Data(); const int32_t *arc_batches_row_ids3 = leaving_arc_batches.RowIds(3).Data(); const int32_t *leaving_arc_ids = leaving_arc_batches.values.Data(); const int32_t *states_data = state_batches.values.Data(); const Arc *arcs = fsas.values.Data(); Array1<FloatType> leaving_arc_score_values( c, num_arcs); // leaving arc_scores in batches FloatType *arc_scores_data = leaving_arc_score_values.Data(); // copy leaving_arc_start_index to cpu as we will access its elements in below // Eval function for `lambda_set_leaving_arc_scores` Array1<int32_t> cpu_leaving_arc_start_index = leaving_arc_start_index.To(GetCpuContext()); const int32_t *cpu_leaving_arc_start = cpu_leaving_arc_start_index.Data(); // copy the index of start state in each fsa to CPU Array1<int32_t> arc_batches_row_splits1_array = leaving_arc_batches.RowSplits(1); Array1<int32_t> arc_batches_row_splits12_cpu = leaving_arc_batches.RowSplits(2)[arc_batches_row_splits1_array].To( GetCpuContext()); K2_CHECK_EQ(arc_batches_row_splits12_cpu.Dim(), num_batches + 1); const int32_t *arc_batches_row_splits12_cpu_data = arc_batches_row_splits12_cpu.Data(); Array1<int32_t> arc_row_splits_mem(c, num_states + 1); Array1<FloatType> score_cache(c, num_states + 1); // process batch sequentially. 
for (int32_t i = num_batches - 1; i >= 0; --i) { // get the range we would call Max/LogSum per sub list int32_t this_state_idx0xx = arc_batches_row_splits12_cpu_data[i]; int32_t next_state_idx0xx = arc_batches_row_splits12_cpu_data[i + 1]; // the 1st state idx in the // next batch K2_CHECK_LT(this_state_idx0xx, num_states); K2_CHECK_LE(next_state_idx0xx, num_states); int32_t num_states_this_batch = next_state_idx0xx - this_state_idx0xx; K2_CHECK_LT(num_states_this_batch, arc_row_splits_mem.Dim()); // we always use the first `num_states_this_batch` elements in // arc_row_splits_mem. Array1<int32_t> arc_row_splits_part = arc_row_splits_mem.Range( 0, num_states_this_batch + 1); // +1 for the last element int32_t num_arcs_this_batch = cpu_leaving_arc_start[i + 1] - cpu_leaving_arc_start[i]; { ParallelRunner pr(c); // get leaving arc scores { With w(pr.NewStream()); K2_EVAL( c, num_arcs_this_batch, lambda_set_leaving_arc_score, (int32_t idx123) { // all idx** in below code are the indexes to leaving_arc_batches int32_t idx0123 = leaving_arc_start_index_data[i] + idx123; int32_t idx012 = arc_batches_row_ids3[idx0123]; int32_t idx01 = arc_batches_row_ids2[idx012]; K2_CHECK_EQ(idx01 / num_fsas, i); // idx01/num_fsas is batch_id int32_t fsa_id = idx01 % num_fsas; int32_t leaving_arc_id = leaving_arc_ids[idx0123]; float curr_arc_score = arcs[leaving_arc_id].score; int32_t dest_state_idx1 = arcs[leaving_arc_id].dest_state; int32_t dest_state_idx01 = fsa_row_splits1[fsa_id] + dest_state_idx1; arc_scores_data[idx0123] = state_scores_data[dest_state_idx01] + curr_arc_score; }); } { With w(pr.NewStream()); // make leaving arc row splits info in each batch starting from zero, // we will use it to call MaxPerSublist or LogSumPerSubList int32_t *sum_splits_data = arc_row_splits_part.Data(); K2_EVAL( c, num_states_this_batch + 1, lambda_set_row_splits_for_sum, (int32_t idx) { sum_splits_data[idx] = arc_batches_row_splits3[idx + this_state_idx0xx] - arc_batches_row_splits3[this_state_idx0xx]; }); } } int32_t this_arc_idx0xxx = cpu_leaving_arc_start[i]; Array1<FloatType> sub_scores_values = leaving_arc_score_values.Range(this_arc_idx0xxx, num_arcs_this_batch); RaggedShape sub_scores_shape = RaggedShape2(&arc_row_splits_part, nullptr, sub_scores_values.Dim()); Ragged<FloatType> sub_scores(sub_scores_shape, sub_scores_values); // we always use the first num_rows elements in score_cache. Array1<FloatType> sub_state_scores = score_cache.Range(0, num_states_this_batch); // get scores per state in this batch if (log_semiring) LogSumPerSublist(sub_scores, negative_infinity, &sub_state_scores); else MaxPerSublist(sub_scores, negative_infinity, &sub_state_scores); const FloatType *sub_state_scores_data = sub_state_scores.Data(); // copy those scores to corresponding state in state_scores K2_EVAL( c, num_states_this_batch, lambda_copy_state_scores, (int32_t idx2) { int32_t idx012 = this_state_idx0xx + idx2; int32_t state_idx012 = states_data[idx012]; int32_t idx01 = arc_batches_row_ids2[idx012]; int32_t fsa_id = idx01 % num_fsas; int32_t start_state = fsa_row_splits1[fsa_id], start_state_next_fsa = fsa_row_splits1[fsa_id + 1]; if (start_state_next_fsa - start_state > 0) { // non-empty fsa int32_t final_state_idx = start_state_next_fsa - 1; // don't override score in the final state in each fsa. 
if (state_idx012 != final_state_idx) state_scores_data[state_idx012] = sub_state_scores_data[idx2]; } }); } return state_scores; } template <typename FloatType> Array1<FloatType> GetTotScores(FsaVec &fsas, const Array1<FloatType> &forward_scores) { NVTX_RANGE(K2_FUNC); K2_CHECK(IsCompatible(fsas, forward_scores)); K2_CHECK_EQ(fsas.NumAxes(), 3); ContextPtr &c = fsas.Context(); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1); K2_CHECK_EQ(num_states, forward_scores.Dim()); FloatType negative_infinity = -std::numeric_limits<FloatType>::infinity(); Array1<FloatType> tot_scores(c, num_fsas, negative_infinity); FloatType *tot_scores_data = tot_scores.Data(); const int32_t *fsa_row_splits1 = fsas.RowSplits(1).Data(); const FloatType *forward_scores_data = forward_scores.Data(); K2_EVAL( c, num_fsas, lambda_copy_tot_scores, (int32_t fsa_idx) { int32_t start_state = fsa_row_splits1[fsa_idx], start_state_next_fsa = fsa_row_splits1[fsa_idx + 1]; if (start_state_next_fsa > start_state) { // non-empty fsa int32_t final_state_idx = start_state_next_fsa - 1; tot_scores_data[fsa_idx] = forward_scores_data[final_state_idx]; } }); return tot_scores; } template <typename FloatType> Array1<FloatType> GetArcScores(FsaVec &fsas, const Array1<FloatType> &forward_scores, const Array1<FloatType> &backward_scores) { NVTX_RANGE(K2_FUNC); K2_CHECK(IsCompatible(fsas, forward_scores)); K2_CHECK(IsCompatible(fsas, backward_scores)); K2_CHECK_EQ(fsas.NumAxes(), 3); ContextPtr &c = fsas.Context(); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); K2_CHECK_EQ(num_states, forward_scores.Dim()); K2_CHECK_EQ(num_states, backward_scores.Dim()); Array1<FloatType> arc_scores(c, num_arcs); FloatType *arc_scores_data = arc_scores.Data(); const int32_t *fsa_row_splits1 = fsas.RowSplits(1).Data(); const int32_t *fsa_row_ids1 = fsas.RowIds(1).Data(); const int32_t *fsa_row_ids2 = fsas.RowIds(2).Data(); const Arc *arcs = fsas.values.Data(); const FloatType *forward_scores_data = forward_scores.Data(); const FloatType *backward_scores_data = backward_scores.Data(); K2_EVAL( c, num_arcs, lambda_get_arc_scores, (int32_t arc_idx012) { int32_t src_state_idx1 = arcs[arc_idx012].src_state; int32_t dest_state_idx1 = arcs[arc_idx012].dest_state; float arc_score = arcs[arc_idx012].score; int32_t idx01 = fsa_row_ids2[arc_idx012]; int32_t idx0 = fsa_row_ids1[idx01]; int32_t idx0x = fsa_row_splits1[idx0]; int32_t src_state_idx01 = idx0x + src_state_idx1; int32_t dest_state_idx01 = idx0x + dest_state_idx1; arc_scores_data[arc_idx012] = arc_score + forward_scores_data[src_state_idx01] + backward_scores_data[dest_state_idx01]; }); return arc_scores; } // explicit instantiation for those score computation functions above template Array1<float> GetForwardScores(FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &entering_arc_batches, bool log_semiring, Array1<int32_t> *entering_arcs); template Array1<double> GetForwardScores(FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &entering_arc_batches, bool log_semiring, Array1<int32_t> *entering_arcs); template Array1<float> GetBackwardScores(FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &leaving_arc_batches, const Array1<float> *tot_scores, bool log_semiring); template Array1<double> GetBackwardScores(FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &leaving_arc_batches, const Array1<double> *tot_scores, bool log_semiring); template Array1<float> GetArcScores(FsaVec &fsas, const Array1<float> 
&forward_scores, const Array1<float> &backward_scores); template Array1<double> GetArcScores(FsaVec &fsas, const Array1<double> &forward_scores, const Array1<double> &backward_scores); template Array1<float> GetTotScores(FsaVec &fsas, const Array1<float> &forward_scores); template Array1<double> GetTotScores(FsaVec &fsas, const Array1<double> &forward_scores); Fsa RandomFsa(bool acyclic /*=true*/, int32_t max_symbol /*=50*/, int32_t min_num_arcs /*=0*/, int32_t max_num_arcs /*=1000*/) { NVTX_RANGE(K2_FUNC); ContextPtr c = GetCpuContext(); K2_CHECK_GE(min_num_arcs, 0); K2_CHECK_GE(max_num_arcs, min_num_arcs); K2_CHECK_GE(max_symbol, 0); RaggedShape shape = RandomRaggedShape(false, 2, 2, min_num_arcs, max_num_arcs); int32_t dim0 = shape.Dim0(); // empty Fsa if (dim0 == 0) return Fsa(shape, Array1<Arc>(c, std::vector<Arc>{})); // as there should be no arcs leaving the final_state, we always push back an // empty row here. Array1<int32_t> ans_row_splits1(c, dim0 + 2); Array1<int32_t> sub_range = ans_row_splits1.Range(0, dim0 + 1); sub_range.CopyFrom(shape.RowSplits(1)); int32_t *ans_row_splits1_data = ans_row_splits1.Data(); ans_row_splits1_data[dim0 + 1] = ans_row_splits1_data[dim0]; // create returned shape RaggedShapeLayer ans_shape_dim; ans_shape_dim.row_splits = ans_row_splits1; ans_shape_dim.cached_tot_size = shape.TotSize(1); RaggedShape ans_shape(std::vector<RaggedShapeLayer>{ans_shape_dim}, true); ans_shape.Populate(); // will be used to generate scores on arcs. std::random_device rd; std::mt19937 gen(rd()); // TODO(haowen): let the users set the range of scores? it's fine to use it // for now as we just use it to test. std::uniform_real_distribution<float> dis_score(0, 10); // create arcs int32_t *row_ids1 = ans_shape.RowIds(1).Data(); int32_t num_states = ans_shape.Dim0(), num_arcs = ans_shape.TotSize(1); int32_t start_state = 0, final_state = num_states - 1; std::vector<Arc> arcs(num_arcs); for (int32_t i = 0; i != num_arcs; ++i) { int32_t curr_state = row_ids1[i]; int32_t dest_state = acyclic ? RandInt(curr_state + 1, final_state) : RandInt(start_state, final_state); int32_t symbol = dest_state == final_state ? -1 : RandInt(0, max_symbol); float score = dis_score(gen); arcs[i] = Arc(curr_state, dest_state, symbol, score); } return Fsa(ans_shape, Array1<Arc>(c, arcs)); } FsaVec RandomFsaVec(int32_t min_num_fsas /*=1*/, int32_t max_num_fsas /*=1000*/, bool acyclic /*=true*/, int32_t max_symbol /*=50*/, int32_t min_num_arcs /*=0*/, int32_t max_num_arcs /*=1000*/) { NVTX_RANGE(K2_FUNC); K2_CHECK_GE(min_num_fsas, 0); K2_CHECK_GE(max_num_fsas, min_num_fsas); int32_t num_fsas = RandInt(min_num_fsas, max_num_fsas); std::vector<Fsa> fsas(num_fsas); for (int32_t i = 0; i != num_fsas; ++i) { fsas[i] = RandomFsa(acyclic, max_symbol, min_num_arcs, max_num_arcs); } return Stack(0, num_fsas, fsas.data()); } DenseFsaVec RandomDenseFsaVec(int32_t min_num_fsas, int32_t max_num_fsas, int32_t min_frames, int32_t max_frames, int32_t min_symbols, int32_t max_symbols, float scores_scale) { NVTX_RANGE(K2_FUNC); ContextPtr c = GetCpuContext(); int32_t num_fsas = RandInt(min_num_fsas, max_num_fsas); // num_symbols includes epsilon but not final-symbol -1. int32_t num_symbols = RandInt(min_symbols, max_symbols); // `num_frames` includes the extra 1 frame for the final-symbol. 
std::vector<int32_t> num_frames(num_fsas + 1); int32_t tot_frames = 0; for (int32_t i = 0; i < num_fsas; i++) { num_frames[i] = RandInt(min_frames, max_frames) + 1; tot_frames += num_frames[i]; } Array2<float> scores(c, tot_frames, num_symbols + 1); auto scores_acc = scores.Accessor(); std::vector<int32_t> row_splits_vec(num_fsas + 1); row_splits_vec[0] = 0; int32_t cur_start_frame = 0; RandIntGenerator gen; for (int32_t i = 0; i < num_fsas; i++) { int32_t this_num_frames = num_frames[i], end_frame = cur_start_frame + this_num_frames; for (int32_t f = cur_start_frame; f + 1 < end_frame; f++) { scores_acc(f, 0) = -std::numeric_limits<float>::infinity(); for (int32_t j = 0; j < num_symbols; j++) scores_acc(f, j + 1) = scores_scale * gen(-50, 50) * 0.01; } // on the last frame the placement of infinity vs. finite is reversed: // -1 gets finite value, others get infinity. int32_t f = end_frame - 1; scores_acc(f, 0) = scores_scale * gen(-50, 50) * 0.01; for (int32_t j = 0; j < num_symbols; j++) scores_acc(f, j + 1) = -std::numeric_limits<float>::infinity(); row_splits_vec[i + 1] = cur_start_frame = end_frame; } Array1<int32_t> row_splits(c, row_splits_vec); return DenseFsaVec(RaggedShape2(&row_splits, nullptr, tot_frames), scores); } Ragged<int32_t> GetStartStates(FsaVec &src) { NVTX_RANGE(K2_FUNC); ContextPtr c = src.Context(); K2_CHECK(src.NumAxes() == 3); int32_t num_fsas = src.Dim0(); const int32_t *src_row_splits1_data = src.RowSplits(1).Data(); Array1<int32_t> ans_row_splits(c, num_fsas + 1); // will first set the elements of ans_row_splits to the number of states kept // from this FSA (either 0 or 1). int32_t *num_states_data = ans_row_splits.Data(); K2_EVAL( c, num_fsas, lambda_set_num_states, (int32_t fsa_idx0)->void { // 1 if the FSA is not empty, 0 if empty. num_states_data[fsa_idx0] = (src_row_splits1_data[fsa_idx0 + 1] > src_row_splits1_data[fsa_idx0]); }); ExclusiveSum(ans_row_splits, &ans_row_splits); int32_t ans_dim = ans_row_splits.Back(); Ragged<int32_t> ans(RaggedShape2(&ans_row_splits, nullptr, ans_dim), Array1<int32_t>(c, ans_dim)); const int32_t *ans_row_ids1_data = ans.shape.RowIds(1).Data(); int32_t *ans_values_data = ans.values.Data(); K2_EVAL( c, ans_dim, lambda_set_ans_values, (int32_t ans_idx01)->void { int32_t idx0 = ans_row_ids1_data[ans_idx01]; int32_t src_start_state_idx01 = src_row_splits1_data[idx0]; K2_CHECK_GT(src_row_splits1_data[idx0 + 1], src_row_splits1_data[idx0]); ans_values_data[ans_idx01] = src_start_state_idx01; }); return ans; } FsaVec FsaVecFromArcIndexes(FsaVec &fsas, Ragged<int32_t> &best_arc_indexes) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(fsas.NumAxes(), 3); K2_CHECK_EQ(best_arc_indexes.NumAxes(), 2); K2_CHECK(IsCompatible(fsas, best_arc_indexes)); K2_CHECK_EQ(fsas.Dim0(), best_arc_indexes.Dim0()); // if there are n arcs (for n > 0), there are n + 1 states; if there are 0 // arcs, there are 0 states (that FSA will have no arcs or states). 
RaggedShape states_shape = ChangeSublistSizePinned(best_arc_indexes.shape, 1); const int32_t *states_shape_row_splits1_data = states_shape.RowSplits(1).Data(); int32_t num_fsas = fsas.Dim0(); int32_t num_states = states_shape.NumElements(); int32_t num_arcs = best_arc_indexes.shape.NumElements(); ContextPtr &context = fsas.Context(); if (num_arcs == 0) { RaggedShape shape_a = RegularRaggedShape(context, num_fsas, 0), shape_b = RegularRaggedShape(context, 0, 0); return FsaVec(ComposeRaggedShapes(shape_a, shape_b), Array1<Arc>(context, 0)); } Array1<int32_t> row_splits2(context, num_states + 1); Array1<int32_t> row_ids2(context, num_arcs); int32_t *row_splits2_data = row_splits2.Data(); int32_t *row_ids2_data = row_ids2.Data(); Array1<Arc> arcs(context, num_arcs); Arc *arcs_data = arcs.Data(); const int32_t *best_arc_indexes_row_splits1_data = best_arc_indexes.RowSplits(1).Data(); const int32_t *best_arc_indexes_row_ids1_data = best_arc_indexes.RowIds(1).Data(); const int32_t *best_arc_indexes_data = best_arc_indexes.values.Data(); const Arc *fsas_values_data = fsas.values.Data(); K2_EVAL( context, num_arcs, lambda_set_arcs, (int32_t best_arc_idx01) { int32_t fsas_idx0 = best_arc_indexes_row_ids1_data[best_arc_idx01]; int32_t best_arc_idx0x = best_arc_indexes_row_splits1_data[fsas_idx0]; int32_t best_arc_idx0x_next = best_arc_indexes_row_splits1_data[fsas_idx0 + 1]; int32_t num_best_arcs = best_arc_idx0x_next - best_arc_idx0x; int32_t best_arc_idx1 = best_arc_idx01 - best_arc_idx0x; int32_t state_offset = states_shape_row_splits1_data[fsas_idx0]; const Arc &arc = fsas_values_data[best_arc_indexes_data[best_arc_idx01]]; int32_t src_state = best_arc_idx1; int32_t dest_state = src_state + 1; int32_t label = arc.label; float score = arc.score; arcs_data[best_arc_idx01] = Arc(src_state, dest_state, label, score); int32_t state_idx01 = state_offset + src_state; row_ids2_data[best_arc_idx01] = state_idx01; row_splits2_data[state_idx01 + 1] = best_arc_idx01 + 1; if (best_arc_idx01 == 0) row_splits2_data[0] = 0; if (best_arc_idx1 + 1 == num_best_arcs) row_splits2_data[state_idx01 + 2] = best_arc_idx01 + 1; }); RaggedShape shape = RaggedShape3(&states_shape.RowSplits(1), &states_shape.RowIds(1), num_states, &row_splits2, &row_ids2, num_arcs); Ragged<Arc> ans(shape, arcs); return ans; } FsaVec GetIncomingFsaVec(FsaVec &fsas) { Array1<int32_t> dest_states = GetDestStates(fsas, true); Ragged<int32_t> arc_indexes = GetIncomingArcs(fsas, dest_states); return FsaVec(arc_indexes.shape, fsas.values[arc_indexes.values]); } Ragged<int32_t> ComposeArcMaps(Ragged<int32_t> &step1_arc_map, Ragged<int32_t> &step2_arc_map) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(step1_arc_map.NumAxes(), 2); K2_CHECK_EQ(step2_arc_map.NumAxes(), 2); return Index(step1_arc_map, step2_arc_map, true); } } // namespace k2
fe1f919b983acc68e642467a4b55be86e19369d2.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * ExcuteConstraint.cu
 *
 *  Created on: Nov 4, 2017
 *      Author: zy
 */

#include <iostream>

#include "./../ConstraintParser/ConstraintParameter.cuh"
#include "./../model/CoodinateDouble.cuh"
#include "./../model/IntervalDouble.cuh"
#include "./../model/PriorityDouble.cuh"
#include "./../model/PredictValue.cuh"
#include "./../solver/ATG.h"
#include "./../solver/PCATG.h"
#include "./../solver/ConstantValue.h"
#include "ExcuteConstraint.cuh"
#include "HardwareStrategy.cuh"

#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "device_functions.hpp"
#include "math_functions.h"
#include <stdio.h>

using namespace std;

/*
 * This file implements the functions that evaluate the constraints at run time.
 * */

/*
 * Note that two computational optimizations are applied here:
 * 1) While computing the runtime value, the satisfaction of the sub-constraint is computed as well.
 * 2) When checking whether a sub-constraint is satisfied, no if/branch structures are used;
 *    the already computed runtime value is reused to avoid repeating expensive floating-point work.
 * The naming convention is:
 * getRuntimeValue_i_j computes the runtime value of the j-th constraint of the i-th disjunctive clause.
 * */
__device__ void getRuntimeValue_0_0(double x,double y,double z,CoodinateDouble* res)
{
    res->y = 9 - x*y*z;
    res->isCovered = (bool)(res->y >= 0);
    res->isValid = (bool)(isfinite(res->y));
    return ;
}

__device__ void getRuntimeValue_0_1(double x,double y,double z,CoodinateDouble* res)
{
    res->y = z - x*y;
    res->isCovered = (bool)(res->y > 0);
    res->isValid = (bool)(isfinite(res->y));
    return ;
}

__device__ void getRuntimeValue_0_2(double x,double y,double z,CoodinateDouble* res)
{
    res->y = y - z;
    res->isCovered = (bool)(res->y > 0);
    res->isValid = (bool)(isfinite(res->y));
    return ;
}

/*
 * Below: evaluation of the first constraint of the disjunctive clause along search directions index = 0, 1, 2.
 * */
__global__ void calaConstraint_0_0_0(CoodinateDouble* dev_predictArray,double* dev_parameter,
        const int base,const int Size)
{
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if (i < Size)
    {
        getRuntimeValue_0_0(dev_predictArray[i+base].x,dev_parameter[1],dev_parameter[2],dev_predictArray+i+base);
    }
}

__global__ void calaConstraint_0_0_1(CoodinateDouble* dev_predictArray,double* dev_parameter,
        const int base,const int Size)
{
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if (i < Size)
    {
        getRuntimeValue_0_0(dev_parameter[0],dev_predictArray[i+base].x,dev_parameter[2],dev_predictArray+i+base);
    }
}

__global__ void calaConstraint_0_0_2(CoodinateDouble* dev_predictArray,double* dev_parameter,
        const int base,const int Size)
{
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if (i < Size)
    {
        getRuntimeValue_0_0(dev_parameter[0],dev_parameter[1],dev_predictArray[i+base].x,dev_predictArray+i+base);
    }
}

/*
 * Below: evaluation of the second constraint of the disjunctive clause along search directions index = 0, 1, 2.
 * */
__global__ void calaConstraint_0_1_0(CoodinateDouble* dev_predictArray,double* dev_parameter,
        const int base,const int Size)
{
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if (i < Size)
    {
        getRuntimeValue_0_1(dev_predictArray[i+base].x,dev_parameter[1],dev_parameter[2],dev_predictArray+i+base);
    }
}

__global__ void calaConstraint_0_1_1(CoodinateDouble* dev_predictArray,double* dev_parameter,
        const int base,const int Size)
{
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if (i < Size)
    {
        getRuntimeValue_0_1(dev_parameter[0],dev_predictArray[i+base].x,dev_parameter[2],dev_predictArray+i+base);
    }
}

__global__ void calaConstraint_0_1_2(CoodinateDouble* dev_predictArray,double* dev_parameter,
        const int base,const int Size)
{
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if (i < Size)
    {
        getRuntimeValue_0_1(dev_parameter[0],dev_parameter[1],dev_predictArray[i+base].x,dev_predictArray+i+base);
    }
}

/*
 * Below: evaluation of the third constraint of the disjunctive clause along search directions index = 0, 1, 2.
 * */
__global__ void calaConstraint_0_2_0(CoodinateDouble* dev_predictArray,double* dev_parameter,
        const int base,const int Size)
{
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if (i < Size)
    {
        getRuntimeValue_0_2(dev_predictArray[i+base].x,dev_parameter[1],dev_parameter[2],dev_predictArray+i+base);
    }
}

__global__ void calaConstraint_0_2_1(CoodinateDouble* dev_predictArray,double* dev_parameter,
        const int base,const int Size)
{
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if (i < Size)
    {
        getRuntimeValue_0_2(dev_parameter[0],dev_predictArray[i+base].x,dev_parameter[2],dev_predictArray+i+base);
    }
}

__global__ void calaConstraint_0_2_2(CoodinateDouble* dev_predictArray,double* dev_parameter,
        const int base,const int Size)
{
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if (i < Size)
    {
        getRuntimeValue_0_2(dev_parameter[0],dev_parameter[1],dev_predictArray[i+base].x,dev_predictArray+i+base);
    }
}

/*
 * Parallel computation module for the compound constraint.
 * @Note: this function could be further accelerated with CUDA streams.
 * */
void calaRuntimeValue(int paraIndex,CoodinateDouble* dev_predictArray,double* dev_parameter,
        const int row,const int col)
{
    Block res = HardwareStrategy::getHardwareStrategy(col);
    // dispatch according to the current search direction
    if(paraIndex == 0)
    {
        hipLaunchKernelGGL(( calaConstraint_0_0_0), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock), 0, 0, dev_predictArray,dev_parameter,0*col,col);
        hipLaunchKernelGGL(( calaConstraint_0_1_0), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock), 0, 0, dev_predictArray,dev_parameter,1*col,col);
        hipLaunchKernelGGL(( calaConstraint_0_2_0), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock), 0, 0, dev_predictArray,dev_parameter,2*col,col);
    }else if(paraIndex == 1)
    {
        hipLaunchKernelGGL(( calaConstraint_0_0_1), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock), 0, 0, dev_predictArray,dev_parameter,0*col,col);
        hipLaunchKernelGGL(( calaConstraint_0_1_1), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock), 0, 0, dev_predictArray,dev_parameter,1*col,col);
        hipLaunchKernelGGL(( calaConstraint_0_2_1), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock), 0, 0, dev_predictArray,dev_parameter,2*col,col);
    } else if(paraIndex == 2)
    {
        hipLaunchKernelGGL(( calaConstraint_0_0_2), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock), 0, 0, dev_predictArray,dev_parameter,0*col,col);
        hipLaunchKernelGGL(( calaConstraint_0_1_2), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock), 0, 0, dev_predictArray,dev_parameter,1*col,col);
        hipLaunchKernelGGL(( calaConstraint_0_2_2), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock), 0, 0, dev_predictArray,dev_parameter,2*col,col);
    }else
    {
        cout<<"You Should Never Get Here. In Function Of: "<<endl;
    }
}

__global__ void calaFeasibleSolution(FullCoveredInfo* dev_coveredInfo,CoodinateDouble* dev_predictArray,const int Size)
{
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if (i < Size)
    {
        dev_coveredInfo[i].index = i;
        dev_coveredInfo[i].isCovered = dev_predictArray[i + 0*Size].isCovered &&
                dev_predictArray[i + 1*Size].isCovered && dev_predictArray[i + 2*Size].isCovered;
        dev_coveredInfo[i].isVaild = dev_predictArray[i + 0*Size].isValid &&
                dev_predictArray[i + 1*Size].isValid && dev_predictArray[i + 2*Size].isValid;
        dev_coveredInfo[i].vaildNum = (int)(dev_coveredInfo[i].isVaild);
    }
}

/*
 * Check whether there is a feasible solution that satisfies the compound constraint.
 * CPU-side verification module.
 * */
bool checkisFullCovered(FullCoveredInfo* dev_coveredInfo,CoodinateDouble* initArray,
        bool &findSolution,const int row,const int col)
{
    FullCoveredInfo* coveredInfo = new FullCoveredInfo[col];
    hipMemcpy(coveredInfo,dev_coveredInfo,col * sizeof(FullCoveredInfo),hipMemcpyDeviceToHost);
    for(int i=0;i<col;i++)
    {
        double x=0,y=0,z=0;
        if(ATG::currentSearchParamIndex==0)
        {
            x = initArray[i].x;
            y = ATG::parameters[1];
            z = ATG::parameters[2];
        }else if(ATG::currentSearchParamIndex==1)
        {
            x = ATG::parameters[0];
            y = initArray[i].x;
            z = ATG::parameters[2];
        }else if(ATG::currentSearchParamIndex==2)
        {
            x = ATG::parameters[0];
            y = ATG::parameters[1];
            z = initArray[i].x;
        }else
        {
            cout<<"FFFUCK YOU ************* "<<endl;
        }
        bool tmpp = (9-x*y*z >=0) && (z-x*y>0) && (y-z>0);
        if(coveredInfo[i].isCovered == tmpp )
        {
            if(tmpp)
            {
                findSolution = true;
                //cout<<"Index: "<<i<<" ( "<<x<<" , "<<y<<" , "<<z<<" )"<<endl;
            }
        }else
        {
            cout<<"************* Not Equal ******************"<<endl;
            return false;
        }
    }
    delete []coveredInfo;
    return true;
}

/*
 * Simply the interval-intersection computation.
 * */
__global__ void calaFinalIntervel(IntervalDouble* dev_finalIntervel,IntervalDouble* dev_interval,const int calaArraySize)
{
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    bool condition = (i>=1) && (i<calaArraySize);
    if(condition)
    {
        IntervalDouble* a1 = dev_interval + i;
        IntervalDouble* a2 = dev_interval + i + calaArraySize * 1;
        IntervalDouble* a3 = dev_interval + i + calaArraySize * 2;
        double left=a1->left;
        left = max(left,a2->left);
        left = max(left,a3->left);
        double right=a1->right;
        right = min(right,a2->right);
        right = min(right,a3->right);
        bool hasIntervel = a1->hasIntervel & a2->hasIntervel & a3->hasIntervel;
        dev_finalIntervel[i].left = left;
        dev_finalIntervel[i].right = right;
        dev_finalIntervel[i].hasIntervel = hasIntervel;
        //printf("(%f , %f ) (%f , %f ) (%f , %f ) Final %d (%f , %f)\n",a1->left,a1->right,a2->left,a2->right,a3->left,a3->right,hasIntervel,left,right);
    }
}

/*
 * Generate the predict matrix from the predicted value sequence.
 * */
__global__ void generatePredictMat(CoodinateDouble* dev_predictArray,PredictValue* dev_finalAllPredictValue,const int Size)
{
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if( i < Size )
    {
        dev_predictArray[i+0*Size].x = dev_finalAllPredictValue[i].value;
        dev_predictArray[i+1*Size].x = dev_finalAllPredictValue[i].value;
        dev_predictArray[i+2*Size].x = dev_finalAllPredictValue[i].value;
    }
}

/*
 * Compute the priority of every predicted solution vector in parallel.
 * */
__global__ void calaPriority(PriorityDouble* dev_priority,CoodinateDouble* dev_calaArray,
        const int row,const int Size)
{
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if( i < Size )
    {
        double pri = 0.0;
        CoodinateDouble* a1 = dev_calaArray + i + 0 * Size;
        CoodinateDouble* a2 = dev_calaArray + i + 1 * Size;
        CoodinateDouble* a3 = dev_calaArray + i + 2 * Size;
        pri = pri + ((int)(a1->isCovered==true))*1 +
                ((int)(a1->isCovered==false))*((int)(a1->isValid==true))*1.0/(1.0+abs(a1->y));
        pri = pri + ((int)(a2->isCovered==true))*1 +
                ((int)(a2->isCovered==false))*((int)(a2->isValid==true))*1.0/(1.0+abs(a2->y));
        pri = pri + ((int)(a3->isCovered==true))*1 +
                ((int)(a3->isCovered==false))*((int)(a3->isValid==true))*1.0/(1.0+abs(a3->y));
        dev_priority[i].priority = pri / (double)row;
        dev_priority[i].x = a1->x;

        // test code below
        bool isOne = (a1->x == a2->x) && (a2->x == a3->x);
        bool isCovered = a1->isCovered && a2->isCovered && a3->isCovered;
        bool isValid= a1->isValid && a2->isValid && a3->isValid;
        if(isCovered == true)
        {
            printf("Cala Prioruty Wrong,index:%d: (%f,%f,%d,%d) ,(%f,%f,%d,%d) ,(%f,%f,%d,%d) , isOne:%d, isCovered:%d ,isValid:%d \n",
                    i, a1->x,a1->y,a1->isCovered,a1->isValid,
                    a2->x,a2->y,a2->isCovered,a2->isValid,
                    a3->x,a3->y,a3->isCovered,a3->isValid,
                    isOne,isCovered,isValid);
        }
    }
}
fe1f919b983acc68e642467a4b55be86e19369d2.cu
/*
 * ExcuteConstraint.cu
 *
 *  Created on: Nov 4, 2017
 *      Author: zy
 */

#include <iostream>

#include "./../ConstraintParser/ConstraintParameter.cuh"
#include "./../model/CoodinateDouble.cuh"
#include "./../model/IntervalDouble.cuh"
#include "./../model/PriorityDouble.cuh"
#include "./../model/PredictValue.cuh"
#include "./../solver/ATG.h"
#include "./../solver/PCATG.h"
#include "./../solver/ConstantValue.h"
#include "ExcuteConstraint.cuh"
#include "HardwareStrategy.cuh"

#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "device_functions.hpp"
#include "math_functions.h"
#include <stdio.h>

using namespace std;

/*
 * This file implements the functions that evaluate the constraints at run time.
 * */

/*
 * Note that two computational optimizations are applied here:
 * 1) While computing the runtime value, the satisfaction of the sub-constraint is computed as well.
 * 2) When checking whether a sub-constraint is satisfied, no if/branch structures are used;
 *    the already computed runtime value is reused to avoid repeating expensive floating-point work.
 * The naming convention is:
 * getRuntimeValue_i_j computes the runtime value of the j-th constraint of the i-th disjunctive clause.
 * */
__device__ void getRuntimeValue_0_0(double x,double y,double z,CoodinateDouble* res)
{
    res->y = 9 - x*y*z;
    res->isCovered = (bool)(res->y >= 0);
    res->isValid = (bool)(isfinite(res->y));
    return ;
}

__device__ void getRuntimeValue_0_1(double x,double y,double z,CoodinateDouble* res)
{
    res->y = z - x*y;
    res->isCovered = (bool)(res->y > 0);
    res->isValid = (bool)(isfinite(res->y));
    return ;
}

__device__ void getRuntimeValue_0_2(double x,double y,double z,CoodinateDouble* res)
{
    res->y = y - z;
    res->isCovered = (bool)(res->y > 0);
    res->isValid = (bool)(isfinite(res->y));
    return ;
}

/*
 * Below: evaluation of the first constraint of the disjunctive clause along search directions index = 0, 1, 2.
 * */
__global__ void calaConstraint_0_0_0(CoodinateDouble* dev_predictArray,double* dev_parameter,
        const int base,const int Size)
{
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if (i < Size)
    {
        getRuntimeValue_0_0(dev_predictArray[i+base].x,dev_parameter[1],dev_parameter[2],dev_predictArray+i+base);
    }
}

__global__ void calaConstraint_0_0_1(CoodinateDouble* dev_predictArray,double* dev_parameter,
        const int base,const int Size)
{
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if (i < Size)
    {
        getRuntimeValue_0_0(dev_parameter[0],dev_predictArray[i+base].x,dev_parameter[2],dev_predictArray+i+base);
    }
}

__global__ void calaConstraint_0_0_2(CoodinateDouble* dev_predictArray,double* dev_parameter,
        const int base,const int Size)
{
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if (i < Size)
    {
        getRuntimeValue_0_0(dev_parameter[0],dev_parameter[1],dev_predictArray[i+base].x,dev_predictArray+i+base);
    }
}

/*
 * Below: evaluation of the second constraint of the disjunctive clause along search directions index = 0, 1, 2.
 * */
__global__ void calaConstraint_0_1_0(CoodinateDouble* dev_predictArray,double* dev_parameter,
        const int base,const int Size)
{
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if (i < Size)
    {
        getRuntimeValue_0_1(dev_predictArray[i+base].x,dev_parameter[1],dev_parameter[2],dev_predictArray+i+base);
    }
}

__global__ void calaConstraint_0_1_1(CoodinateDouble* dev_predictArray,double* dev_parameter,
        const int base,const int Size)
{
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if (i < Size)
    {
        getRuntimeValue_0_1(dev_parameter[0],dev_predictArray[i+base].x,dev_parameter[2],dev_predictArray+i+base);
    }
}

__global__ void calaConstraint_0_1_2(CoodinateDouble* dev_predictArray,double* dev_parameter,
        const int base,const int Size)
{
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if (i < Size)
    {
        getRuntimeValue_0_1(dev_parameter[0],dev_parameter[1],dev_predictArray[i+base].x,dev_predictArray+i+base);
    }
}

/*
 * Below: evaluation of the third constraint of the disjunctive clause along search directions index = 0, 1, 2.
 * */
__global__ void calaConstraint_0_2_0(CoodinateDouble* dev_predictArray,double* dev_parameter,
        const int base,const int Size)
{
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if (i < Size)
    {
        getRuntimeValue_0_2(dev_predictArray[i+base].x,dev_parameter[1],dev_parameter[2],dev_predictArray+i+base);
    }
}

__global__ void calaConstraint_0_2_1(CoodinateDouble* dev_predictArray,double* dev_parameter,
        const int base,const int Size)
{
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if (i < Size)
    {
        getRuntimeValue_0_2(dev_parameter[0],dev_predictArray[i+base].x,dev_parameter[2],dev_predictArray+i+base);
    }
}

__global__ void calaConstraint_0_2_2(CoodinateDouble* dev_predictArray,double* dev_parameter,
        const int base,const int Size)
{
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if (i < Size)
    {
        getRuntimeValue_0_2(dev_parameter[0],dev_parameter[1],dev_predictArray[i+base].x,dev_predictArray+i+base);
    }
}

/*
 * Parallel computation module for the compound constraint.
 * @Note: this function could be further accelerated with CUDA streams.
 * */
void calaRuntimeValue(int paraIndex,CoodinateDouble* dev_predictArray,double* dev_parameter,
        const int row,const int col)
{
    Block res = HardwareStrategy::getHardwareStrategy(col);
    // dispatch according to the current search direction
    if(paraIndex == 0)
    {
        calaConstraint_0_0_0<<<res.NumOfBlock , res.ThreadPreBlock>>>(dev_predictArray,dev_parameter,0*col,col);
        calaConstraint_0_1_0<<<res.NumOfBlock , res.ThreadPreBlock>>>(dev_predictArray,dev_parameter,1*col,col);
        calaConstraint_0_2_0<<<res.NumOfBlock , res.ThreadPreBlock>>>(dev_predictArray,dev_parameter,2*col,col);
    }else if(paraIndex == 1)
    {
        calaConstraint_0_0_1<<<res.NumOfBlock , res.ThreadPreBlock>>>(dev_predictArray,dev_parameter,0*col,col);
        calaConstraint_0_1_1<<<res.NumOfBlock , res.ThreadPreBlock>>>(dev_predictArray,dev_parameter,1*col,col);
        calaConstraint_0_2_1<<<res.NumOfBlock , res.ThreadPreBlock>>>(dev_predictArray,dev_parameter,2*col,col);
    } else if(paraIndex == 2)
    {
        calaConstraint_0_0_2<<<res.NumOfBlock , res.ThreadPreBlock>>>(dev_predictArray,dev_parameter,0*col,col);
        calaConstraint_0_1_2<<<res.NumOfBlock , res.ThreadPreBlock>>>(dev_predictArray,dev_parameter,1*col,col);
        calaConstraint_0_2_2<<<res.NumOfBlock , res.ThreadPreBlock>>>(dev_predictArray,dev_parameter,2*col,col);
    }else
    {
        cout<<"You Should Never Get Here. In Function Of: "<<endl;
    }
}

__global__ void calaFeasibleSolution(FullCoveredInfo* dev_coveredInfo,CoodinateDouble* dev_predictArray,const int Size)
{
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if (i < Size)
    {
        dev_coveredInfo[i].index = i;
        dev_coveredInfo[i].isCovered = dev_predictArray[i + 0*Size].isCovered &&
                dev_predictArray[i + 1*Size].isCovered && dev_predictArray[i + 2*Size].isCovered;
        dev_coveredInfo[i].isVaild = dev_predictArray[i + 0*Size].isValid &&
                dev_predictArray[i + 1*Size].isValid && dev_predictArray[i + 2*Size].isValid;
        dev_coveredInfo[i].vaildNum = (int)(dev_coveredInfo[i].isVaild);
    }
}

/*
 * Check whether there is a feasible solution that satisfies the compound constraint.
 * CPU-side verification module.
 * */
bool checkisFullCovered(FullCoveredInfo* dev_coveredInfo,CoodinateDouble* initArray,
        bool &findSolution,const int row,const int col)
{
    FullCoveredInfo* coveredInfo = new FullCoveredInfo[col];
    cudaMemcpy(coveredInfo,dev_coveredInfo,col * sizeof(FullCoveredInfo),cudaMemcpyDeviceToHost);
    for(int i=0;i<col;i++)
    {
        double x=0,y=0,z=0;
        if(ATG::currentSearchParamIndex==0)
        {
            x = initArray[i].x;
            y = ATG::parameters[1];
            z = ATG::parameters[2];
        }else if(ATG::currentSearchParamIndex==1)
        {
            x = ATG::parameters[0];
            y = initArray[i].x;
            z = ATG::parameters[2];
        }else if(ATG::currentSearchParamIndex==2)
        {
            x = ATG::parameters[0];
            y = ATG::parameters[1];
            z = initArray[i].x;
        }else
        {
            cout<<"FFFUCK YOU ************* "<<endl;
        }
        bool tmpp = (9-x*y*z >=0) && (z-x*y>0) && (y-z>0);
        if(coveredInfo[i].isCovered == tmpp )
        {
            if(tmpp)
            {
                findSolution = true;
                //cout<<"Index: "<<i<<" ( "<<x<<" , "<<y<<" , "<<z<<" )"<<endl;
            }
        }else
        {
            cout<<"************* Not Equal ******************"<<endl;
            return false;
        }
    }
    delete []coveredInfo;
    return true;
}

/*
 * Simply the interval-intersection computation.
 * */
__global__ void calaFinalIntervel(IntervalDouble* dev_finalIntervel,IntervalDouble* dev_interval,const int calaArraySize)
{
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    bool condition = (i>=1) && (i<calaArraySize);
    if(condition)
    {
        IntervalDouble* a1 = dev_interval + i;
        IntervalDouble* a2 = dev_interval + i + calaArraySize * 1;
        IntervalDouble* a3 = dev_interval + i + calaArraySize * 2;
        double left=a1->left;
        left = max(left,a2->left);
        left = max(left,a3->left);
        double right=a1->right;
        right = min(right,a2->right);
        right = min(right,a3->right);
        bool hasIntervel = a1->hasIntervel & a2->hasIntervel & a3->hasIntervel;
        dev_finalIntervel[i].left = left;
        dev_finalIntervel[i].right = right;
        dev_finalIntervel[i].hasIntervel = hasIntervel;
        //printf("(%f , %f ) (%f , %f ) (%f , %f ) Final %d (%f , %f)\n",a1->left,a1->right,a2->left,a2->right,a3->left,a3->right,hasIntervel,left,right);
    }
}

/*
 * Generate the predict matrix from the predicted value sequence.
 * */
__global__ void generatePredictMat(CoodinateDouble* dev_predictArray,PredictValue* dev_finalAllPredictValue,const int Size)
{
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if( i < Size )
    {
        dev_predictArray[i+0*Size].x = dev_finalAllPredictValue[i].value;
        dev_predictArray[i+1*Size].x = dev_finalAllPredictValue[i].value;
        dev_predictArray[i+2*Size].x = dev_finalAllPredictValue[i].value;
    }
}

/*
 * Compute the priority of every predicted solution vector in parallel.
 * */
__global__ void calaPriority(PriorityDouble* dev_priority,CoodinateDouble* dev_calaArray,
        const int row,const int Size)
{
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if( i < Size )
    {
        double pri = 0.0;
        CoodinateDouble* a1 = dev_calaArray + i + 0 * Size;
        CoodinateDouble* a2 = dev_calaArray + i + 1 * Size;
        CoodinateDouble* a3 = dev_calaArray + i + 2 * Size;
        pri = pri + ((int)(a1->isCovered==true))*1 +
                ((int)(a1->isCovered==false))*((int)(a1->isValid==true))*1.0/(1.0+abs(a1->y));
        pri = pri + ((int)(a2->isCovered==true))*1 +
                ((int)(a2->isCovered==false))*((int)(a2->isValid==true))*1.0/(1.0+abs(a2->y));
        pri = pri + ((int)(a3->isCovered==true))*1 +
                ((int)(a3->isCovered==false))*((int)(a3->isValid==true))*1.0/(1.0+abs(a3->y));
        dev_priority[i].priority = pri / (double)row;
        dev_priority[i].x = a1->x;

        // test code below
        bool isOne = (a1->x == a2->x) && (a2->x == a3->x);
        bool isCovered = a1->isCovered && a2->isCovered && a3->isCovered;
        bool isValid= a1->isValid && a2->isValid && a3->isValid;
        if(isCovered == true)
        {
            printf("Cala Prioruty Wrong,index:%d: (%f,%f,%d,%d) ,(%f,%f,%d,%d) ,(%f,%f,%d,%d) , isOne:%d, isCovered:%d ,isValid:%d \n",
                    i, a1->x,a1->y,a1->isCovered,a1->isValid,
                    a2->x,a2->y,a2->isCovered,a2->isValid,
                    a3->x,a3->y,a3->isCovered,a3->isValid,
                    isOne,isCovered,isValid);
        }
    }
}