Column          Type     Value lengths
hip_filename    string   5 to 84 characters
hip_content     string   79 to 9.69M characters
cuda_filename   string   4 to 83 characters
cuda_content    string   19 to 9.69M characters
84ea5b3687d512db85d7263a5a55756fce686cb5.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdiocu.h>
#include <crtdefscu.h>
#include <ext\memfile.h>
#include <assert.h>

static __global__ void g_ext_memfile_test1() { printf("ext_memfile_test1\n"); }

hipError_t ext_memfile_test1() {
  hipLaunchKernelGGL(( g_ext_memfile_test1), dim3(1), dim3(1), 0, 0, );
  return hipDeviceSynchronize();
}
84ea5b3687d512db85d7263a5a55756fce686cb5.cu
#include <stdiocu.h>
#include <crtdefscu.h>
#include <ext\memfile.h>
#include <assert.h>

static __global__ void g_ext_memfile_test1() { printf("ext_memfile_test1\n"); }

cudaError_t ext_memfile_test1() {
  g_ext_memfile_test1<<<1, 1>>>();
  return cudaDeviceSynchronize();
}
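The pair above illustrates the main rewrite hipify applies to kernel launches: the CUDA triple-chevron configuration becomes explicit leading arguments of hipLaunchKernelGGL, and cudaError_t / cudaDeviceSynchronize become their hip* counterparts. A minimal sketch of that mapping, using a hypothetical kernel that is not part of the dataset rows:

#include <cuda_runtime.h>
#include <cstdio>

// Hypothetical kernel, used only to illustrate the launch-syntax rewrite;
// it does not appear in the dataset above.
static __global__ void g_hello() { printf("hello from the device\n"); }

cudaError_t hello() {
  // CUDA source form: grid, block, dynamic shared-memory bytes and stream
  // all sit inside the triple chevrons.
  g_hello<<<dim3(1), dim3(1), 0, 0>>>();
  // After hipify the same launch reads:
  //   hipLaunchKernelGGL(g_hello, dim3(1), dim3(1), 0, 0);
  // i.e. grid, block, shared-memory bytes and stream become the leading
  // arguments, followed by the kernel's own argument list (empty here).
  return cudaDeviceSynchronize();
}

int main() { return hello() == cudaSuccess ? 0 : 1; }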
60df6f02030fedd82bc95bc4982bbc5e297f4ed7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <assert.h> #include <iostream> #include <random> #include "../nn_utils/nn_exception.hh" #include "../nn_utils/shape.hh" #include "linear_softmax.hh" #define BLOCK_DIM 16 // This kernel is optimized to ensure all global reads and writes are coalesced, // and to avoid bank conflicts in shared memory. This kernel is up to 11x faster // than the naive kernel below. Note that the shared memory array is sized to // (BLOCK_DIM+1)*BLOCK_DIM. This pads each row of the 2D block in shared memory // so that bank conflicts do not occur when threads address the array column-wise. __global__ void transpose_softmax(float *odata, float *idata, int width, int height) { __shared__ float block[BLOCK_DIM][BLOCK_DIM+1]; // read the matrix tile into shared memory // load one element per thread from device memory (idata) and store it // in transpose_relud order in block[][] unsigned int xIndex = blockIdx.x * BLOCK_DIM + threadIdx.x; unsigned int yIndex = blockIdx.y * BLOCK_DIM + threadIdx.y; if((xIndex < width) && (yIndex < height)) { unsigned int index_in = yIndex * width + xIndex; block[threadIdx.y][threadIdx.x] = idata[index_in]; } // synchronise to ensure all writes to block[][] have completed __syncthreads(); // write the transpose_relud matrix tile to global memory (odata) in linear order xIndex = blockIdx.y * BLOCK_DIM + threadIdx.x; yIndex = blockIdx.x * BLOCK_DIM + threadIdx.y; if((xIndex < height) && (yIndex < width)) { unsigned int index_out = yIndex * height + xIndex; odata[index_out] = block[threadIdx.x][threadIdx.y]; } } #define TILE_WIDTH 16 // Compute C = A * B __global__ void matrixMultiplyAndSoftmax(float * A, float * B, float * C, float* b, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { //@@ Insert code to implement matrix multiplication here __shared__ float ds_M[TILE_WIDTH][TILE_WIDTH]; __shared__ float ds_N[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x, by = blockIdx.y, tx = threadIdx.x, ty = threadIdx.y, Row = by * TILE_WIDTH + ty, Col = bx * TILE_WIDTH + tx; float Pvalue = 0; for (int m = 0; m < (numAColumns-1)/TILE_WIDTH+1; ++m) { if (Row < numARows && m*TILE_WIDTH+tx < numAColumns) ds_M[ty][tx] = A[Row*numAColumns + m*TILE_WIDTH+tx]; else ds_M[ty][tx] = 0; if (Col < numBColumns && m*TILE_WIDTH+ty < numBRows) ds_N[ty][tx] = B[(m*TILE_WIDTH+ty)*numBColumns+Col]; else ds_N[ty][tx] = 0; __syncthreads(); for (int k = 0; k < TILE_WIDTH; ++k) Pvalue += ds_M[ty][k] * ds_N[k][tx]; __syncthreads(); } if (Row < numCRows && Col < numCColumns) C[Row*numCColumns+Col] = (Pvalue + b[Row]); } __global__ void matrixMultiplyBackPropSoftmax(float * A, float * B, float * C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { //@@ Insert code to implement matrix multiplication here __shared__ float ds_M[TILE_WIDTH][TILE_WIDTH]; __shared__ float ds_N[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x, by = blockIdx.y, tx = threadIdx.x, ty = threadIdx.y, Row = by * TILE_WIDTH + ty, Col = bx * TILE_WIDTH + tx; float Pvalue = 0; for (int m = 0; m < (numAColumns-1)/TILE_WIDTH+1; ++m) { if (Row < numARows && m*TILE_WIDTH+tx < numAColumns) ds_M[ty][tx] = A[Row*numAColumns + m*TILE_WIDTH+tx]; else ds_M[ty][tx] = 0; if (Col < numBColumns && m*TILE_WIDTH+ty < numBRows) ds_N[ty][tx] = B[(m*TILE_WIDTH+ty)*numBColumns+Col]; else ds_N[ty][tx] = 0; __syncthreads(); for (int k = 0; k < TILE_WIDTH; ++k) Pvalue += ds_M[ty][k] * 
ds_N[k][tx]; __syncthreads(); } if (Row < numCRows && Col < numCColumns) C[Row*numCColumns+Col] = Pvalue ; } // Compute C = A * B __global__ void matrixMultiplyUpdateWeights_softmax(float * A, float * B, float * C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns, float learning_rate) { //@@ Insert code to implement matrix multiplication here __shared__ float ds_M[TILE_WIDTH][TILE_WIDTH]; __shared__ float ds_N[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x, by = blockIdx.y, tx = threadIdx.x, ty = threadIdx.y, Row = by * TILE_WIDTH + ty, Col = bx * TILE_WIDTH + tx; float Pvalue = 0; for (int m = 0; m < (numAColumns-1)/TILE_WIDTH+1; ++m) { if (Row < numARows && m*TILE_WIDTH+tx < numAColumns) ds_M[ty][tx] = A[Row*numAColumns + m*TILE_WIDTH+tx]; else ds_M[ty][tx] = 0; if (Col < numBColumns && m*TILE_WIDTH+ty < numBRows) ds_N[ty][tx] = B[(m*TILE_WIDTH+ty)*numBColumns+Col]; else ds_N[ty][tx] = 0; __syncthreads(); for (int k = 0; k < TILE_WIDTH; ++k) Pvalue += ds_M[ty][k] * ds_N[k][tx]; __syncthreads(); } if (Row < numCRows && Col < numCColumns) C[Row*numCColumns+Col] = C[Row*numCColumns+Col] - learning_rate * (Pvalue / numAColumns); } __global__ void initializeBiasKernel_softmax(float* b, int size){ int index = blockIdx.x * blockDim.x + threadIdx.x; if(index < size){ b[index] = 0.0; } } __global__ void updateBiasKernel_softmax(float* dZ, float* b, int cols, int row, float learning_rate){ int bid = blockIdx.x; extern __shared__ float _share[]; //float * _max = _share; float * _sum = _share; float* sp = dZ + cols * bid; _sum[threadIdx.x] = 0.0; for(int id = threadIdx.x ; id < cols; id += blockDim.x){ // int id = tid + threadIdx.x; //if(id < cols){ _sum[threadIdx.x] += sp[id]; //} } __syncthreads(); int len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) { _sum[threadIdx.x] += _sum[threadIdx.x + skip]; } len = (len + 1) >> 1; } __syncthreads(); b[bid] -= learning_rate * (_sum[0]/cols); } LinearSoftmaxLayer::LinearSoftmaxLayer(std::string name, Shape W_shape) : W(W_shape), b(W_shape.y, 1) { this->name = name; b.allocateMemory(); W.allocateMemory(); initializeBiasWithZeros_softmax(); initializeWeightsRandomly_softmax(); } LinearSoftmaxLayer::~LinearSoftmaxLayer() { } void LinearSoftmaxLayer::initializeWeightsRandomly_softmax() { std::default_random_engine generator; std::normal_distribution<float> normal_distribution(0.0, 1.0); float weights_init_threshold = 0.01; for (int x = 0; x < W.shape.x; x++) { for (int y = 0; y < W.shape.y; y++) { W[y * W.shape.x + x] = normal_distribution(generator) * weights_init_threshold; } } W.copyHostToDevice(); /* dim3 blockDim(256); dim3 gridDim((W.shape.x * W.shape.y + blockDim.x - 1)/blockDim.x); initializeWeightsKernel<<<gridDim, blockDim>>>(W.data_device.get(), W.shape.x * W.shape.y); */ } void LinearSoftmaxLayer::initializeBiasWithZeros_softmax() { /* for (int x = 0; x < b.shape.x; x++) { b[x] = 0; } b.copyHostToDevice(); */ dim3 blockDim(256); dim3 gridDim((b.shape.x * b.shape.y + blockDim.x - 1)/blockDim.x); hipLaunchKernelGGL(( initializeBiasKernel_softmax), dim3(gridDim), dim3(blockDim), 0, 0, b.data_device.get(), b.shape.x * b.shape.y); } Matrix& LinearSoftmaxLayer::forward(Matrix& A) { assert(W.shape.x == A.shape.y); this->A = A; Shape Z_shape(A.shape.x, W.shape.y); Z.allocateMemoryIfNotAllocated(Z_shape); T.allocateMemoryIfNotAllocated(Z_shape); computeAndStoreLayerOutput_softmax(A); NNException::throwIfDeviceErrorsOccurred("Cannot perform linear layer forward 
propagation."); return Z; } __global__ void softmax_linear(float* softmaxP, float* b, int rows, int cols){ int tid = threadIdx.x; int bid = blockIdx.x; float _max = -100000000.0; float sum = 0.0; extern __shared__ float _share[]; if(tid * cols + bid < rows * cols){ for(int i = 0 ; i < rows ; i++) _share[i] = b[i * cols + bid]; for(int i = 0 ; i < rows ; i++) _max = max(_max, _share[i]); for(int i = 0 ; i < rows ; i++) _share[i] = __expf(_share[i]-_max); for(int i = 0 ; i < rows ; i++) sum += _share[i]; for(int i = 0 ; i < rows ; i++) softmaxP[i * cols + bid] = _share[i]/sum; } } void LinearSoftmaxLayer::computeAndStoreLayerOutput_softmax(Matrix& A) { dim3 block_size(TILE_WIDTH, TILE_WIDTH); dim3 num_of_blocks( (Z.shape.x + block_size.x - 1) / block_size.x, (Z.shape.y + block_size.y - 1) / block_size.y); /* linearLayerForward<<<num_of_blocks, block_size>>>( W.data_device.get(), A.data_device.get(), Z.data_device.get(), b.data_device.get(), W.shape.x, W.shape.y, A.shape.x, A.shape.y); */ hipLaunchKernelGGL(( matrixMultiplyAndSoftmax), dim3(num_of_blocks), dim3(block_size), 0, 0, W.data_device.get(), A.data_device.get(), T.data_device.get(), b.data_device.get(), W.shape.y, W.shape.x, A.shape.y, A.shape.x, T.shape.y, T.shape.x); dim3 block = T.shape.x; dim3 threads = 1; hipLaunchKernelGGL(( softmax_linear), dim3(block), dim3(threads), Z.shape.y * sizeof(float), 0, Z.data_device.get(), T.data_device.get(), Z.shape.y, Z.shape.x); } Matrix& LinearSoftmaxLayer::backprop(Matrix& dZ, float learning_rate) { dA.allocateMemoryIfNotAllocated(A.shape); WT.allocateMemoryIfNotAllocated(Shape(W.shape.y, W.shape.x)); AT.allocateMemoryIfNotAllocated(Shape(A.shape.y, A.shape.x)); //std::cout << "Here" << std::endl; computeAndStoreBackpropError_softmax(dZ); NNException::throwIfDeviceErrorsOccurred("Cannot perform back propagation."); updateBias_softmax(dZ, learning_rate); NNException::throwIfDeviceErrorsOccurred("Cannot perform bias update."); updateWeights_softmax(dZ, learning_rate); NNException::throwIfDeviceErrorsOccurred("Cannot perform weights update."); return dA; } void LinearSoftmaxLayer::computeAndStoreBackpropError_softmax(Matrix& dZ) { dim3 block_size(TILE_WIDTH, TILE_WIDTH); dim3 num_of_blocks( (A.shape.x + block_size.x - 1) / block_size.x, (A.shape.y + block_size.y - 1) / block_size.y); /* linearLayerBackprop<<<num_of_blocks, block_size>>>( W.data_device.get(), dZ.data_device.get(), dA.data_device.get(), W.shape.x, W.shape.y, dZ.shape.x, dZ.shape.y); */ dim3 transpose_relu_block(BLOCK_DIM, BLOCK_DIM); dim3 num_t_blocks((W.shape.x + transpose_relu_block.x - 1) / transpose_relu_block.x, (W.shape.y + transpose_relu_block.y - 1) / transpose_relu_block.y); hipLaunchKernelGGL(( transpose_softmax), dim3(num_t_blocks), dim3(transpose_relu_block), 0, 0, WT.data_device.get(), W.data_device.get(), W.shape.x, W.shape.y); hipLaunchKernelGGL(( matrixMultiplyBackPropSoftmax), dim3(num_of_blocks), dim3(block_size), 0, 0, WT.data_device.get(), dZ.data_device.get(), dA.data_device.get(), WT.shape.y, WT.shape.x, dZ.shape.y, dZ.shape.x, dA.shape.y, dA.shape.x); } void LinearSoftmaxLayer::updateWeights_softmax(Matrix& dZ, float learning_rate) { dim3 block_size(TILE_WIDTH, TILE_WIDTH); dim3 num_of_blocks( (W.shape.x + block_size.x - 1) / block_size.x, (W.shape.y + block_size.y - 1) / block_size.y); /* linearLayerUpdateWeights<<<num_of_blocks, block_size>>>(dZ.data_device.get(), A.data_device.get(), W.data_device.get(), dZ.shape.x, dZ.shape.y, A.shape.x, A.shape.y, learning_rate); */ dim3 transpose_relu_block(BLOCK_DIM, 
BLOCK_DIM); dim3 num_t_blocks((A.shape.x + transpose_relu_block.x - 1) / transpose_relu_block.x, (A.shape.y + transpose_relu_block.y - 1) / transpose_relu_block.y); hipLaunchKernelGGL(( transpose_softmax), dim3(num_t_blocks), dim3(transpose_relu_block), 0, 0, AT.data_device.get(), A.data_device.get(), A.shape.x, A.shape.y); hipLaunchKernelGGL(( matrixMultiplyUpdateWeights_softmax), dim3(num_of_blocks), dim3(block_size), 0, 0, dZ.data_device.get(), AT.data_device.get(), W.data_device.get(), dZ.shape.y, dZ.shape.x, AT.shape.y, AT.shape.x, W.shape.y, W.shape.x, learning_rate); } void LinearSoftmaxLayer::updateBias_softmax(Matrix& dZ, float learning_rate) { /* dim3 block_size(256); dim3 num_of_blocks( (dZ.shape.y * dZ.shape.x + block_size.x - 1) / block_size.x); linearLayerUpdateBias<<<num_of_blocks, block_size>>>(dZ.data_device.get(), b.data_device.get(), dZ.shape.x, dZ.shape.y, b.shape.x, learning_rate); */ dim3 block_size(::min(256, int(dZ.shape.x))); dim3 num_of_blocks(dZ.shape.y); hipLaunchKernelGGL(( updateBiasKernel_softmax), dim3(num_of_blocks), dim3(block_size), sizeof(float) * block_size.x, 0, dZ.data_device.get(), b.data_device.get(), dZ.shape.x, dZ.shape.y, learning_rate); } int LinearSoftmaxLayer::getXDim() const { return W.shape.x; } int LinearSoftmaxLayer::getYDim() const { return W.shape.y; } Matrix LinearSoftmaxLayer::getWeightsMatrix() const { return W; } Matrix LinearSoftmaxLayer::getBiasVector() const { return b; }
60df6f02030fedd82bc95bc4982bbc5e297f4ed7.cu
#include <stdlib.h> #include <assert.h> #include <iostream> #include <random> #include "../nn_utils/nn_exception.hh" #include "../nn_utils/shape.hh" #include "linear_softmax.hh" #define BLOCK_DIM 16 // This kernel is optimized to ensure all global reads and writes are coalesced, // and to avoid bank conflicts in shared memory. This kernel is up to 11x faster // than the naive kernel below. Note that the shared memory array is sized to // (BLOCK_DIM+1)*BLOCK_DIM. This pads each row of the 2D block in shared memory // so that bank conflicts do not occur when threads address the array column-wise. __global__ void transpose_softmax(float *odata, float *idata, int width, int height) { __shared__ float block[BLOCK_DIM][BLOCK_DIM+1]; // read the matrix tile into shared memory // load one element per thread from device memory (idata) and store it // in transpose_relud order in block[][] unsigned int xIndex = blockIdx.x * BLOCK_DIM + threadIdx.x; unsigned int yIndex = blockIdx.y * BLOCK_DIM + threadIdx.y; if((xIndex < width) && (yIndex < height)) { unsigned int index_in = yIndex * width + xIndex; block[threadIdx.y][threadIdx.x] = idata[index_in]; } // synchronise to ensure all writes to block[][] have completed __syncthreads(); // write the transpose_relud matrix tile to global memory (odata) in linear order xIndex = blockIdx.y * BLOCK_DIM + threadIdx.x; yIndex = blockIdx.x * BLOCK_DIM + threadIdx.y; if((xIndex < height) && (yIndex < width)) { unsigned int index_out = yIndex * height + xIndex; odata[index_out] = block[threadIdx.x][threadIdx.y]; } } #define TILE_WIDTH 16 // Compute C = A * B __global__ void matrixMultiplyAndSoftmax(float * A, float * B, float * C, float* b, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { //@@ Insert code to implement matrix multiplication here __shared__ float ds_M[TILE_WIDTH][TILE_WIDTH]; __shared__ float ds_N[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x, by = blockIdx.y, tx = threadIdx.x, ty = threadIdx.y, Row = by * TILE_WIDTH + ty, Col = bx * TILE_WIDTH + tx; float Pvalue = 0; for (int m = 0; m < (numAColumns-1)/TILE_WIDTH+1; ++m) { if (Row < numARows && m*TILE_WIDTH+tx < numAColumns) ds_M[ty][tx] = A[Row*numAColumns + m*TILE_WIDTH+tx]; else ds_M[ty][tx] = 0; if (Col < numBColumns && m*TILE_WIDTH+ty < numBRows) ds_N[ty][tx] = B[(m*TILE_WIDTH+ty)*numBColumns+Col]; else ds_N[ty][tx] = 0; __syncthreads(); for (int k = 0; k < TILE_WIDTH; ++k) Pvalue += ds_M[ty][k] * ds_N[k][tx]; __syncthreads(); } if (Row < numCRows && Col < numCColumns) C[Row*numCColumns+Col] = (Pvalue + b[Row]); } __global__ void matrixMultiplyBackPropSoftmax(float * A, float * B, float * C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { //@@ Insert code to implement matrix multiplication here __shared__ float ds_M[TILE_WIDTH][TILE_WIDTH]; __shared__ float ds_N[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x, by = blockIdx.y, tx = threadIdx.x, ty = threadIdx.y, Row = by * TILE_WIDTH + ty, Col = bx * TILE_WIDTH + tx; float Pvalue = 0; for (int m = 0; m < (numAColumns-1)/TILE_WIDTH+1; ++m) { if (Row < numARows && m*TILE_WIDTH+tx < numAColumns) ds_M[ty][tx] = A[Row*numAColumns + m*TILE_WIDTH+tx]; else ds_M[ty][tx] = 0; if (Col < numBColumns && m*TILE_WIDTH+ty < numBRows) ds_N[ty][tx] = B[(m*TILE_WIDTH+ty)*numBColumns+Col]; else ds_N[ty][tx] = 0; __syncthreads(); for (int k = 0; k < TILE_WIDTH; ++k) Pvalue += ds_M[ty][k] * ds_N[k][tx]; __syncthreads(); } if (Row < numCRows && Col < numCColumns) 
C[Row*numCColumns+Col] = Pvalue ; } // Compute C = A * B __global__ void matrixMultiplyUpdateWeights_softmax(float * A, float * B, float * C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns, float learning_rate) { //@@ Insert code to implement matrix multiplication here __shared__ float ds_M[TILE_WIDTH][TILE_WIDTH]; __shared__ float ds_N[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x, by = blockIdx.y, tx = threadIdx.x, ty = threadIdx.y, Row = by * TILE_WIDTH + ty, Col = bx * TILE_WIDTH + tx; float Pvalue = 0; for (int m = 0; m < (numAColumns-1)/TILE_WIDTH+1; ++m) { if (Row < numARows && m*TILE_WIDTH+tx < numAColumns) ds_M[ty][tx] = A[Row*numAColumns + m*TILE_WIDTH+tx]; else ds_M[ty][tx] = 0; if (Col < numBColumns && m*TILE_WIDTH+ty < numBRows) ds_N[ty][tx] = B[(m*TILE_WIDTH+ty)*numBColumns+Col]; else ds_N[ty][tx] = 0; __syncthreads(); for (int k = 0; k < TILE_WIDTH; ++k) Pvalue += ds_M[ty][k] * ds_N[k][tx]; __syncthreads(); } if (Row < numCRows && Col < numCColumns) C[Row*numCColumns+Col] = C[Row*numCColumns+Col] - learning_rate * (Pvalue / numAColumns); } __global__ void initializeBiasKernel_softmax(float* b, int size){ int index = blockIdx.x * blockDim.x + threadIdx.x; if(index < size){ b[index] = 0.0; } } __global__ void updateBiasKernel_softmax(float* dZ, float* b, int cols, int row, float learning_rate){ int bid = blockIdx.x; extern __shared__ float _share[]; //float * _max = _share; float * _sum = _share; float* sp = dZ + cols * bid; _sum[threadIdx.x] = 0.0; for(int id = threadIdx.x ; id < cols; id += blockDim.x){ // int id = tid + threadIdx.x; //if(id < cols){ _sum[threadIdx.x] += sp[id]; //} } __syncthreads(); int len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) { _sum[threadIdx.x] += _sum[threadIdx.x + skip]; } len = (len + 1) >> 1; } __syncthreads(); b[bid] -= learning_rate * (_sum[0]/cols); } LinearSoftmaxLayer::LinearSoftmaxLayer(std::string name, Shape W_shape) : W(W_shape), b(W_shape.y, 1) { this->name = name; b.allocateMemory(); W.allocateMemory(); initializeBiasWithZeros_softmax(); initializeWeightsRandomly_softmax(); } LinearSoftmaxLayer::~LinearSoftmaxLayer() { } void LinearSoftmaxLayer::initializeWeightsRandomly_softmax() { std::default_random_engine generator; std::normal_distribution<float> normal_distribution(0.0, 1.0); float weights_init_threshold = 0.01; for (int x = 0; x < W.shape.x; x++) { for (int y = 0; y < W.shape.y; y++) { W[y * W.shape.x + x] = normal_distribution(generator) * weights_init_threshold; } } W.copyHostToDevice(); /* dim3 blockDim(256); dim3 gridDim((W.shape.x * W.shape.y + blockDim.x - 1)/blockDim.x); initializeWeightsKernel<<<gridDim, blockDim>>>(W.data_device.get(), W.shape.x * W.shape.y); */ } void LinearSoftmaxLayer::initializeBiasWithZeros_softmax() { /* for (int x = 0; x < b.shape.x; x++) { b[x] = 0; } b.copyHostToDevice(); */ dim3 blockDim(256); dim3 gridDim((b.shape.x * b.shape.y + blockDim.x - 1)/blockDim.x); initializeBiasKernel_softmax<<<gridDim, blockDim>>>(b.data_device.get(), b.shape.x * b.shape.y); } Matrix& LinearSoftmaxLayer::forward(Matrix& A) { assert(W.shape.x == A.shape.y); this->A = A; Shape Z_shape(A.shape.x, W.shape.y); Z.allocateMemoryIfNotAllocated(Z_shape); T.allocateMemoryIfNotAllocated(Z_shape); computeAndStoreLayerOutput_softmax(A); NNException::throwIfDeviceErrorsOccurred("Cannot perform linear layer forward propagation."); return Z; } __global__ void softmax_linear(float* softmaxP, float* b, int rows, int cols){ 
int tid = threadIdx.x; int bid = blockIdx.x; float _max = -100000000.0; float sum = 0.0; extern __shared__ float _share[]; if(tid * cols + bid < rows * cols){ for(int i = 0 ; i < rows ; i++) _share[i] = b[i * cols + bid]; for(int i = 0 ; i < rows ; i++) _max = max(_max, _share[i]); for(int i = 0 ; i < rows ; i++) _share[i] = __expf(_share[i]-_max); for(int i = 0 ; i < rows ; i++) sum += _share[i]; for(int i = 0 ; i < rows ; i++) softmaxP[i * cols + bid] = _share[i]/sum; } } void LinearSoftmaxLayer::computeAndStoreLayerOutput_softmax(Matrix& A) { dim3 block_size(TILE_WIDTH, TILE_WIDTH); dim3 num_of_blocks( (Z.shape.x + block_size.x - 1) / block_size.x, (Z.shape.y + block_size.y - 1) / block_size.y); /* linearLayerForward<<<num_of_blocks, block_size>>>( W.data_device.get(), A.data_device.get(), Z.data_device.get(), b.data_device.get(), W.shape.x, W.shape.y, A.shape.x, A.shape.y); */ matrixMultiplyAndSoftmax<<<num_of_blocks, block_size>>>(W.data_device.get(), A.data_device.get(), T.data_device.get(), b.data_device.get(), W.shape.y, W.shape.x, A.shape.y, A.shape.x, T.shape.y, T.shape.x); dim3 block = T.shape.x; dim3 threads = 1; softmax_linear<<<block, threads, Z.shape.y * sizeof(float)>>>(Z.data_device.get(), T.data_device.get(), Z.shape.y, Z.shape.x); } Matrix& LinearSoftmaxLayer::backprop(Matrix& dZ, float learning_rate) { dA.allocateMemoryIfNotAllocated(A.shape); WT.allocateMemoryIfNotAllocated(Shape(W.shape.y, W.shape.x)); AT.allocateMemoryIfNotAllocated(Shape(A.shape.y, A.shape.x)); //std::cout << "Here" << std::endl; computeAndStoreBackpropError_softmax(dZ); NNException::throwIfDeviceErrorsOccurred("Cannot perform back propagation."); updateBias_softmax(dZ, learning_rate); NNException::throwIfDeviceErrorsOccurred("Cannot perform bias update."); updateWeights_softmax(dZ, learning_rate); NNException::throwIfDeviceErrorsOccurred("Cannot perform weights update."); return dA; } void LinearSoftmaxLayer::computeAndStoreBackpropError_softmax(Matrix& dZ) { dim3 block_size(TILE_WIDTH, TILE_WIDTH); dim3 num_of_blocks( (A.shape.x + block_size.x - 1) / block_size.x, (A.shape.y + block_size.y - 1) / block_size.y); /* linearLayerBackprop<<<num_of_blocks, block_size>>>( W.data_device.get(), dZ.data_device.get(), dA.data_device.get(), W.shape.x, W.shape.y, dZ.shape.x, dZ.shape.y); */ dim3 transpose_relu_block(BLOCK_DIM, BLOCK_DIM); dim3 num_t_blocks((W.shape.x + transpose_relu_block.x - 1) / transpose_relu_block.x, (W.shape.y + transpose_relu_block.y - 1) / transpose_relu_block.y); transpose_softmax<<<num_t_blocks, transpose_relu_block>>>(WT.data_device.get(), W.data_device.get(), W.shape.x, W.shape.y); matrixMultiplyBackPropSoftmax<<<num_of_blocks, block_size>>>(WT.data_device.get(), dZ.data_device.get(), dA.data_device.get(), WT.shape.y, WT.shape.x, dZ.shape.y, dZ.shape.x, dA.shape.y, dA.shape.x); } void LinearSoftmaxLayer::updateWeights_softmax(Matrix& dZ, float learning_rate) { dim3 block_size(TILE_WIDTH, TILE_WIDTH); dim3 num_of_blocks( (W.shape.x + block_size.x - 1) / block_size.x, (W.shape.y + block_size.y - 1) / block_size.y); /* linearLayerUpdateWeights<<<num_of_blocks, block_size>>>(dZ.data_device.get(), A.data_device.get(), W.data_device.get(), dZ.shape.x, dZ.shape.y, A.shape.x, A.shape.y, learning_rate); */ dim3 transpose_relu_block(BLOCK_DIM, BLOCK_DIM); dim3 num_t_blocks((A.shape.x + transpose_relu_block.x - 1) / transpose_relu_block.x, (A.shape.y + transpose_relu_block.y - 1) / transpose_relu_block.y); transpose_softmax<<<num_t_blocks, transpose_relu_block>>>(AT.data_device.get(), 
A.data_device.get(), A.shape.x, A.shape.y); matrixMultiplyUpdateWeights_softmax<<<num_of_blocks, block_size>>>(dZ.data_device.get(), AT.data_device.get(), W.data_device.get(), dZ.shape.y, dZ.shape.x, AT.shape.y, AT.shape.x, W.shape.y, W.shape.x, learning_rate); } void LinearSoftmaxLayer::updateBias_softmax(Matrix& dZ, float learning_rate) { /* dim3 block_size(256); dim3 num_of_blocks( (dZ.shape.y * dZ.shape.x + block_size.x - 1) / block_size.x); linearLayerUpdateBias<<<num_of_blocks, block_size>>>(dZ.data_device.get(), b.data_device.get(), dZ.shape.x, dZ.shape.y, b.shape.x, learning_rate); */ dim3 block_size(std::min(256, int(dZ.shape.x))); dim3 num_of_blocks(dZ.shape.y); updateBiasKernel_softmax<<<num_of_blocks, block_size, sizeof(float) * block_size.x>>>(dZ.data_device.get(), b.data_device.get(), dZ.shape.x, dZ.shape.y, learning_rate); } int LinearSoftmaxLayer::getXDim() const { return W.shape.x; } int LinearSoftmaxLayer::getYDim() const { return W.shape.y; } Matrix LinearSoftmaxLayer::getWeightsMatrix() const { return W; } Matrix LinearSoftmaxLayer::getBiasVector() const { return b; }
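This pair also shows how hipify carries the optional launch parameters across: in updateBias_softmax and softmax_linear, the dynamic shared-memory size passed as the third <<<>>> parameter reappears as the fourth argument of hipLaunchKernelGGL, followed by the stream (0 by default). A small sketch of the same pattern, assuming a hypothetical column-sum kernel rather than the layer code above:

#include <cuda_runtime.h>

// Hypothetical per-row sum kernel that, like updateBiasKernel_softmax above,
// sizes its shared-memory buffer at launch time via extern __shared__.
__global__ void colsum(const float *in, float *out, int cols) {
  extern __shared__ float buf[];
  buf[threadIdx.x] = 0.0f;
  for (int c = threadIdx.x; c < cols; c += blockDim.x)
    buf[threadIdx.x] += in[blockIdx.x * cols + c];
  __syncthreads();
  if (threadIdx.x == 0) {
    float s = 0.0f;
    for (int t = 0; t < blockDim.x; ++t) s += buf[t];
    out[blockIdx.x] = s;
  }
}

void launch_colsum(const float *in, float *out, int rows, int cols) {
  dim3 grid(rows), block(256);
  size_t shmem = block.x * sizeof(float);
  // CUDA: the third launch parameter is the dynamic shared-memory size.
  colsum<<<grid, block, shmem>>>(in, out, cols);
  // hipify moves it (and the stream, defaulting to 0) into the argument list:
  //   hipLaunchKernelGGL(colsum, grid, block, shmem, 0, in, out, cols);
}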
4c284d104ca5d1f839ddb4826046ef4316f3bf45.hip
// !!! This is a file automatically generated by hipify!!! // includes, system #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> // includes, project #include <hip/hip_runtime.h> #include <hipfft.h> #include <hipfftXt.h> #include <helper_cuda.h> #include <helper_functions.h> // Complex data type typedef float2 Complex; static __device__ __host__ inline Complex ComplexAdd(Complex, Complex); static __device__ __host__ inline Complex ComplexScale(Complex, float); static __device__ __host__ inline Complex ComplexMul(Complex, Complex); static __global__ void ComplexPointwiseMulAndScale(Complex *, const Complex *, int, float); // Filtering functions void Convolve(const Complex *, int, const Complex *, int, Complex *); // Padding functions int PadData(const Complex *, Complex **, int, const Complex *, Complex **, int); //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest(int argc, char **argv); // The filter size is assumed to be a number smaller than the signal size #define SIGNAL_SIZE 50 #define FILTER_KERNEL_SIZE 11 //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { runTest(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest(int argc, char **argv) { printf("[simpleCUFFT] is starting...\n"); findCudaDevice(argc, (const char **)argv); // Allocate host memory for the signal Complex *h_signal = reinterpret_cast<Complex *>(malloc(sizeof(Complex) * SIGNAL_SIZE)); // Initialize the memory for the signal for (unsigned int i = 0; i < SIGNAL_SIZE; ++i) { h_signal[i].x = rand() / static_cast<float>(RAND_MAX); h_signal[i].y = 0; } // Allocate host memory for the filter Complex *h_filter_kernel = reinterpret_cast<Complex *>(malloc(sizeof(Complex) * FILTER_KERNEL_SIZE)); // Initialize the memory for the filter for (unsigned int i = 0; i < FILTER_KERNEL_SIZE; ++i) { h_filter_kernel[i].x = rand() / static_cast<float>(RAND_MAX); h_filter_kernel[i].y = 0; } // Pad signal and filter kernel Complex *h_padded_signal; Complex *h_padded_filter_kernel; int new_size = PadData(h_signal, &h_padded_signal, SIGNAL_SIZE, h_filter_kernel, &h_padded_filter_kernel, FILTER_KERNEL_SIZE); int mem_size = sizeof(Complex) * new_size; // Allocate device memory for signal Complex *d_signal; checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_signal), mem_size)); // Copy host memory to device checkCudaErrors( hipMemcpy(d_signal, h_padded_signal, mem_size, hipMemcpyHostToDevice)); // Allocate device memory for filter kernel Complex *d_filter_kernel; checkCudaErrors( hipMalloc(reinterpret_cast<void **>(&d_filter_kernel), mem_size)); // Copy host memory to device checkCudaErrors(hipMemcpy(d_filter_kernel, h_padded_filter_kernel, mem_size, hipMemcpyHostToDevice)); // CUFFT plan simple API hipfftHandle plan; checkCudaErrors(hipfftPlan1d(&plan, new_size, HIPFFT_C2C, 1)); // CUFFT plan advanced API hipfftHandle plan_adv; size_t workSize; long long int new_size_long = new_size; checkCudaErrors(hipfftCreate(&plan_adv)); checkCudaErrors(cufftXtMakePlanMany(plan_adv, 1, &new_size_long, NULL, 1, 1, HIP_C_32F, NULL, 1, 1, HIP_C_32F, 1, &workSize, HIP_C_32F)); printf("Temporary buffer size %li bytes\n", workSize); // Transform 
signal and kernel printf("Transforming signal hipfftExecC2C\n"); checkCudaErrors(hipfftExecC2C(plan, reinterpret_cast<hipfftComplex *>(d_signal), reinterpret_cast<hipfftComplex *>(d_signal), HIPFFT_FORWARD)); checkCudaErrors(hipfftExecC2C( plan_adv, reinterpret_cast<hipfftComplex *>(d_filter_kernel), reinterpret_cast<hipfftComplex *>(d_filter_kernel), HIPFFT_FORWARD)); // Multiply the coefficients together and normalize the result printf("Launching ComplexPointwiseMulAndScale<<< >>>\n"); hipLaunchKernelGGL(( ComplexPointwiseMulAndScale), dim3(32), dim3(256), 0, 0, d_signal, d_filter_kernel, new_size, 1.0f / new_size); // Check if kernel execution generated and error getLastCudaError("Kernel execution failed [ ComplexPointwiseMulAndScale ]"); // Transform signal back printf("Transforming signal back hipfftExecC2C\n"); checkCudaErrors(hipfftExecC2C(plan, reinterpret_cast<hipfftComplex *>(d_signal), reinterpret_cast<hipfftComplex *>(d_signal), HIPFFT_BACKWARD)); // Copy device memory to host Complex *h_convolved_signal = h_padded_signal; checkCudaErrors(hipMemcpy(h_convolved_signal, d_signal, mem_size, hipMemcpyDeviceToHost)); // Allocate host memory for the convolution result Complex *h_convolved_signal_ref = reinterpret_cast<Complex *>(malloc(sizeof(Complex) * SIGNAL_SIZE)); // Convolve on the host Convolve(h_signal, SIGNAL_SIZE, h_filter_kernel, FILTER_KERNEL_SIZE, h_convolved_signal_ref); // check result bool bTestResult = sdkCompareL2fe( reinterpret_cast<float *>(h_convolved_signal_ref), reinterpret_cast<float *>(h_convolved_signal), 2 * SIGNAL_SIZE, 1e-5f); // Destroy CUFFT context checkCudaErrors(hipfftDestroy(plan)); checkCudaErrors(hipfftDestroy(plan_adv)); // cleanup memory free(h_signal); free(h_filter_kernel); free(h_padded_signal); free(h_padded_filter_kernel); free(h_convolved_signal_ref); checkCudaErrors(hipFree(d_signal)); checkCudaErrors(hipFree(d_filter_kernel)); exit(bTestResult ? 
EXIT_SUCCESS : EXIT_FAILURE); } // Pad data int PadData(const Complex *signal, Complex **padded_signal, int signal_size, const Complex *filter_kernel, Complex **padded_filter_kernel, int filter_kernel_size) { int minRadius = filter_kernel_size / 2; int maxRadius = filter_kernel_size - minRadius; int new_size = signal_size + maxRadius; // Pad signal Complex *new_data = reinterpret_cast<Complex *>(malloc(sizeof(Complex) * new_size)); memcpy(new_data + 0, signal, signal_size * sizeof(Complex)); memset(new_data + signal_size, 0, (new_size - signal_size) * sizeof(Complex)); *padded_signal = new_data; // Pad filter new_data = reinterpret_cast<Complex *>(malloc(sizeof(Complex) * new_size)); memcpy(new_data + 0, filter_kernel + minRadius, maxRadius * sizeof(Complex)); memset(new_data + maxRadius, 0, (new_size - filter_kernel_size) * sizeof(Complex)); memcpy(new_data + new_size - minRadius, filter_kernel, minRadius * sizeof(Complex)); *padded_filter_kernel = new_data; return new_size; } //////////////////////////////////////////////////////////////////////////////// // Filtering operations //////////////////////////////////////////////////////////////////////////////// // Computes convolution on the host void Convolve(const Complex *signal, int signal_size, const Complex *filter_kernel, int filter_kernel_size, Complex *filtered_signal) { int minRadius = filter_kernel_size / 2; int maxRadius = filter_kernel_size - minRadius; // Loop over output element indices for (int i = 0; i < signal_size; ++i) { filtered_signal[i].x = filtered_signal[i].y = 0; // Loop over convolution indices for (int j = -maxRadius + 1; j <= minRadius; ++j) { int k = i + j; if (k >= 0 && k < signal_size) { filtered_signal[i] = ComplexAdd(filtered_signal[i], ComplexMul(signal[k], filter_kernel[minRadius - j])); } } } } //////////////////////////////////////////////////////////////////////////////// // Complex operations //////////////////////////////////////////////////////////////////////////////// // Complex addition static __device__ __host__ inline Complex ComplexAdd(Complex a, Complex b) { Complex c; c.x = a.x + b.x; c.y = a.y + b.y; return c; } // Complex scale static __device__ __host__ inline Complex ComplexScale(Complex a, float s) { Complex c; c.x = s * a.x; c.y = s * a.y; return c; } // Complex multiplication static __device__ __host__ inline Complex ComplexMul(Complex a, Complex b) { Complex c; c.x = a.x * b.x - a.y * b.y; c.y = a.x * b.y + a.y * b.x; return c; } // Complex pointwise multiplication static __global__ void ComplexPointwiseMulAndScale(Complex *a, const Complex *b, int size, float scale) { const int numThreads = blockDim.x * gridDim.x; const int threadID = blockIdx.x * blockDim.x + threadIdx.x; for (int i = threadID; i < size; i += numThreads) { a[i] = ComplexScale(ComplexMul(a[i], b[i]), scale); } }
4c284d104ca5d1f839ddb4826046ef4316f3bf45.cu
// includes, system #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> // includes, project #include <cuda_runtime.h> #include <cufft.h> #include <cufftXt.h> #include <helper_cuda.h> #include <helper_functions.h> // Complex data type typedef float2 Complex; static __device__ __host__ inline Complex ComplexAdd(Complex, Complex); static __device__ __host__ inline Complex ComplexScale(Complex, float); static __device__ __host__ inline Complex ComplexMul(Complex, Complex); static __global__ void ComplexPointwiseMulAndScale(Complex *, const Complex *, int, float); // Filtering functions void Convolve(const Complex *, int, const Complex *, int, Complex *); // Padding functions int PadData(const Complex *, Complex **, int, const Complex *, Complex **, int); //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest(int argc, char **argv); // The filter size is assumed to be a number smaller than the signal size #define SIGNAL_SIZE 50 #define FILTER_KERNEL_SIZE 11 //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { runTest(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest(int argc, char **argv) { printf("[simpleCUFFT] is starting...\n"); findCudaDevice(argc, (const char **)argv); // Allocate host memory for the signal Complex *h_signal = reinterpret_cast<Complex *>(malloc(sizeof(Complex) * SIGNAL_SIZE)); // Initialize the memory for the signal for (unsigned int i = 0; i < SIGNAL_SIZE; ++i) { h_signal[i].x = rand() / static_cast<float>(RAND_MAX); h_signal[i].y = 0; } // Allocate host memory for the filter Complex *h_filter_kernel = reinterpret_cast<Complex *>(malloc(sizeof(Complex) * FILTER_KERNEL_SIZE)); // Initialize the memory for the filter for (unsigned int i = 0; i < FILTER_KERNEL_SIZE; ++i) { h_filter_kernel[i].x = rand() / static_cast<float>(RAND_MAX); h_filter_kernel[i].y = 0; } // Pad signal and filter kernel Complex *h_padded_signal; Complex *h_padded_filter_kernel; int new_size = PadData(h_signal, &h_padded_signal, SIGNAL_SIZE, h_filter_kernel, &h_padded_filter_kernel, FILTER_KERNEL_SIZE); int mem_size = sizeof(Complex) * new_size; // Allocate device memory for signal Complex *d_signal; checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_signal), mem_size)); // Copy host memory to device checkCudaErrors( cudaMemcpy(d_signal, h_padded_signal, mem_size, cudaMemcpyHostToDevice)); // Allocate device memory for filter kernel Complex *d_filter_kernel; checkCudaErrors( cudaMalloc(reinterpret_cast<void **>(&d_filter_kernel), mem_size)); // Copy host memory to device checkCudaErrors(cudaMemcpy(d_filter_kernel, h_padded_filter_kernel, mem_size, cudaMemcpyHostToDevice)); // CUFFT plan simple API cufftHandle plan; checkCudaErrors(cufftPlan1d(&plan, new_size, CUFFT_C2C, 1)); // CUFFT plan advanced API cufftHandle plan_adv; size_t workSize; long long int new_size_long = new_size; checkCudaErrors(cufftCreate(&plan_adv)); checkCudaErrors(cufftXtMakePlanMany(plan_adv, 1, &new_size_long, NULL, 1, 1, CUDA_C_32F, NULL, 1, 1, CUDA_C_32F, 1, &workSize, CUDA_C_32F)); printf("Temporary buffer size %li bytes\n", workSize); // Transform signal and kernel printf("Transforming signal cufftExecC2C\n"); 
checkCudaErrors(cufftExecC2C(plan, reinterpret_cast<cufftComplex *>(d_signal), reinterpret_cast<cufftComplex *>(d_signal), CUFFT_FORWARD)); checkCudaErrors(cufftExecC2C( plan_adv, reinterpret_cast<cufftComplex *>(d_filter_kernel), reinterpret_cast<cufftComplex *>(d_filter_kernel), CUFFT_FORWARD)); // Multiply the coefficients together and normalize the result printf("Launching ComplexPointwiseMulAndScale<<< >>>\n"); ComplexPointwiseMulAndScale<<<32, 256>>>(d_signal, d_filter_kernel, new_size, 1.0f / new_size); // Check if kernel execution generated and error getLastCudaError("Kernel execution failed [ ComplexPointwiseMulAndScale ]"); // Transform signal back printf("Transforming signal back cufftExecC2C\n"); checkCudaErrors(cufftExecC2C(plan, reinterpret_cast<cufftComplex *>(d_signal), reinterpret_cast<cufftComplex *>(d_signal), CUFFT_INVERSE)); // Copy device memory to host Complex *h_convolved_signal = h_padded_signal; checkCudaErrors(cudaMemcpy(h_convolved_signal, d_signal, mem_size, cudaMemcpyDeviceToHost)); // Allocate host memory for the convolution result Complex *h_convolved_signal_ref = reinterpret_cast<Complex *>(malloc(sizeof(Complex) * SIGNAL_SIZE)); // Convolve on the host Convolve(h_signal, SIGNAL_SIZE, h_filter_kernel, FILTER_KERNEL_SIZE, h_convolved_signal_ref); // check result bool bTestResult = sdkCompareL2fe( reinterpret_cast<float *>(h_convolved_signal_ref), reinterpret_cast<float *>(h_convolved_signal), 2 * SIGNAL_SIZE, 1e-5f); // Destroy CUFFT context checkCudaErrors(cufftDestroy(plan)); checkCudaErrors(cufftDestroy(plan_adv)); // cleanup memory free(h_signal); free(h_filter_kernel); free(h_padded_signal); free(h_padded_filter_kernel); free(h_convolved_signal_ref); checkCudaErrors(cudaFree(d_signal)); checkCudaErrors(cudaFree(d_filter_kernel)); exit(bTestResult ? 
EXIT_SUCCESS : EXIT_FAILURE); } // Pad data int PadData(const Complex *signal, Complex **padded_signal, int signal_size, const Complex *filter_kernel, Complex **padded_filter_kernel, int filter_kernel_size) { int minRadius = filter_kernel_size / 2; int maxRadius = filter_kernel_size - minRadius; int new_size = signal_size + maxRadius; // Pad signal Complex *new_data = reinterpret_cast<Complex *>(malloc(sizeof(Complex) * new_size)); memcpy(new_data + 0, signal, signal_size * sizeof(Complex)); memset(new_data + signal_size, 0, (new_size - signal_size) * sizeof(Complex)); *padded_signal = new_data; // Pad filter new_data = reinterpret_cast<Complex *>(malloc(sizeof(Complex) * new_size)); memcpy(new_data + 0, filter_kernel + minRadius, maxRadius * sizeof(Complex)); memset(new_data + maxRadius, 0, (new_size - filter_kernel_size) * sizeof(Complex)); memcpy(new_data + new_size - minRadius, filter_kernel, minRadius * sizeof(Complex)); *padded_filter_kernel = new_data; return new_size; } //////////////////////////////////////////////////////////////////////////////// // Filtering operations //////////////////////////////////////////////////////////////////////////////// // Computes convolution on the host void Convolve(const Complex *signal, int signal_size, const Complex *filter_kernel, int filter_kernel_size, Complex *filtered_signal) { int minRadius = filter_kernel_size / 2; int maxRadius = filter_kernel_size - minRadius; // Loop over output element indices for (int i = 0; i < signal_size; ++i) { filtered_signal[i].x = filtered_signal[i].y = 0; // Loop over convolution indices for (int j = -maxRadius + 1; j <= minRadius; ++j) { int k = i + j; if (k >= 0 && k < signal_size) { filtered_signal[i] = ComplexAdd(filtered_signal[i], ComplexMul(signal[k], filter_kernel[minRadius - j])); } } } } //////////////////////////////////////////////////////////////////////////////// // Complex operations //////////////////////////////////////////////////////////////////////////////// // Complex addition static __device__ __host__ inline Complex ComplexAdd(Complex a, Complex b) { Complex c; c.x = a.x + b.x; c.y = a.y + b.y; return c; } // Complex scale static __device__ __host__ inline Complex ComplexScale(Complex a, float s) { Complex c; c.x = s * a.x; c.y = s * a.y; return c; } // Complex multiplication static __device__ __host__ inline Complex ComplexMul(Complex a, Complex b) { Complex c; c.x = a.x * b.x - a.y * b.y; c.y = a.x * b.y + a.y * b.x; return c; } // Complex pointwise multiplication static __global__ void ComplexPointwiseMulAndScale(Complex *a, const Complex *b, int size, float scale) { const int numThreads = blockDim.x * gridDim.x; const int threadID = blockIdx.x * blockDim.x + threadIdx.x; for (int i = threadID; i < size; i += numThreads) { a[i] = ComplexScale(ComplexMul(a[i], b[i]), scale); } }
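Besides the runtime API (cudaMalloc / cudaMemcpy / cudaFree to hipMalloc / hipMemcpy / hipFree), this pair shows the library-level renaming hipify applies to cuFFT: handles, plan creation and execution calls gain the hipfft prefix, and the direction constant CUFFT_INVERSE maps to HIPFFT_BACKWARD. A minimal sketch of those calls, with error handling omitted and a device buffer assumed to already hold n complex samples:

#include <cufft.h>

// Sketch of the cuFFT calls the pair above shows hipify renaming.
// d_data is assumed to be a device buffer of n cufftComplex values.
void fft_roundtrip(cufftComplex *d_data, int n) {
  cufftHandle plan;                                    // -> hipfftHandle
  cufftPlan1d(&plan, n, CUFFT_C2C, 1);                 // -> hipfftPlan1d(..., HIPFFT_C2C, 1)
  cufftExecC2C(plan, d_data, d_data, CUFFT_FORWARD);   // -> HIPFFT_FORWARD
  cufftExecC2C(plan, d_data, d_data, CUFFT_INVERSE);   // -> HIPFFT_BACKWARD
  cufftDestroy(plan);                                  // -> hipfftDestroy
}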
796bbdf45d349fce8c9d71434850a76d511d2ae1.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/native/TensorIterator.h> #include <ATen/native/ReduceOps.h> #include <ATen/native/Resize.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/hip/Reduce.cuh> #include <ATen/native/hip/Normalization.cuh> #include <c10/hip/HIPMathCompat.h> namespace at { namespace native { namespace { inline bool batch_norm_use_channels_last_kernels(const at::Tensor& self) { return (self.is_contiguous(at::MemoryFormat::ChannelsLast) || (self.is_contiguous() && self.strides()[1] == 1)); } enum class Impl { Contiguous, ChannelsLast, General, }; inline Impl batch_norm_choose_impl(const Tensor& self) { if (!at::cuda::detail::canUse32BitIndexMath(self)) { return Impl::General; } if (self.is_contiguous()) { return self.strides()[1] == 1 ? Impl::ChannelsLast : Impl::Contiguous; } if (self.is_contiguous(at::MemoryFormat::ChannelsLast)) { return Impl::ChannelsLast; } return Impl::General; } inline Impl batch_norm_choose_impl(const Tensor& in1, const Tensor& in2) { auto imp1 = batch_norm_choose_impl(in1); if (imp1 == Impl::General) { return imp1; } auto imp2 = batch_norm_choose_impl(in2); return imp1 == imp2 ? imp1 : Impl::General; } void batch_norm_elementwise( const Tensor& out, const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const Tensor& mean_, const Tensor& invstd_) { switch (batch_norm_choose_impl(self)) { case Impl::Contiguous: { c10::MaybeOwned<Tensor> weight = at::borrow_from_optional_tensor(weight_opt); c10::MaybeOwned<Tensor> bias = at::borrow_from_optional_tensor(bias_opt); AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, self.scalar_type(), "batch_norm_elementwise_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; if ((weight->defined() && weight->scalar_type() != self.scalar_type()) || (bias->defined() && bias->scalar_type() != self.scalar_type())) { batch_norm_elemt_cuda_template<scalar_t, accscalar_t, int32_t>( out, self, *weight, *bias, mean_, invstd_); } else { batch_norm_elemt_cuda_template<scalar_t, scalar_t, int32_t>( out, self, *weight, *bias, mean_, invstd_); } }); return; } case Impl::ChannelsLast: { auto weight = at::borrow_from_optional_tensor(weight_opt); auto bias = at::borrow_from_optional_tensor(bias_opt); if ((!weight->defined() || weight->is_contiguous()) && (!bias->defined() || bias->is_contiguous()) && (!mean_.defined() || mean_.is_contiguous()) && (!invstd_.defined() || invstd_.is_contiguous())) { batch_norm_elemt_channels_last_cuda_template( out, self, *weight, *bias, mean_, invstd_); return; } C10_FALLTHROUGH; } case Impl::General: { const int64_t ndim = self.dim(); DimVector sizes(ndim, 1), strides(ndim, 0); // Helper to convert 1d tensors to an nd tensor that broadcasts with input // All elements go into the channel dimension auto as_nd = [&](const Tensor& t) { TORCH_INTERNAL_ASSERT(t.defined() && t.dim() == 1); sizes[1] = t.sizes()[0]; strides[1] = t.strides()[0]; return t.as_strided(sizes, strides); }; auto weight = weight_opt.has_value() && weight_opt->defined() ? as_nd(*weight_opt) : at::scalar_tensor(1, mean_.options()); auto bias = bias_opt.has_value() && bias_opt->defined() ? 
as_nd(*bias_opt) : at::scalar_tensor(0, mean_.options()); auto mean = as_nd(mean_); auto invstd = as_nd(invstd_); auto iter = TensorIteratorConfig() .add_borrowed_output(out) .add_borrowed_input(self) .add_borrowed_input(weight) .add_borrowed_input(bias) .add_borrowed_input(mean) .add_borrowed_input(invstd) .check_all_same_dtype(false) .promote_inputs_to_common_dtype(false) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, self.scalar_type(), "batch_norm_elementwise_cuda", [&] { using acc_t = at::acc_type<scalar_t, true>; gpu_kernel(iter, [] GPU_LAMBDA (scalar_t input, acc_t weight, acc_t bias, acc_t mean, acc_t invstd) -> scalar_t { return ((input - mean) * invstd) * weight + bias; }); }); return; } } } Tensor batch_norm_elementwise_backward_train( const Tensor& grad_out, const Tensor& input, const Tensor& mean, const Tensor& invstd, const Tensor& weight, const Tensor& sum_dy, const Tensor& sum_dy_xmu) { switch (batch_norm_choose_impl(input, grad_out)) { case Impl::Contiguous: { return AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "batch_norm_backward_elemt", [&] { using accscalar_t = at::acc_type<scalar_t, true>; if (weight.defined() && weight.scalar_type() != input.scalar_type()) { return batch_norm_backward_elemt_cuda_template<scalar_t, accscalar_t, int32_t>( grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu); } else { return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int32_t>( grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu); } }); } case Impl::ChannelsLast: { if ((!weight.defined() || weight.is_contiguous()) && mean.is_contiguous() && invstd.is_contiguous()) { return batch_norm_backward_elemt_channels_last_cuda_template( grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu); } C10_FALLTHROUGH; } case Impl::General: { const auto ndim = input.dim(); DimVector sizes(ndim, 1), strides(ndim, 0); auto as_nd = [&](const Tensor& t) { TORCH_INTERNAL_ASSERT(t.defined() && t.dim() == 1); sizes[1] = t.sizes()[0]; strides[1] = t.strides()[0]; return t.as_strided(sizes, strides); }; auto invstd_nd = as_nd(invstd); auto mean_nd = as_nd(mean); auto sum_dy_nd = as_nd(sum_dy); auto sum_dy_xmu_nd = as_nd(sum_dy_xmu); auto weight_nd = weight.defined() ? 
as_nd(weight) : at::scalar_tensor(1.0, input.options().dtype(mean.scalar_type())); Tensor grad_input = at::empty(input.sizes(), grad_out.options()); auto iter = TensorIteratorConfig() .add_output(grad_input) .add_input(grad_out) .add_input(input) .add_input(weight_nd) .add_input(mean_nd) .add_input(invstd_nd) .add_input(sum_dy_xmu_nd) .add_input(sum_dy_nd) .check_all_same_dtype(false) .promote_inputs_to_common_dtype(false) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_out.scalar_type(), "batch_norm_eval_backward", [&]{ using accscalar_t = at::acc_type<scalar_t, true>; auto norm_fct = static_cast<accscalar_t>(1.0 / (input.numel() /input.size(1)) ); gpu_kernel(iter, [norm_fct] GPU_LAMBDA (scalar_t gO, scalar_t input, accscalar_t weight, accscalar_t mean, accscalar_t invstd, accscalar_t xmu, accscalar_t dy) -> scalar_t { auto factor_1_c = invstd * invstd * xmu * norm_fct; auto factor_2_c = weight * invstd; auto m_dy_c = dy * norm_fct; return (gO - m_dy_c - (input - mean) * factor_1_c) * factor_2_c; }); }); return grad_input; } } TORCH_INTERNAL_ASSERT(false); } Tensor batch_norm_elementwise_backward_eval( const Tensor& grad_out, const Tensor& input, const Tensor& invstd, const Tensor& weight) { const auto ndim = input.dim(); DimVector shape(ndim, 1), strides(ndim, 0); shape[1] = invstd.sizes()[0]; strides[1] = invstd.strides()[0]; auto invstd_nd = invstd.as_strided(shape, strides); Tensor grad_input = at::empty(input.sizes(), grad_out.options()); if (weight.defined()) { strides[1] = weight.strides()[0]; auto weight_nd = weight.as_strided(shape, strides); auto iter = TensorIteratorConfig() .add_output(grad_input) .add_input(grad_out) .add_input(invstd_nd) .add_input(weight_nd) .check_all_same_dtype(false) .promote_inputs_to_common_dtype(false) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_out.scalar_type(), "batch_norm_eval_backward", [&]{ using accscalar_t = at::acc_type<scalar_t, true>; gpu_kernel(iter, [] GPU_LAMBDA (scalar_t gO, accscalar_t invstd, accscalar_t weight) -> scalar_t { return gO * weight * invstd; }); }); } else { auto iter = TensorIteratorConfig() .add_output(grad_input) .add_input(grad_out) .add_input(invstd_nd) .check_all_same_dtype(false) .promote_inputs_to_common_dtype(false) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_out.scalar_type(), "batch_norm_eval_backward", [&]{ using accscalar_t = at::acc_type<scalar_t, true>; gpu_kernel(iter, [] GPU_LAMBDA (scalar_t gO, accscalar_t invstd) -> scalar_t { return gO * invstd; }); }); } return grad_input; } void batch_norm_mean_var(const Tensor& self, Tensor& save_mean, Tensor& save_var) { // NOTE: Epsilon is only used for InvStd, not Var. The value here is ignored. 
const double dummy_epsilon = 1e-5; switch (batch_norm_choose_impl(self)) { case Impl::Contiguous: { AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, self.scalar_type(), "batch_norm_stats_cuda", [&] { batch_norm_stats_cuda_template<scalar_t, int32_t, Var>( save_mean, save_var, self, dummy_epsilon); }); return; } case Impl::ChannelsLast: { if ((!save_mean.defined() || save_mean.is_contiguous()) && (!save_var.defined() || save_var.is_contiguous())) { AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, self.scalar_type(), "batch_norm_stats_cuda", [&] { batch_norm_stats_channels_last_cuda_template<scalar_t, Var>( save_mean, save_var, self, dummy_epsilon); }); return; } C10_FALLTHROUGH; } case Impl::General: { const int64_t ndim = self.dim(); DimVector reduce_dims(ndim - 1); reduce_dims[0] = 0; for (int64_t i = 2; i < ndim; ++i) { reduce_dims[i - 1] = i; } // For some reason this isn't an actual operator but it exists anyway... at::native::var_mean_out(save_var, save_mean, self, /*dims=*/reduce_dims, /*unbiased=*/false, /*keepdim=*/false); return; } } } void batch_norm_update_stats( const Tensor& save_mean, const Tensor& save_var, const Tensor& running_mean, const Tensor& running_var, double momentum_, int64_t N) { auto iter = TensorIteratorConfig() .add_borrowed_output(running_mean) .add_borrowed_output(running_var) .add_borrowed_input(save_mean) .add_borrowed_input(save_var) .add_borrowed_input(running_mean) .add_borrowed_input(running_var) .check_all_same_dtype(false) .promote_inputs_to_common_dtype(false) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, running_mean.scalar_type(), "batch_norm_update_stats_cuda", [&] { using acc_t = at::acc_type<scalar_t, true>; const auto bessel_correction_factor = static_cast<acc_t>( static_cast<double>(N) / static_cast<double>(N - 1)); const auto momentum = static_cast<acc_t>(momentum_); gpu_kernel_multiple_outputs( iter, [=] GPU_LAMBDA (acc_t mean, acc_t var, scalar_t running_mean, scalar_t running_var) -> thrust::tuple<scalar_t, scalar_t> { const auto unbiased_var = var * bessel_correction_factor; return thrust::tuple<scalar_t, scalar_t>{ mean * momentum + (1 - momentum) * running_mean, unbiased_var * momentum + (1 - momentum) * running_var, }; }); }); } void batch_norm_update_stats_and_invert( const Tensor& save_mean, const Tensor& save_var, const Tensor& running_mean, const Tensor& running_var, double momentum_, double epsilon, int64_t N) { auto iter = TensorIteratorConfig() .add_borrowed_output(running_mean) .add_borrowed_output(running_var) .add_borrowed_output(save_var) .add_borrowed_input(save_mean) .add_borrowed_input(save_var) .add_borrowed_input(running_mean) .add_borrowed_input(running_var) .check_all_same_dtype(false) .promote_inputs_to_common_dtype(false) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, running_mean.scalar_type(), "batch_norm_update_stats_cuda", [&] { using acc_t = at::acc_type<scalar_t, true>; const auto bessel_correction_factor = static_cast<acc_t>( static_cast<double>(N) / static_cast<double>(N - 1)); const auto eps = static_cast<acc_t>(epsilon); const auto momentum = static_cast<acc_t>(momentum_); gpu_kernel_multiple_outputs( iter, [=] GPU_LAMBDA (acc_t mean, acc_t var, scalar_t running_mean, scalar_t running_var) -> thrust::tuple<scalar_t, scalar_t, acc_t> { const auto unbiased_var = var * bessel_correction_factor; return thrust::tuple<scalar_t, scalar_t, acc_t>{ mean * momentum + (1 - momentum) * running_mean, unbiased_var * momentum + (1 - momentum) * running_var, c10::hip::compat::rsqrt(var + 
eps) }; }); }); } void batch_norm_calc_invstd(const Tensor& out_invstd, const Tensor& running_var, double epsilon) { auto iter = TensorIteratorConfig() .add_borrowed_output(out_invstd) .add_borrowed_input(running_var) .check_all_same_dtype(false) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, running_var.scalar_type(), "batch_norm_invert_std_cuda", [&] { using acc_t = at::acc_type<scalar_t, true>; auto eps = static_cast<acc_t>(epsilon); gpu_kernel(iter, [eps] GPU_LAMBDA (scalar_t var) -> acc_t { return c10::hip::compat::rsqrt(var + eps); }); }); } } std::tuple<Tensor&, Tensor&, Tensor&> batch_norm_cuda_out(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, bool train, double momentum, double epsilon, Tensor& output, Tensor& save_mean, Tensor& save_invstd) { const bool has_running_mean = (running_mean_opt.has_value() && running_mean_opt->defined()); const bool has_running_var = (running_mean_opt.has_value() && running_mean_opt->defined()); TORCH_CHECK(has_running_mean == has_running_var); if (train) { batch_norm_mean_var(self, save_mean, save_invstd); if (has_running_mean) { const int64_t N = self.numel() / save_mean.numel(); batch_norm_update_stats_and_invert( save_mean, save_invstd, *running_mean_opt, *running_var_opt, momentum, epsilon, N); } else { batch_norm_calc_invstd(save_invstd, save_invstd, epsilon); } } else { TORCH_CHECK(has_running_mean); at::native::resize_output(save_mean, running_mean_opt->sizes()); save_mean.copy_(*running_mean_opt, /*non_blocking=*/true); batch_norm_calc_invstd(save_invstd, running_var_opt.value(), epsilon); } batch_norm_elementwise(output, self, weight_opt, bias_opt, save_mean, save_invstd); return std::tuple<Tensor&, Tensor&, Tensor&>(output, save_mean, save_invstd); } std::tuple<Tensor, Tensor, Tensor> batch_norm_cuda(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, bool train, double momentum, double epsilon) { auto output = at::empty_like(self, at::MemoryFormat::Contiguous); int64_t n_input = self.size(1); auto options = self.options().dtype( at::toAccumulateType(self.scalar_type(), /*is_cuda=*/true)); auto save_mean = at::empty({n_input}, options); auto save_invstd = at::empty({n_input}, options); at::native::batch_norm_cuda_out( self, weight_opt, bias_opt, running_mean_opt, running_var_opt, train, momentum, epsilon, output, save_mean, save_invstd); return std::make_tuple(output, save_mean, save_invstd); } std::tuple<Tensor, Tensor, Tensor> batch_norm_backward_cuda(const Tensor& grad_out, const Tensor& input, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, const c10::optional<Tensor>& save_mean_opt, const c10::optional<Tensor>& save_invstd_opt, bool train, double epsilon, std::array<bool,3> grad_input_mask) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight = at::borrow_from_optional_tensor(weight_opt); c10::MaybeOwned<Tensor> save_mean = at::borrow_from_optional_tensor(save_mean_opt); c10::MaybeOwned<Tensor> save_invstd = at::borrow_from_optional_tensor(save_invstd_opt); c10::MaybeOwned<Tensor> running_mean = at::borrow_from_optional_tensor(running_mean_opt); c10::MaybeOwned<Tensor> running_var = 
at::borrow_from_optional_tensor(running_var_opt); const bool needs_reduction = train || grad_input_mask[1] || grad_input_mask[2]; // Fused reducion & elementwise kernel if (needs_reduction && grad_input_mask[0] && !batch_norm_use_channels_last_kernels(input) && cuda::detail::canUse32BitIndexMath(input) && cuda::detail::canUse32BitIndexMath(grad_out)) { return AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "batch_norm_backward_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; if (weight->defined() && weight->scalar_type() != input.scalar_type()) { return batch_norm_backward_cuda_template<scalar_t, accscalar_t, int32_t>( grad_out, input, *weight, *running_mean, *running_var, *save_mean, *save_invstd, train, epsilon, grad_input_mask); } else { return batch_norm_backward_cuda_template<scalar_t, scalar_t, int32_t>( grad_out, input, *weight, *running_mean, *running_var, *save_mean, *save_invstd, train, epsilon, grad_input_mask); } }); } // NOTE: native_batch_norm always returns save_mean and save_invstd to be reused in backward. // However, this is also called from cudnn_batch_norm in eval mode which doesn't give // save_mean and save_invstd, so it needs recalculated. const auto acc_type = at::toAccumulateType(input.scalar_type(), /*is_cuda=*/true); Tensor mean; if (save_mean->defined()) { mean = *save_mean; } else if (needs_reduction) { TORCH_CHECK(!train && running_mean->defined()); mean = (running_mean->scalar_type() == acc_type) ? *running_mean : running_mean->to(acc_type); } Tensor invstd; if (save_invstd->defined()) { invstd = *save_invstd; } else { TORCH_CHECK(!train && running_var->defined()); auto n_channels = input.sizes()[1]; invstd = at::empty({n_channels}, input.options().dtype(acc_type)); batch_norm_calc_invstd(invstd, *running_var, epsilon); } Tensor sum_dy, sum_dy_xmu, grad_weight, grad_bias; if (needs_reduction) { std::tie(sum_dy, sum_dy_xmu, grad_weight, grad_bias) = batch_norm_backward_reduce_cuda( grad_out, input, mean, invstd, *weight, grad_input_mask[0], grad_input_mask[1], grad_input_mask[2]); } Tensor grad_input; if (grad_input_mask[0]) { if (train) { // NOTE: sum_dy and sum_dy_xmy are defined, as train implies needs_reduction grad_input = batch_norm_elementwise_backward_train( grad_out, input, mean, invstd, *weight, sum_dy, sum_dy_xmu); } else { grad_input = batch_norm_elementwise_backward_eval( grad_out, input, invstd, *weight); } } return std::make_tuple(grad_input, grad_weight, grad_bias); } std::tuple<Tensor, Tensor> batch_norm_stats_cuda(const Tensor& self, double epsilon) { auto options = self.options().dtype( at::toAccumulateType(self.scalar_type(), /*is_cuda=*/true)); auto n_channels = self.size(1); auto save_mean = at::empty({n_channels}, options); auto save_invstd = at::empty({n_channels}, options); bool use_channels_last_kernel = batch_norm_use_channels_last_kernels(self); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_stats_cuda", [&] { if (cuda::detail::canUse32BitIndexMath(self)) { if (use_channels_last_kernel) { batch_norm_stats_channels_last_cuda_template<scalar_t, InvStd>( save_mean, save_invstd, self, epsilon); } else { batch_norm_stats_cuda_template<scalar_t, int32_t, InvStd>( save_mean, save_invstd, self, epsilon); } } else { batch_norm_stats_cuda_template<scalar_t, int64_t, InvStd>( save_mean, save_invstd, self, epsilon); } }); return std::tuple<Tensor, Tensor>(save_mean, save_invstd); } Tensor batch_norm_elemt_cuda( const Tensor& self, const 
c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const Tensor& mean, const Tensor& invstd, double epsilon) { auto output = at::empty_like(self, self.suggest_memory_format()); // FIXME: Epsilon parameter isn't required, we don't take the reciprocal batch_norm_elementwise(output, self, weight_opt, bias_opt, mean, invstd); return output; } Tensor& batch_norm_elemt_cuda_out(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const Tensor& mean, const Tensor& invstd, double epsilon, Tensor& output) { // FIXME: Epsilon parameter isn't required, we don't take the reciprocal batch_norm_elementwise(output, self, weight_opt, bias_opt, mean, invstd); return output; } // accepting input(self) here to determine template data types, since running_mean/running_var are optional std::tuple<Tensor, Tensor> batch_norm_gather_stats_cuda(const Tensor& self, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, double momentum, double epsilon, int64_t count) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> running_mean_maybe_owned = at::borrow_from_optional_tensor(running_mean_opt); const Tensor& running_mean = *running_mean_maybe_owned; const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();}); std::vector<int64_t> counts(mean.size(0), count); Tensor counts_ = at::from_blob((void*)counts.data(), {(int64_t)counts.size()}, self.options().dtype(at::kLong).device(at::kCPU)); counts_ = counts_.to(self.device()).to(running_mean.defined() ? running_mean.dtype() : self.dtype()); return batch_norm_gather_stats_with_counts_cuda(self, mean, invstd, running_mean, running_var, momentum, epsilon, counts_); } std::tuple<Tensor, Tensor> batch_norm_gather_stats_with_counts_cuda( const Tensor& self, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& running_mean_opt /* optional */, const c10::optional<Tensor>& running_var_opt /* optional */, double momentum, double epsilon, const Tensor& counts) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> running_mean_maybe_owned = at::borrow_from_optional_tensor(running_mean_opt); const Tensor& running_mean = *running_mean_maybe_owned; const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();}); auto scalar_type = running_mean.defined() ? 
running_mean.scalar_type() : self.scalar_type(); return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "batch_norm_update_stats_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; if (cuda::detail::canUse32BitIndexMath(self)) { return batch_norm_gather_stats_cuda_template<scalar_t, accscalar_t, int32_t>(mean, invstd, running_mean, running_var, momentum, epsilon, counts); } else { return batch_norm_gather_stats_cuda_template<scalar_t, accscalar_t, int64_t>(mean, invstd, running_mean, running_var, momentum, epsilon, counts); } }); } std::tuple<Tensor, Tensor, Tensor, Tensor> batch_norm_backward_reduce_cuda(const Tensor& grad_output, const Tensor& input, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& weight_opt, bool input_g, bool weight_g, bool bias_g) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; if (at::cuda::detail::canUse32BitIndexMath(grad_output) && batch_norm_use_channels_last_kernels(grad_output) && batch_norm_use_channels_last_kernels(input) && (!weight.defined() || weight.is_contiguous()) && mean.is_contiguous() && invstd.is_contiguous()){ return batch_norm_backward_reduce_cuda_channels_last_template( grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g); } return AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_output.scalar_type(), "batch_norm_backward_reduce", [&] { auto mean_st = mean.dtype(); auto invstd_st = invstd.dtype(); TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types"); const bool is_mixed_type = weight.defined() && weight.scalar_type() != input.scalar_type(); using accscalar_t = at::acc_type<scalar_t, true>; if (cuda::detail::canUse32BitIndexMath(grad_output)) { if (is_mixed_type) { return batch_norm_backward_reduce_cuda_template<scalar_t, accscalar_t, int32_t>(grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g); } else { return batch_norm_backward_reduce_cuda_template<scalar_t, scalar_t, int32_t>(grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g); } } else { if (is_mixed_type) { return batch_norm_backward_reduce_cuda_template<scalar_t, accscalar_t, int64_t>(grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g); } else { return batch_norm_backward_reduce_cuda_template<scalar_t, scalar_t, int64_t>(grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g); } } }); } Tensor batch_norm_backward_elemt_cuda(const Tensor& self, const Tensor& input, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& weight_opt, const Tensor& sum_dy, const Tensor& sum_dy_xmu, const Tensor& count) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; if (at::cuda::detail::canUse32BitIndexMath(self) && batch_norm_use_channels_last_kernels(self)){ return batch_norm_backward_elemt_channels_last_cuda_template(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_backward_elemt", [&] { auto mean_st = mean.dtype(); auto invstd_st = invstd.dtype(); TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types"); bool is_half_float = std::is_same<scalar_t, 
at::Half>::value && mean_st == at::kFloat; bool is_bfloat16_float = std::is_same<scalar_t, at::BFloat16>::value && mean_st == at::kFloat; using accscalar_t = at::acc_type<scalar_t, true>; if (cuda::detail::canUse32BitIndexMath(self)) { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_elemt_cuda_template<scalar_t, accscalar_t, int32_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } else { return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int32_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } } else { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_elemt_cuda_template<scalar_t, accscalar_t, int64_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } else { return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int64_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } } }); } std::tuple<Tensor, Tensor> batch_norm_update_stats_cuda( const Tensor& self, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, double momentum) { c10::MaybeOwned<Tensor> running_mean = at::borrow_from_optional_tensor(running_mean_opt); c10::MaybeOwned<Tensor> running_var = at::borrow_from_optional_tensor(running_var_opt); const int64_t n_input = self.size(1); auto options = self.options().dtype( at::toAccumulateType(self.scalar_type(), /*is_cuda=*/true)); auto save_mean = at::empty({n_input}, options); auto save_var = at::empty({n_input}, options); batch_norm_mean_var(self, save_mean, save_var); TORCH_CHECK(running_mean->defined() == running_var->defined()); if (running_mean->defined()) { const int64_t N = self.numel() / save_mean.numel(); batch_norm_update_stats(save_mean, save_var, *running_mean, *running_var, momentum, N); } return std::tuple<Tensor, Tensor>(save_mean, save_var); } } } // namespace at::native
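// --- Illustrative sketch (not part of the original sources) ---------------
// Plain scalar C++ restating the per-channel arithmetic of the
// batch_norm_update_stats_and_invert kernel above. The struct and function
// names are local to this sketch; only the formulas come from the kernel:
// the running statistics blend in the Bessel-corrected (unbiased) batch
// variance, while the saved invstd is rsqrt of the biased batch variance.
#include <cmath>

struct StatsUpdate { float running_mean; float running_var; float invstd; };

inline StatsUpdate updateStatsAndInvert(float batch_mean, float batch_var /*biased*/,
                                        float running_mean, float running_var,
                                        float momentum, float eps, long N) {
  // N > 1 assumed, as in the kernel's Bessel correction factor N / (N - 1).
  const float bessel = static_cast<float>(N) / static_cast<float>(N - 1);
  const float unbiased_var = batch_var * bessel;
  return {
    batch_mean * momentum + (1.0f - momentum) * running_mean,
    unbiased_var * momentum + (1.0f - momentum) * running_var,
    1.0f / std::sqrt(batch_var + eps)  // rsqrt(var + eps), biased var as in the kernel
  };
}
// ---------------------------------------------------------------------------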
796bbdf45d349fce8c9d71434850a76d511d2ae1.cu
#include <ATen/native/TensorIterator.h> #include <ATen/native/ReduceOps.h> #include <ATen/native/Resize.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/cuda/Reduce.cuh> #include <ATen/native/cuda/Normalization.cuh> #include <c10/cuda/CUDAMathCompat.h> namespace at { namespace native { namespace { inline bool batch_norm_use_channels_last_kernels(const at::Tensor& self) { return (self.is_contiguous(at::MemoryFormat::ChannelsLast) || (self.is_contiguous() && self.strides()[1] == 1)); } enum class Impl { Contiguous, ChannelsLast, General, }; inline Impl batch_norm_choose_impl(const Tensor& self) { if (!at::cuda::detail::canUse32BitIndexMath(self)) { return Impl::General; } if (self.is_contiguous()) { return self.strides()[1] == 1 ? Impl::ChannelsLast : Impl::Contiguous; } if (self.is_contiguous(at::MemoryFormat::ChannelsLast)) { return Impl::ChannelsLast; } return Impl::General; } inline Impl batch_norm_choose_impl(const Tensor& in1, const Tensor& in2) { auto imp1 = batch_norm_choose_impl(in1); if (imp1 == Impl::General) { return imp1; } auto imp2 = batch_norm_choose_impl(in2); return imp1 == imp2 ? imp1 : Impl::General; } void batch_norm_elementwise( const Tensor& out, const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const Tensor& mean_, const Tensor& invstd_) { switch (batch_norm_choose_impl(self)) { case Impl::Contiguous: { c10::MaybeOwned<Tensor> weight = at::borrow_from_optional_tensor(weight_opt); c10::MaybeOwned<Tensor> bias = at::borrow_from_optional_tensor(bias_opt); AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, self.scalar_type(), "batch_norm_elementwise_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; if ((weight->defined() && weight->scalar_type() != self.scalar_type()) || (bias->defined() && bias->scalar_type() != self.scalar_type())) { batch_norm_elemt_cuda_template<scalar_t, accscalar_t, int32_t>( out, self, *weight, *bias, mean_, invstd_); } else { batch_norm_elemt_cuda_template<scalar_t, scalar_t, int32_t>( out, self, *weight, *bias, mean_, invstd_); } }); return; } case Impl::ChannelsLast: { auto weight = at::borrow_from_optional_tensor(weight_opt); auto bias = at::borrow_from_optional_tensor(bias_opt); if ((!weight->defined() || weight->is_contiguous()) && (!bias->defined() || bias->is_contiguous()) && (!mean_.defined() || mean_.is_contiguous()) && (!invstd_.defined() || invstd_.is_contiguous())) { batch_norm_elemt_channels_last_cuda_template( out, self, *weight, *bias, mean_, invstd_); return; } C10_FALLTHROUGH; } case Impl::General: { const int64_t ndim = self.dim(); DimVector sizes(ndim, 1), strides(ndim, 0); // Helper to convert 1d tensors to an nd tensor that broadcasts with input // All elements go into the channel dimension auto as_nd = [&](const Tensor& t) { TORCH_INTERNAL_ASSERT(t.defined() && t.dim() == 1); sizes[1] = t.sizes()[0]; strides[1] = t.strides()[0]; return t.as_strided(sizes, strides); }; auto weight = weight_opt.has_value() && weight_opt->defined() ? as_nd(*weight_opt) : at::scalar_tensor(1, mean_.options()); auto bias = bias_opt.has_value() && bias_opt->defined() ? 
as_nd(*bias_opt) : at::scalar_tensor(0, mean_.options()); auto mean = as_nd(mean_); auto invstd = as_nd(invstd_); auto iter = TensorIteratorConfig() .add_borrowed_output(out) .add_borrowed_input(self) .add_borrowed_input(weight) .add_borrowed_input(bias) .add_borrowed_input(mean) .add_borrowed_input(invstd) .check_all_same_dtype(false) .promote_inputs_to_common_dtype(false) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, self.scalar_type(), "batch_norm_elementwise_cuda", [&] { using acc_t = at::acc_type<scalar_t, true>; gpu_kernel(iter, [] GPU_LAMBDA (scalar_t input, acc_t weight, acc_t bias, acc_t mean, acc_t invstd) -> scalar_t { return ((input - mean) * invstd) * weight + bias; }); }); return; } } } Tensor batch_norm_elementwise_backward_train( const Tensor& grad_out, const Tensor& input, const Tensor& mean, const Tensor& invstd, const Tensor& weight, const Tensor& sum_dy, const Tensor& sum_dy_xmu) { switch (batch_norm_choose_impl(input, grad_out)) { case Impl::Contiguous: { return AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "batch_norm_backward_elemt", [&] { using accscalar_t = at::acc_type<scalar_t, true>; if (weight.defined() && weight.scalar_type() != input.scalar_type()) { return batch_norm_backward_elemt_cuda_template<scalar_t, accscalar_t, int32_t>( grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu); } else { return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int32_t>( grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu); } }); } case Impl::ChannelsLast: { if ((!weight.defined() || weight.is_contiguous()) && mean.is_contiguous() && invstd.is_contiguous()) { return batch_norm_backward_elemt_channels_last_cuda_template( grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu); } C10_FALLTHROUGH; } case Impl::General: { const auto ndim = input.dim(); DimVector sizes(ndim, 1), strides(ndim, 0); auto as_nd = [&](const Tensor& t) { TORCH_INTERNAL_ASSERT(t.defined() && t.dim() == 1); sizes[1] = t.sizes()[0]; strides[1] = t.strides()[0]; return t.as_strided(sizes, strides); }; auto invstd_nd = as_nd(invstd); auto mean_nd = as_nd(mean); auto sum_dy_nd = as_nd(sum_dy); auto sum_dy_xmu_nd = as_nd(sum_dy_xmu); auto weight_nd = weight.defined() ? 
as_nd(weight) : at::scalar_tensor(1.0, input.options().dtype(mean.scalar_type())); Tensor grad_input = at::empty(input.sizes(), grad_out.options()); auto iter = TensorIteratorConfig() .add_output(grad_input) .add_input(grad_out) .add_input(input) .add_input(weight_nd) .add_input(mean_nd) .add_input(invstd_nd) .add_input(sum_dy_xmu_nd) .add_input(sum_dy_nd) .check_all_same_dtype(false) .promote_inputs_to_common_dtype(false) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_out.scalar_type(), "batch_norm_eval_backward", [&]{ using accscalar_t = at::acc_type<scalar_t, true>; auto norm_fct = static_cast<accscalar_t>(1.0 / (input.numel() /input.size(1)) ); gpu_kernel(iter, [norm_fct] GPU_LAMBDA (scalar_t gO, scalar_t input, accscalar_t weight, accscalar_t mean, accscalar_t invstd, accscalar_t xmu, accscalar_t dy) -> scalar_t { auto factor_1_c = invstd * invstd * xmu * norm_fct; auto factor_2_c = weight * invstd; auto m_dy_c = dy * norm_fct; return (gO - m_dy_c - (input - mean) * factor_1_c) * factor_2_c; }); }); return grad_input; } } TORCH_INTERNAL_ASSERT(false); } Tensor batch_norm_elementwise_backward_eval( const Tensor& grad_out, const Tensor& input, const Tensor& invstd, const Tensor& weight) { const auto ndim = input.dim(); DimVector shape(ndim, 1), strides(ndim, 0); shape[1] = invstd.sizes()[0]; strides[1] = invstd.strides()[0]; auto invstd_nd = invstd.as_strided(shape, strides); Tensor grad_input = at::empty(input.sizes(), grad_out.options()); if (weight.defined()) { strides[1] = weight.strides()[0]; auto weight_nd = weight.as_strided(shape, strides); auto iter = TensorIteratorConfig() .add_output(grad_input) .add_input(grad_out) .add_input(invstd_nd) .add_input(weight_nd) .check_all_same_dtype(false) .promote_inputs_to_common_dtype(false) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_out.scalar_type(), "batch_norm_eval_backward", [&]{ using accscalar_t = at::acc_type<scalar_t, true>; gpu_kernel(iter, [] GPU_LAMBDA (scalar_t gO, accscalar_t invstd, accscalar_t weight) -> scalar_t { return gO * weight * invstd; }); }); } else { auto iter = TensorIteratorConfig() .add_output(grad_input) .add_input(grad_out) .add_input(invstd_nd) .check_all_same_dtype(false) .promote_inputs_to_common_dtype(false) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_out.scalar_type(), "batch_norm_eval_backward", [&]{ using accscalar_t = at::acc_type<scalar_t, true>; gpu_kernel(iter, [] GPU_LAMBDA (scalar_t gO, accscalar_t invstd) -> scalar_t { return gO * invstd; }); }); } return grad_input; } void batch_norm_mean_var(const Tensor& self, Tensor& save_mean, Tensor& save_var) { // NOTE: Epsilon is only used for InvStd, not Var. The value here is ignored. 
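// (The Var path keeps the raw, biased variance in save_var; epsilon only enters later, when batch_norm_update_stats_and_invert or batch_norm_calc_invstd takes rsqrt(var + eps).)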
const double dummy_epsilon = 1e-5; switch (batch_norm_choose_impl(self)) { case Impl::Contiguous: { AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, self.scalar_type(), "batch_norm_stats_cuda", [&] { batch_norm_stats_cuda_template<scalar_t, int32_t, Var>( save_mean, save_var, self, dummy_epsilon); }); return; } case Impl::ChannelsLast: { if ((!save_mean.defined() || save_mean.is_contiguous()) && (!save_var.defined() || save_var.is_contiguous())) { AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, self.scalar_type(), "batch_norm_stats_cuda", [&] { batch_norm_stats_channels_last_cuda_template<scalar_t, Var>( save_mean, save_var, self, dummy_epsilon); }); return; } C10_FALLTHROUGH; } case Impl::General: { const int64_t ndim = self.dim(); DimVector reduce_dims(ndim - 1); reduce_dims[0] = 0; for (int64_t i = 2; i < ndim; ++i) { reduce_dims[i - 1] = i; } // For some reason this isn't an actual operator but it exists anyway... at::native::var_mean_out(save_var, save_mean, self, /*dims=*/reduce_dims, /*unbiased=*/false, /*keepdim=*/false); return; } } } void batch_norm_update_stats( const Tensor& save_mean, const Tensor& save_var, const Tensor& running_mean, const Tensor& running_var, double momentum_, int64_t N) { auto iter = TensorIteratorConfig() .add_borrowed_output(running_mean) .add_borrowed_output(running_var) .add_borrowed_input(save_mean) .add_borrowed_input(save_var) .add_borrowed_input(running_mean) .add_borrowed_input(running_var) .check_all_same_dtype(false) .promote_inputs_to_common_dtype(false) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, running_mean.scalar_type(), "batch_norm_update_stats_cuda", [&] { using acc_t = at::acc_type<scalar_t, true>; const auto bessel_correction_factor = static_cast<acc_t>( static_cast<double>(N) / static_cast<double>(N - 1)); const auto momentum = static_cast<acc_t>(momentum_); gpu_kernel_multiple_outputs( iter, [=] GPU_LAMBDA (acc_t mean, acc_t var, scalar_t running_mean, scalar_t running_var) -> thrust::tuple<scalar_t, scalar_t> { const auto unbiased_var = var * bessel_correction_factor; return thrust::tuple<scalar_t, scalar_t>{ mean * momentum + (1 - momentum) * running_mean, unbiased_var * momentum + (1 - momentum) * running_var, }; }); }); } void batch_norm_update_stats_and_invert( const Tensor& save_mean, const Tensor& save_var, const Tensor& running_mean, const Tensor& running_var, double momentum_, double epsilon, int64_t N) { auto iter = TensorIteratorConfig() .add_borrowed_output(running_mean) .add_borrowed_output(running_var) .add_borrowed_output(save_var) .add_borrowed_input(save_mean) .add_borrowed_input(save_var) .add_borrowed_input(running_mean) .add_borrowed_input(running_var) .check_all_same_dtype(false) .promote_inputs_to_common_dtype(false) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, running_mean.scalar_type(), "batch_norm_update_stats_cuda", [&] { using acc_t = at::acc_type<scalar_t, true>; const auto bessel_correction_factor = static_cast<acc_t>( static_cast<double>(N) / static_cast<double>(N - 1)); const auto eps = static_cast<acc_t>(epsilon); const auto momentum = static_cast<acc_t>(momentum_); gpu_kernel_multiple_outputs( iter, [=] GPU_LAMBDA (acc_t mean, acc_t var, scalar_t running_mean, scalar_t running_var) -> thrust::tuple<scalar_t, scalar_t, acc_t> { const auto unbiased_var = var * bessel_correction_factor; return thrust::tuple<scalar_t, scalar_t, acc_t>{ mean * momentum + (1 - momentum) * running_mean, unbiased_var * momentum + (1 - momentum) * running_var, c10::cuda::compat::rsqrt(var + 
eps) }; }); }); } void batch_norm_calc_invstd(const Tensor& out_invstd, const Tensor& running_var, double epsilon) { auto iter = TensorIteratorConfig() .add_borrowed_output(out_invstd) .add_borrowed_input(running_var) .check_all_same_dtype(false) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, running_var.scalar_type(), "batch_norm_invert_std_cuda", [&] { using acc_t = at::acc_type<scalar_t, true>; auto eps = static_cast<acc_t>(epsilon); gpu_kernel(iter, [eps] GPU_LAMBDA (scalar_t var) -> acc_t { return c10::cuda::compat::rsqrt(var + eps); }); }); } } std::tuple<Tensor&, Tensor&, Tensor&> batch_norm_cuda_out(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, bool train, double momentum, double epsilon, Tensor& output, Tensor& save_mean, Tensor& save_invstd) { const bool has_running_mean = (running_mean_opt.has_value() && running_mean_opt->defined()); const bool has_running_var = (running_mean_opt.has_value() && running_mean_opt->defined()); TORCH_CHECK(has_running_mean == has_running_var); if (train) { batch_norm_mean_var(self, save_mean, save_invstd); if (has_running_mean) { const int64_t N = self.numel() / save_mean.numel(); batch_norm_update_stats_and_invert( save_mean, save_invstd, *running_mean_opt, *running_var_opt, momentum, epsilon, N); } else { batch_norm_calc_invstd(save_invstd, save_invstd, epsilon); } } else { TORCH_CHECK(has_running_mean); at::native::resize_output(save_mean, running_mean_opt->sizes()); save_mean.copy_(*running_mean_opt, /*non_blocking=*/true); batch_norm_calc_invstd(save_invstd, running_var_opt.value(), epsilon); } batch_norm_elementwise(output, self, weight_opt, bias_opt, save_mean, save_invstd); return std::tuple<Tensor&, Tensor&, Tensor&>(output, save_mean, save_invstd); } std::tuple<Tensor, Tensor, Tensor> batch_norm_cuda(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, bool train, double momentum, double epsilon) { auto output = at::empty_like(self, at::MemoryFormat::Contiguous); int64_t n_input = self.size(1); auto options = self.options().dtype( at::toAccumulateType(self.scalar_type(), /*is_cuda=*/true)); auto save_mean = at::empty({n_input}, options); auto save_invstd = at::empty({n_input}, options); at::native::batch_norm_cuda_out( self, weight_opt, bias_opt, running_mean_opt, running_var_opt, train, momentum, epsilon, output, save_mean, save_invstd); return std::make_tuple(output, save_mean, save_invstd); } std::tuple<Tensor, Tensor, Tensor> batch_norm_backward_cuda(const Tensor& grad_out, const Tensor& input, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, const c10::optional<Tensor>& save_mean_opt, const c10::optional<Tensor>& save_invstd_opt, bool train, double epsilon, std::array<bool,3> grad_input_mask) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight = at::borrow_from_optional_tensor(weight_opt); c10::MaybeOwned<Tensor> save_mean = at::borrow_from_optional_tensor(save_mean_opt); c10::MaybeOwned<Tensor> save_invstd = at::borrow_from_optional_tensor(save_invstd_opt); c10::MaybeOwned<Tensor> running_mean = at::borrow_from_optional_tensor(running_mean_opt); c10::MaybeOwned<Tensor> running_var = 
at::borrow_from_optional_tensor(running_var_opt); const bool needs_reduction = train || grad_input_mask[1] || grad_input_mask[2]; // Fused reducion & elementwise kernel if (needs_reduction && grad_input_mask[0] && !batch_norm_use_channels_last_kernels(input) && cuda::detail::canUse32BitIndexMath(input) && cuda::detail::canUse32BitIndexMath(grad_out)) { return AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "batch_norm_backward_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; if (weight->defined() && weight->scalar_type() != input.scalar_type()) { return batch_norm_backward_cuda_template<scalar_t, accscalar_t, int32_t>( grad_out, input, *weight, *running_mean, *running_var, *save_mean, *save_invstd, train, epsilon, grad_input_mask); } else { return batch_norm_backward_cuda_template<scalar_t, scalar_t, int32_t>( grad_out, input, *weight, *running_mean, *running_var, *save_mean, *save_invstd, train, epsilon, grad_input_mask); } }); } // NOTE: native_batch_norm always returns save_mean and save_invstd to be reused in backward. // However, this is also called from cudnn_batch_norm in eval mode which doesn't give // save_mean and save_invstd, so it needs recalculated. const auto acc_type = at::toAccumulateType(input.scalar_type(), /*is_cuda=*/true); Tensor mean; if (save_mean->defined()) { mean = *save_mean; } else if (needs_reduction) { TORCH_CHECK(!train && running_mean->defined()); mean = (running_mean->scalar_type() == acc_type) ? *running_mean : running_mean->to(acc_type); } Tensor invstd; if (save_invstd->defined()) { invstd = *save_invstd; } else { TORCH_CHECK(!train && running_var->defined()); auto n_channels = input.sizes()[1]; invstd = at::empty({n_channels}, input.options().dtype(acc_type)); batch_norm_calc_invstd(invstd, *running_var, epsilon); } Tensor sum_dy, sum_dy_xmu, grad_weight, grad_bias; if (needs_reduction) { std::tie(sum_dy, sum_dy_xmu, grad_weight, grad_bias) = batch_norm_backward_reduce_cuda( grad_out, input, mean, invstd, *weight, grad_input_mask[0], grad_input_mask[1], grad_input_mask[2]); } Tensor grad_input; if (grad_input_mask[0]) { if (train) { // NOTE: sum_dy and sum_dy_xmy are defined, as train implies needs_reduction grad_input = batch_norm_elementwise_backward_train( grad_out, input, mean, invstd, *weight, sum_dy, sum_dy_xmu); } else { grad_input = batch_norm_elementwise_backward_eval( grad_out, input, invstd, *weight); } } return std::make_tuple(grad_input, grad_weight, grad_bias); } std::tuple<Tensor, Tensor> batch_norm_stats_cuda(const Tensor& self, double epsilon) { auto options = self.options().dtype( at::toAccumulateType(self.scalar_type(), /*is_cuda=*/true)); auto n_channels = self.size(1); auto save_mean = at::empty({n_channels}, options); auto save_invstd = at::empty({n_channels}, options); bool use_channels_last_kernel = batch_norm_use_channels_last_kernels(self); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_stats_cuda", [&] { if (cuda::detail::canUse32BitIndexMath(self)) { if (use_channels_last_kernel) { batch_norm_stats_channels_last_cuda_template<scalar_t, InvStd>( save_mean, save_invstd, self, epsilon); } else { batch_norm_stats_cuda_template<scalar_t, int32_t, InvStd>( save_mean, save_invstd, self, epsilon); } } else { batch_norm_stats_cuda_template<scalar_t, int64_t, InvStd>( save_mean, save_invstd, self, epsilon); } }); return std::tuple<Tensor, Tensor>(save_mean, save_invstd); } Tensor batch_norm_elemt_cuda( const Tensor& self, const 
c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const Tensor& mean, const Tensor& invstd, double epsilon) { auto output = at::empty_like(self, self.suggest_memory_format()); // FIXME: Epsilon parameter isn't required, we don't take the reciprocal batch_norm_elementwise(output, self, weight_opt, bias_opt, mean, invstd); return output; } Tensor& batch_norm_elemt_cuda_out(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const Tensor& mean, const Tensor& invstd, double epsilon, Tensor& output) { // FIXME: Epsilon parameter isn't required, we don't take the reciprocal batch_norm_elementwise(output, self, weight_opt, bias_opt, mean, invstd); return output; } // accepting input(self) here to determine template data types, since running_mean/running_var are optional std::tuple<Tensor, Tensor> batch_norm_gather_stats_cuda(const Tensor& self, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, double momentum, double epsilon, int64_t count) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> running_mean_maybe_owned = at::borrow_from_optional_tensor(running_mean_opt); const Tensor& running_mean = *running_mean_maybe_owned; const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();}); std::vector<int64_t> counts(mean.size(0), count); Tensor counts_ = at::from_blob((void*)counts.data(), {(int64_t)counts.size()}, self.options().dtype(at::kLong).device(at::kCPU)); counts_ = counts_.to(self.device()).to(running_mean.defined() ? running_mean.dtype() : self.dtype()); return batch_norm_gather_stats_with_counts_cuda(self, mean, invstd, running_mean, running_var, momentum, epsilon, counts_); } std::tuple<Tensor, Tensor> batch_norm_gather_stats_with_counts_cuda( const Tensor& self, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& running_mean_opt /* optional */, const c10::optional<Tensor>& running_var_opt /* optional */, double momentum, double epsilon, const Tensor& counts) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> running_mean_maybe_owned = at::borrow_from_optional_tensor(running_mean_opt); const Tensor& running_mean = *running_mean_maybe_owned; const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();}); auto scalar_type = running_mean.defined() ? 
running_mean.scalar_type() : self.scalar_type(); return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "batch_norm_update_stats_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; if (cuda::detail::canUse32BitIndexMath(self)) { return batch_norm_gather_stats_cuda_template<scalar_t, accscalar_t, int32_t>(mean, invstd, running_mean, running_var, momentum, epsilon, counts); } else { return batch_norm_gather_stats_cuda_template<scalar_t, accscalar_t, int64_t>(mean, invstd, running_mean, running_var, momentum, epsilon, counts); } }); } std::tuple<Tensor, Tensor, Tensor, Tensor> batch_norm_backward_reduce_cuda(const Tensor& grad_output, const Tensor& input, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& weight_opt, bool input_g, bool weight_g, bool bias_g) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; if (at::cuda::detail::canUse32BitIndexMath(grad_output) && batch_norm_use_channels_last_kernels(grad_output) && batch_norm_use_channels_last_kernels(input) && (!weight.defined() || weight.is_contiguous()) && mean.is_contiguous() && invstd.is_contiguous()){ return batch_norm_backward_reduce_cuda_channels_last_template( grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g); } return AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_output.scalar_type(), "batch_norm_backward_reduce", [&] { auto mean_st = mean.dtype(); auto invstd_st = invstd.dtype(); TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types"); const bool is_mixed_type = weight.defined() && weight.scalar_type() != input.scalar_type(); using accscalar_t = at::acc_type<scalar_t, true>; if (cuda::detail::canUse32BitIndexMath(grad_output)) { if (is_mixed_type) { return batch_norm_backward_reduce_cuda_template<scalar_t, accscalar_t, int32_t>(grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g); } else { return batch_norm_backward_reduce_cuda_template<scalar_t, scalar_t, int32_t>(grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g); } } else { if (is_mixed_type) { return batch_norm_backward_reduce_cuda_template<scalar_t, accscalar_t, int64_t>(grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g); } else { return batch_norm_backward_reduce_cuda_template<scalar_t, scalar_t, int64_t>(grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g); } } }); } Tensor batch_norm_backward_elemt_cuda(const Tensor& self, const Tensor& input, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& weight_opt, const Tensor& sum_dy, const Tensor& sum_dy_xmu, const Tensor& count) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; if (at::cuda::detail::canUse32BitIndexMath(self) && batch_norm_use_channels_last_kernels(self)){ return batch_norm_backward_elemt_channels_last_cuda_template(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_backward_elemt", [&] { auto mean_st = mean.dtype(); auto invstd_st = invstd.dtype(); TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types"); bool is_half_float = std::is_same<scalar_t, 
at::Half>::value && mean_st == at::kFloat; bool is_bfloat16_float = std::is_same<scalar_t, at::BFloat16>::value && mean_st == at::kFloat; using accscalar_t = at::acc_type<scalar_t, true>; if (cuda::detail::canUse32BitIndexMath(self)) { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_elemt_cuda_template<scalar_t, accscalar_t, int32_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } else { return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int32_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } } else { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_elemt_cuda_template<scalar_t, accscalar_t, int64_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } else { return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int64_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } } }); } std::tuple<Tensor, Tensor> batch_norm_update_stats_cuda( const Tensor& self, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, double momentum) { c10::MaybeOwned<Tensor> running_mean = at::borrow_from_optional_tensor(running_mean_opt); c10::MaybeOwned<Tensor> running_var = at::borrow_from_optional_tensor(running_var_opt); const int64_t n_input = self.size(1); auto options = self.options().dtype( at::toAccumulateType(self.scalar_type(), /*is_cuda=*/true)); auto save_mean = at::empty({n_input}, options); auto save_var = at::empty({n_input}, options); batch_norm_mean_var(self, save_mean, save_var); TORCH_CHECK(running_mean->defined() == running_var->defined()); if (running_mean->defined()) { const int64_t N = self.numel() / save_mean.numel(); batch_norm_update_stats(save_mean, save_var, *running_mean, *running_var, momentum, N); } return std::tuple<Tensor, Tensor>(save_mean, save_var); } } } // namespace at::native
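// --- Illustrative sketch (not part of the original sources) ---------------
// Scalar restatement of the per-element math used above: the forward
// normalization from the General path of batch_norm_elementwise, the
// training-mode backward from batch_norm_elementwise_backward_train, and the
// eval-mode backward from batch_norm_elementwise_backward_eval. The function
// names are local to this sketch; only the formulas come from the lambdas.
inline float bn_forward(float x, float mean, float invstd,
                        float weight, float bias) {
  return ((x - mean) * invstd) * weight + bias;
}

// norm_fct = 1 / (numel / C); sum_dy and sum_dy_xmu are the per-channel
// reductions produced by batch_norm_backward_reduce_cuda.
inline float bn_backward_train(float grad_out, float x, float mean,
                               float invstd, float weight, float sum_dy,
                               float sum_dy_xmu, float norm_fct) {
  const float factor_1_c = invstd * invstd * sum_dy_xmu * norm_fct;
  const float factor_2_c = weight * invstd;
  const float m_dy_c = sum_dy * norm_fct;
  return (grad_out - m_dy_c - (x - mean) * factor_1_c) * factor_2_c;
}

inline float bn_backward_eval(float grad_out, float invstd, float weight) {
  return grad_out * weight * invstd;  // weight is treated as 1 when undefined
}
// ---------------------------------------------------------------------------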
de401239e63ef1fcdf83e732c4dee8d34fa138e1.hip
// !!! This is a file automatically generated by hipify!!! // CIS565 CUDA Raytracer: A parallel raytracer for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania // Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania // This file includes code from: // Rob Farber for CUDA-GL interop, from CUDA Supercomputing For The Masses: http://www.drdobbs.com/architecture-and-design/cuda-supercomputing-for-the-masses-part/222600097 // Peter Kutz and Yining Karl Li's GPU Pathtracer: http://gpupathtracer.blogspot.com/ // Yining Karl Li's TAKUA Render, a massively parallel pathtracing renderer: http://www.yiningkarlli.com #include <stdio.h> #include <hip/hip_runtime.h> #include <cmath> #include <ctime> #include <random> #include "sceneStructs.h" #include "glm/glm.hpp" #include "utilities.h" #include "raytraceKernel.h" #include "intersections.h" #include "interactions.h" #if TORCH_HIP_VERSION >= 5000 #include <helper_math.h> #else #include <cutil_math.h> #endif const glm::vec3 bgColour = glm::vec3 (0.55, 0.25, 0); void checkCUDAError(const char *msg) { hipDeviceSynchronize(); hipError_t err = hipGetLastError(); if( hipSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) ); std::cin.get (); exit(EXIT_FAILURE); } } //Sets up the projection half vectors. void setupProjection (projectionInfo &ProjectionParams, glm::vec3 eye, glm::vec3 view, glm::vec3 up, glm::vec2 fov) { //Set up the projection variables: float degToRad = 3.1415926 / 180.0; float radToDeg = 1.0 / degToRad; ProjectionParams.centreProj = eye+view; glm::vec3 eyeToProjCentre = ProjectionParams.centreProj - eye; glm::vec3 A = glm::cross (ProjectionParams.centreProj, up); glm::vec3 B = glm::cross (A, ProjectionParams.centreProj); float lenEyeToProjCentre = glm::length (eyeToProjCentre); ProjectionParams.halfVecH = glm::normalize (A) * lenEyeToProjCentre * (float)tan ((fov.x*degToRad)); ProjectionParams.halfVecV = glm::normalize (B) * lenEyeToProjCentre * (float)tan ((fov.y*degToRad)); } // Reflects the incidentRay around the normal. __host__ __device__ glm::vec3 reflectRay (glm::vec3 incidentRay, glm::vec3 normal) { glm::vec3 reflectedRay = incidentRay - (2.0f*glm::dot (incidentRay, normal))*normal; return reflectedRay; } //LOOK: This function demonstrates how to use thrust for random number generation on the GPU! //Function that generates static. 
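// The engine below is seeded with hash(index*time), i.e. the flattened pixel index mixed with the frame time, so every pixel and every iteration draws a different (r,g,b) triple in [0,1).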
__host__ __device__ glm::vec3 generateRandomNumberFromThread(glm::vec2 resolution, float time, int x, int y){ int index = x + (y * resolution.x); thrust::default_random_engine rng(hash(index*time)); thrust::uniform_real_distribution<float> u01(0,1); return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng)); } //Function that does the initial raycast from the camera __host__ __device__ ray raycastFromCameraKernel(glm::vec2 resolution, float time, int x, int y, glm::vec3 eye, glm::vec3 view, glm::vec3 up, glm::vec2 fov, glm::vec3 centreProj, glm::vec3 halfVecH, glm::vec3 halfVecV) { ray r; r.origin = eye; r.direction = glm::vec3(0,0,-1); float normDeviceX = (float)x / (resolution.x-1); float normDeviceY = 1 - ((float)y / (resolution.y-1)); glm::vec3 P = centreProj + (2*normDeviceX - 1)*halfVecH + (2*normDeviceY - 1)*halfVecV; r.direction = glm::normalize (P - r.origin); return r; } //Kernel that blacks out a given image buffer __global__ void clearImage(glm::vec2 resolution, glm::vec3* image){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ image[index] = glm::vec3(0,0,0); } } //Kernel that writes the image to the OpenGL PBO directly. __global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image, int nLights){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ image [index] /= nLights; glm::vec3 color; color.x = image[index].x*255.0; color.y = image[index].y*255.0; color.z = image[index].z*255.0; if(color.x>255){ color.x = 255; } if(color.y>255){ color.y = 255; } if(color.z>255){ color.z = 255; } // Each thread writes one pixel location in the texture (textel) PBOpos[index].w = 0; PBOpos[index].x = color.x; PBOpos[index].y = color.y; PBOpos[index].z = color.z; } } // Intersects the castRay with all the geometry in the scene (geoms) and returns the intercept information. __device__ interceptInfo getIntercept (staticGeom * geoms, sceneInfo objectCountInfo, ray castRay, material* textureArray) { glm::vec3 intrPoint = glm::vec3 (0, 0, 0); glm::vec3 intrNormal = glm::vec3 (0, 0, 0); glm::vec2 UVcoords = glm::vec2 (0, 0); float interceptValue = -32767; material newMaterial; newMaterial.color = glm::vec3 (0,0,0); newMaterial.specularExponent = 1.0; newMaterial.hasReflective = 0.0; newMaterial.hasRefractive = 0.0; interceptInfo theRightIntercept; // Stores the lowest intercept. theRightIntercept.interceptVal = interceptValue; // Initially, it is empty/invalid theRightIntercept.intrNormal = intrNormal; // Intially, Normal - 0,0,0 theRightIntercept.intrMaterial = newMaterial; float min = 1e6; // Two different loops to intersect ray with cubes and spheres. 
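// geomList is packed with the light and cubes first, then spheres (see the reordering in cudaRaytraceCore), so each loop below runs a single intersection routine over a contiguous index range instead of branching on geometry type per object.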
for (int i = 0; i < objectCountInfo.nCubes; ++i) { staticGeom currentGeom = geoms [i]; interceptValue = boxIntersectionTest(currentGeom, castRay, intrPoint, intrNormal, UVcoords); if (interceptValue > 0) { if (interceptValue < min) { min = interceptValue; theRightIntercept.interceptVal = min; theRightIntercept.intrNormal = intrNormal; theRightIntercept.intrMaterial = textureArray [currentGeom.materialid]; theRightIntercept.UV = UVcoords; } } } for (int i = objectCountInfo.nCubes; i <= (objectCountInfo.nCubes+objectCountInfo.nSpheres); ++i) { staticGeom currentGeom = geoms [i]; interceptValue = sphereIntersectionTest(currentGeom, castRay, intrPoint, intrNormal); if (interceptValue > 0) { if (interceptValue < min) { min = interceptValue; theRightIntercept.interceptVal = min; theRightIntercept.intrNormal = intrNormal; theRightIntercept.intrMaterial = textureArray [currentGeom.materialid]; } } } return theRightIntercept; } // Given MaxWidth of a 2D array, and the x and y co-ordinates or indices of an element, returns the equivalent 1D array index. __device__ unsigned long getIndex (int x, int y, int MaxWidth) { return (unsigned long) y*MaxWidth + x ; } // Check for approximate equality. __host__ __device__ bool isApproximate (float valToBeCompared, float valToBeCheckedAgainst) { if ((valToBeCompared >= valToBeCheckedAgainst-0.001) && (valToBeCompared <= valToBeCheckedAgainst+0.001)) return true; return false; } // Given the UV coordinates (UVcoords) and a Texture, this returns the bilinearly interpolated colour at that point. __device__ glm::vec3 getColour (mytexture &Texture, glm::vec2 UVcoords) { unsigned long texelXY, texelXPlusOneY, texelXYPlusOne, texelXPlusOneYPlusOne; float xInterp = (Texture.texelWidth * UVcoords.x) - floor (Texture.texelWidth * UVcoords.x); float yInterp = (Texture.texelHeight * UVcoords.y) - floor (Texture.texelHeight * UVcoords.y); texelXY = getIndex ((int)floor (Texture.texelWidth * UVcoords.x), (int)floor (Texture.texelHeight * UVcoords.y), Texture.texelWidth); texelXPlusOneY = getIndex ((int)ceil (Texture.texelWidth * UVcoords.x), (int)floor (Texture.texelHeight * UVcoords.y), Texture.texelWidth); texelXYPlusOne = getIndex ((int)floor (Texture.texelWidth * UVcoords.x), (int)ceil (Texture.texelHeight * UVcoords.y), Texture.texelWidth); texelXPlusOneYPlusOne = getIndex ((int)ceil (Texture.texelWidth * UVcoords.x), (int)ceil (Texture.texelHeight * UVcoords.y), Texture.texelWidth); glm::vec3 xInterpedColour1, xInterpedColour2, finalColour; xInterpedColour1 = xInterp * Texture.texels [texelXPlusOneY] + (1-xInterp)* Texture.texels [texelXY]; xInterpedColour2 = xInterp * Texture.texels [texelXPlusOneYPlusOne] + (1-xInterp)* Texture.texels [texelXYPlusOne]; finalColour = yInterp * xInterpedColour2 + (1-yInterp) * xInterpedColour1; return finalColour; } // Calclates the direct lighting at a given point, which is calculated from castRay and interceptVal of theRightIntercept. 
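// Reflective or refractive materials return their specular colour; the texture lookup is commented out below, so every other hit falls back to the flat material colour.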
__device__ glm::vec3 calcShade (interceptInfo theRightIntercept, mytexture* textureArray) { glm::vec3 shadedColour = glm::vec3 (0,0,0); if ((theRightIntercept.interceptVal > 0)) { if ((theRightIntercept.intrMaterial.hasReflective >= 1.0) || (theRightIntercept.intrMaterial.hasRefractive >= 1.0)) shadedColour = theRightIntercept.intrMaterial.specularColor; // else if (theRightIntercept.intrMaterial.hasTexture) // shadedColour = getColour (textureArray [theRightIntercept.intrMaterial.textureid], theRightIntercept.UV); else shadedColour = theRightIntercept.intrMaterial.color; } return shadedColour; } //TODO: Done! //Core raytracer kernel __global__ void raytraceRay (float time, cameraData cam, int rayDepth, glm::vec3* colors, staticGeom* geoms, material* textureArray, mytexture * Textures, sceneInfo objectCountInfo, bool *primaryArrayOnDevice, ray *rayPoolOnDevice, int rayPoolLength) { extern __shared__ glm::vec3 arrayPool []; __shared__ glm::vec3 *colourBlock; __shared__ bool *primArrayBlock; __shared__ ray *rayPoolBlock; if ((threadIdx.x == 0) && (threadIdx.y == 0)) { colourBlock = arrayPool; primArrayBlock = (bool *) &colourBlock [blockDim.x * blockDim.y]; rayPoolBlock = (ray *) &primArrayBlock [blockDim.x * blockDim.y]; } __syncthreads (); // Block all threads until the colourBlock, rayPoolBlock // and primArrayBlock pointers have been bound properly. // We have a 1-D array of blocks in the grid. From a thread's perspective, it is a 2-D array. // Ray pool is a massive 1-D array, so we need to compute the index of the element of ray pool // that each thread will handle. int index = (blockIdx.x * blockDim.x) + threadIdx.x + // X-part: straightforward (threadIdx.y * (int)(blockDim.x * ceil ((float)rayPoolLength / (float)(blockDim.x*blockDim.y)))); // Y-part: as below: // No. of blocks in the grid = ceil (rayPoolLength / (blockDim.x*blockDim.y)) // Multiplying that with the no. threads in a block gives the no. of threads in a single row of grid. // Multiplying that with row number (threadIdx.y) and adding the x offset (X-part) gives the index. // threadID gives the index of the thread when the block of threads is flattened out into a 1D array. // We need this because we're using shared memory. int threadID = threadIdx.y*blockDim.x + threadIdx.x; int colourIndex; glm::vec3 shadedColour = glm::vec3 (0); if (index < rayPoolLength) { primArrayBlock [threadID] = primaryArrayOnDevice [index]; rayPoolBlock [threadID] = rayPoolOnDevice [index]; // We compute the index for the colour array separately since it represents a frame // and each index represents a pixel. If we don't, stream compaction would mess things up. colourIndex = rayPoolBlock [threadID].y*cam.resolution.x + rayPoolBlock [threadID].x; colourBlock [threadID] = colors [colourIndex]; // colourBlock [threadID] therefore represents colour computed by ray through the pixel (x,y) interceptInfo theRightIntercept = getIntercept (geoms, objectCountInfo, rayPoolBlock [threadID], textureArray); shadedColour += calcShade (theRightIntercept, Textures); if ((theRightIntercept.intrMaterial.emittance > 0) || (theRightIntercept.interceptVal < 0)) primArrayBlock [threadID] = false; // Ray did not hit anything or it hit light, so kill it. 
else calculateBSDF (rayPoolBlock [threadID], rayPoolBlock [threadID].origin + rayPoolBlock [threadID].direction * theRightIntercept.interceptVal, theRightIntercept.intrNormal, glm::vec3 (0), AbsorptionAndScatteringProperties (), index*time, theRightIntercept.intrMaterial.color, glm::vec3 (0), theRightIntercept.intrMaterial); if (glm::length (colourBlock [threadID]) > 0) colourBlock [threadID] *= shadedColour; // Add computed shade to shadedColour. else colourBlock [threadID] = shadedColour; } __syncthreads (); // Copy the rayPool, Colour and Primary arrays back to global memory. if (index < rayPoolLength) { primaryArrayOnDevice [index] = primArrayBlock [threadID]; rayPoolOnDevice [index] = rayPoolBlock [threadID]; colors [colourIndex] = colourBlock [threadID]; } } // Kernel to create the initial pool of rays. __global__ void createRayPool (ray *rayPool, bool *primaryArray, int *secondaryArray, cameraData cam, projectionInfo ProjectionParams) { int x = (blockDim.x * blockIdx.x) + threadIdx.x; int y = (blockDim.y * blockIdx.y) + threadIdx.y; int threadID = x + y * cam.resolution.y; if (threadID < cam.resolution.x*cam.resolution.y) { rayPool [threadID] = raycastFromCameraKernel (cam.resolution, 0, x, y, cam.position, cam.view, cam.up, cam.fov, ProjectionParams.centreProj, ProjectionParams.halfVecH, ProjectionParams.halfVecV); rayPool [threadID].x = (blockDim.x * blockIdx.x) + threadIdx.x; rayPool [threadID].y = (blockDim.y * blockIdx.y) + threadIdx.y; primaryArray [threadID] = true; secondaryArray [threadID] = 0; } } __global__ void copyArray (bool *from, int *to, int fromLength) { int globalIndex = blockDim.x*blockIdx.x + threadIdx.x; if (globalIndex < fromLength) to [globalIndex] = (int)from [globalIndex]; } __global__ void copyArray (ray *from, ray *to, int fromLength) { int globalIndex = blockDim.x*blockIdx.x + threadIdx.x; if (globalIndex < fromLength) to [globalIndex] = from [globalIndex]; } __global__ void copyArray (int *from, int *to, int fromLength) { int globalIndex = blockDim.x*blockIdx.x + threadIdx.x; if (globalIndex < fromLength) to [globalIndex] = from [globalIndex]; } // Kernel to do inclusive scan. // Do NOT copy the results back in the same kernel as threads in other blocks might be still accessing the same location in // global memory, causing a read/write conflict. Use copyArray or hipMemcpy. __global__ void inclusiveScan (int *secondaryArray, int *tmpArray, int primArrayLength, int iteration) { unsigned long curIndex = blockDim.x*blockIdx.x + threadIdx.x; long prevIndex = curIndex - floor (pow ((float)2.0, (float)(iteration-1))); if (curIndex < primArrayLength) { if (/*curIndex >= floor (pow ((float)2.0, (float)(iteration-1)))*/prevIndex >= 0) tmpArray [curIndex] = secondaryArray [curIndex] + secondaryArray [prevIndex]; } } // Kernel to shift all elements of Array to the right. // The last element is thrown out in the process and the first element becomes 0. // Can convert an inclusive scan result to an exclusive scan. // Do NOT copy the results back in the same kernel as threads in other blocks might be still accessing the same location in // global memory, causing a read/write conflict and erroneous values. Use copyArray or hipMemcpy. __global__ void shiftRight (int *Array, bool *primaryArray, int arrayLength) { unsigned long curIndex = blockDim.x*blockIdx.x + threadIdx.x; if (curIndex < arrayLength) { if (primaryArray [curIndex]) Array [curIndex] = Array [curIndex] - 1; } } // Kernel to do stream compaction. 
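// primaryArrayOnDevice[i] marks ray i as alive and secondaryArray[i] holds the exclusive scan of those flags, so each surviving ray is scattered to tempRayPool[secondaryArray[i]] and dead rays are dropped.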
__global__ void compactStream (ray *rayPoolOnDevice, ray *tempRayPool, bool *primaryArrayOnDevice, int *secondaryArray, int rayPoolLengthOnDevice) { unsigned long curIndex = blockDim.x*blockIdx.x + threadIdx.x; if (curIndex < rayPoolLengthOnDevice) { int secondArrayIndex = secondaryArray [curIndex]; if (primaryArrayOnDevice [curIndex]) tempRayPool [secondArrayIndex] = rayPoolOnDevice [curIndex]; } } // This kernel will accumulate all the colours calculated in an iteration into the actual colour array. __global__ void accumulateIterationColour (glm::vec3* accumulator, glm::vec3* iterationColour, glm::vec2 resolution) { int index = (blockDim.y*blockIdx.y + threadIdx.y) * resolution.x + (blockDim.x*blockIdx.x + threadIdx.x); if (index < resolution.x*resolution.y) accumulator [index] += iterationColour [index]; } // This kernel replaces the colours of the respective pixels of all the rays in the ray pool with noise (0,0,0) __global__ void addNoise (glm::vec3 *localColours, ray *rayPoolOnDevice, int rayPoolLength, glm::vec2 resolution) { // Index calculation, as in raytraceRay int index = (blockIdx.x * blockDim.x) + threadIdx.x + // X-part (threadIdx.y * (int)(blockDim.x * ceil ((float)rayPoolLength / (float)(blockDim.x*blockDim.y)))); // Y-part if (index < rayPoolLength) { // Index re-calculation for colour array, as in raytraceRay ray currentRay = rayPoolOnDevice [index]; int colourIndex = currentRay.y * resolution.x + currentRay.x; localColours [colourIndex] = glm::vec3 (0); } } //TODO: Done! // Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management void cudaRaytraceCore(uchar4* PBOpos, camera* renderCam, int frame, int iterations, material* materials, int numberOfMaterials, geom* geoms, int numberOfGeoms, mytexture* textures, int numberOfTextures){ int traceDepth = 1; //determines how many bounces the raytracer traces projectionInfo ProjectionParams; float degToRad = 3.1415926 / 180.0; // Set up projection. ProjectionParams.centreProj = renderCam->positions [frame]+renderCam->views [frame]; glm::vec3 eyeToProjCentre = ProjectionParams.centreProj - renderCam->positions [frame]; glm::vec3 A = glm::cross (eyeToProjCentre, renderCam->ups [frame]); glm::vec3 B = glm::cross (A, eyeToProjCentre); float lenEyeToProjCentre = glm::length (eyeToProjCentre); ProjectionParams.halfVecH = glm::normalize (A) * lenEyeToProjCentre * (float)tan ((renderCam->fov.x*degToRad) / 2.0); ProjectionParams.halfVecV = glm::normalize (B) * lenEyeToProjCentre * (float)tan ((renderCam->fov.y*degToRad) / 2.0); // set up crucial magic int tileSize = 8; dim3 threadsPerBlock(tileSize, tileSize); dim3 fullBlocksPerGrid((int)ceil(float(renderCam->resolution.x)/float(tileSize))); //send image to GPU glm::vec3* cudaFinalImage = NULL; hipMalloc((void**)&cudaFinalImage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3)); hipMemcpy( cudaFinalImage, renderCam->image, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), hipMemcpyHostToDevice); // package geometry to be sent to GPU global memory staticGeom* geomList = new staticGeom[numberOfGeoms]; sceneInfo primCounts; // Reorder geometry so that light is the first item in geomList, // followed by cubes and then spheres. Doing so reduces divergence. 
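// The light is recognised below by materialid == 8 and stored at geomList[0]; primCounts.nCubes and primCounts.nSpheres then record how many items each range holds so getIntercept can loop over cubes and spheres separately.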
int count = 1; int lightIndex = 0; bool lightSet = false; for(int i=0; i<numberOfGeoms; i++) { if ((geoms [i].materialid == 8) && !lightSet) { staticGeom newStaticGeom; newStaticGeom.type = geoms[i].type; newStaticGeom.materialid = geoms[i].materialid; newStaticGeom.translation = geoms[i].translations[frame]; newStaticGeom.rotation = geoms[i].rotations[frame]; newStaticGeom.scale = geoms[i].scales[frame]; newStaticGeom.transform = geoms[i].transforms[frame]; newStaticGeom.inverseTransform = geoms[i].inverseTransforms[frame]; geomList[0] = newStaticGeom; lightSet = true; lightIndex = i; } else if (geoms [i].type == CUBE) { staticGeom newStaticGeom; newStaticGeom.type = geoms[i].type; newStaticGeom.materialid = geoms[i].materialid; newStaticGeom.translation = geoms[i].translations[frame]; newStaticGeom.rotation = geoms[i].rotations[frame]; newStaticGeom.scale = geoms[i].scales[frame]; newStaticGeom.transform = geoms[i].transforms[frame]; newStaticGeom.inverseTransform = geoms[i].inverseTransforms[frame]; geomList[count] = newStaticGeom; count ++; } } if (!lightSet) { geomList [0] = geomList [count-1]; count --; } // Lights may only be cubes. primCounts.nCubes = count; for(int i=0; i<numberOfGeoms; i++) { if (geoms [i].type == SPHERE) { staticGeom newStaticGeom; newStaticGeom.type = geoms[i].type; newStaticGeom.materialid = geoms[i].materialid; newStaticGeom.translation = geoms[i].translations[frame]; newStaticGeom.rotation = geoms[i].rotations[frame]; newStaticGeom.scale = geoms[i].scales[frame]; newStaticGeom.transform = geoms[i].transforms[frame]; newStaticGeom.inverseTransform = geoms[i].inverseTransforms[frame]; geomList[count] = newStaticGeom; count ++; } } primCounts.nSpheres = count - primCounts.nCubes; primCounts.nMeshes = 0; materials [geoms [lightIndex].materialid].color *= materials [geoms [lightIndex].materialid].emittance; // Allocate memory. We'll copy it later (because we're moving objects around for Motion blur). staticGeom* cudageoms = NULL; hipMalloc((void**)&cudageoms, numberOfGeoms*sizeof(staticGeom)); hipMemcpy( cudageoms, geomList, numberOfGeoms*sizeof(staticGeom), hipMemcpyHostToDevice); // Copy materials to GPU global memory: material *materialColours = NULL; glm::vec3 *colourArray = NULL; int sizeOfMaterialsArr = numberOfMaterials * (sizeof (material)); hipMalloc((void**)&materialColours, numberOfMaterials*sizeof(material)); checkCUDAError ("Could not create Materials Array!: "); hipMemcpy (materialColours, materials, numberOfMaterials*sizeof(material), hipMemcpyHostToDevice); // Package all the texture data into an array of structures. mytexture *textureList = new mytexture [numberOfTextures]; for (int i=0; i < numberOfTextures; i++) { textureList [i].texelWidth = textures [i].texelWidth; textureList [i].texelHeight = textures [i].texelHeight; // Malloc for texture data (RGB values) and store the pointer to device memory in texels. // So that when this structure is accessed from the device, the pointer reference is valid. int nTexelElements = textureList [i].texelWidth*textureList [i].texelHeight; hipMalloc((void**)&textureList [i].texels, nTexelElements*sizeof(glm::vec3)); checkCUDAError ("Error allocing memory for texture data! "); hipMemcpy (textureList [i].texels, textures [i].texels, nTexelElements*sizeof(glm::vec3), hipMemcpyHostToDevice); } // Send the array of textures to the GPU. mytexture * textureArray = NULL; hipMalloc((void**)&textureArray, numberOfTextures*sizeof(mytexture)); checkCUDAError ("Error allocing memory for texture array! 
"); hipMemcpy (textureArray, textureList, numberOfTextures*sizeof(mytexture), hipMemcpyHostToDevice); delete [] textureList; glm::vec3 lightPosInBodySpace = glm::vec3 (0, -0.6, 0); //package camera cameraData cam; cam.resolution = renderCam->resolution; cam.position = renderCam->positions[frame]; cam.view = renderCam->views[frame]; cam.up = renderCam->ups[frame]; cam.fov = renderCam->fov; unsigned int nIterations = renderCam->iterations; time_t startTime = time (NULL); std::default_random_engine randomNumGen (hash (startTime)); std::uniform_real_distribution<float> jitter ((float)0, (float)0.142); float movement = 3.0/nIterations; // For motion blur. int nBounces = 6; int oneEighthDivisor = nIterations / 8; // For antialiasing. int errCount = 0; // For each point sampled in the area light, launch the raytraceRay Kernel which will compute the diffuse, specular, ambient // and shadow colours. It will also compute reflected colours for reflective surfaces. for (int i = 0; i < nIterations; i ++) { glm::vec3* cudaimage = NULL; hipMalloc((void**)&cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3)); hipMemset (cudaimage, 0, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3)); float zAdd = jitter (randomNumGen); float xAdd = jitter (randomNumGen); glm::vec3 curLightSamplePos = lightPosInBodySpace; if (!(i%oneEighthDivisor)) // Supersampling at 8x! { cam.position.y += zAdd*0.002; cam.position.x += xAdd*0.002; } if (!((i*4)/(3*nIterations))) { // Motion blur! geomList [primCounts.nCubes].translation += glm::vec3 (movement, 0, 0); glm::mat4 transform = utilityCore::buildTransformationMatrix(geomList [primCounts.nCubes].translation, geomList [primCounts.nCubes].rotation, geomList [primCounts.nCubes].scale); geomList [primCounts.nCubes].transform = utilityCore::glmMat4ToCudaMat4(transform); geomList [primCounts.nCubes].inverseTransform = utilityCore::glmMat4ToCudaMat4(glm::inverse(transform)); } // Now copy the geometry list to the GPU global memory. hipMemcpy( cudageoms, geomList, numberOfGeoms*sizeof(staticGeom), hipMemcpyHostToDevice); // Create Ray Pool. int rayPoolLength = cam.resolution.x * cam.resolution.y; ray *rayPoolOnDevice = NULL; hipMalloc ((void **)&rayPoolOnDevice, rayPoolLength * sizeof (ray)); // Primary Array -> Array holding the true/false value specifying whether the ray is alive (true) or dead (false). bool *primaryArrayOnHost = new bool [rayPoolLength]; memset (primaryArrayOnHost, true, rayPoolLength * sizeof(bool)); bool *primaryArrayOnDevice = NULL; hipMalloc ((void **)&primaryArrayOnDevice, rayPoolLength * sizeof (bool)); // Secondary Array -> Array that will hold the indices of rays that are alive. Used in stream compaction. int *secondaryArrayOnDevice = NULL; hipMalloc ((void **)&secondaryArrayOnDevice, rayPoolLength * sizeof (int)); int *secondaryArrayOnHost = new int [rayPoolLength]; // Launch createRayPool kernel to create the ray pool and populate the primary and secondary arrays. fullBlocksPerGrid = dim3 ((int)ceil(float(cam.resolution.x)/threadsPerBlock.x), (int)ceil(float(cam.resolution.y)/threadsPerBlock.y)); hipLaunchKernelGGL(( createRayPool), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, rayPoolOnDevice, primaryArrayOnDevice, secondaryArrayOnDevice, cam, ProjectionParams); dim3 threadsPerBlock1D (threadsPerBlock.x*threadsPerBlock.y); // Iterate until nBounces: launch kernel to trace each ray bounce. 
for (int j = 0; j < nBounces; ++j) { // The core raytraceRay kernel launch fullBlocksPerGrid = dim3 ((int)ceil(float(rayPoolLength)/(threadsPerBlock.x*threadsPerBlock.y))); hipLaunchKernelGGL(( raytraceRay), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), threadsPerBlock.x*threadsPerBlock.y*(sizeof(glm::vec3) + sizeof (bool) + sizeof(ray)), 0, (float)j+(i*nBounces), cam, j, cudaimage, cudageoms, materialColours, textureArray, primCounts, primaryArrayOnDevice, rayPoolOnDevice, rayPoolLength); /// ----- CPU/GPU Hybrid Stream Compaction ----- /// // Scan is done on the CPU, the actual compaction happens on the GPU. // ------------------------------------------------------------------ // Copy the primary array from device to host. hipMemcpy (primaryArrayOnHost, primaryArrayOnDevice, rayPoolLength * sizeof (bool), hipMemcpyDeviceToHost); // Exclusive scan. secondaryArrayOnHost [0] = 0; for (int k = 1; k < rayPoolLength; ++ k) secondaryArrayOnHost [k] = secondaryArrayOnHost [k-1] + primaryArrayOnHost [k-1]; // This is because the compactStream kernel should run on the whole, uncompacted array. // We'll set this to rayPoolLength once compactStream has done its job. int compactedRayPoolLength = secondaryArrayOnHost [rayPoolLength-1] + primaryArrayOnHost [rayPoolLength-1]; // Stream compaction. Compact the ray pool into tmpRayPool. ray *tmpRayPool = NULL; hipMalloc ((void **)&tmpRayPool, rayPoolLength * sizeof (ray)); hipMemcpy (secondaryArrayOnDevice, secondaryArrayOnHost, rayPoolLength * sizeof (int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( compactStream), dim3(fullBlocksPerGrid), dim3(threadsPerBlock1D), 0, 0, rayPoolOnDevice, tmpRayPool, primaryArrayOnDevice, secondaryArrayOnDevice, rayPoolLength); // Now set rayPoolLength to the compacted array size, compactedRayPoolLength. rayPoolLength = compactedRayPoolLength; // Copy the ray pool from tmpRayPool back into rayPoolOnDevice. hipLaunchKernelGGL(( copyArray), dim3(fullBlocksPerGrid), dim3(threadsPerBlock1D), 0, 0, tmpRayPool, rayPoolOnDevice, rayPoolLength); hipFree (tmpRayPool); // Set the primary array to all trues because all rays in the ray pool are alive, // now that stream compaction has already happened. hipMemset (primaryArrayOnDevice, true, rayPoolLength * sizeof (bool)); } checkCUDAError ("One or more of the raytrace/stream compaction kernels failed. "); // At this point, since stream compaction has already taken place, // it means that rayPoolOnDevice contains only rays that are still alive. fullBlocksPerGrid = dim3 ((int)ceil(float(rayPoolLength)/(threadsPerBlock.x*threadsPerBlock.y))); hipLaunchKernelGGL(( addNoise), dim3(fullBlocksPerGrid),dim3(threadsPerBlock), 0, 0, cudaimage, rayPoolOnDevice, rayPoolLength, cam.resolution); fullBlocksPerGrid = dim3 ((int)ceil(float(cam.resolution.x)/threadsPerBlock.x), (int)ceil(float(cam.resolution.y)/threadsPerBlock.y)); hipLaunchKernelGGL(( accumulateIterationColour), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, cudaFinalImage, cudaimage, cam.resolution); checkCUDAError("accumulateIterationColour Kernel failed!"); hipFree (rayPoolOnDevice); hipFree (primaryArrayOnDevice); hipFree (secondaryArrayOnDevice); hipFree (cudaimage); rayPoolOnDevice = NULL; primaryArrayOnDevice = NULL; cudaimage = NULL; delete [] primaryArrayOnHost; delete [] secondaryArrayOnHost; std::cout << "\rRendering.. " << ceil ((float)i/(nIterations-1) * 100) << " percent complete."; } // Accumulate all the colours in the cudaFinalImage memory block on the GPU, and divide // by the no. 
of light samples to get the final colour. hipLaunchKernelGGL(( sendImageToPBO), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, PBOpos, renderCam->resolution, cudaFinalImage, nIterations); std::cout.precision (4); std::cout << "\nRendered in " << difftime (time (NULL), startTime) << " seconds. \n\n"; //retrieve image from GPU hipMemcpy( renderCam->image, cudaFinalImage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), hipMemcpyDeviceToHost); //free up stuff, or else we'll leak memory like a madman if (cudaFinalImage) hipFree( cudaFinalImage ); if (cudageoms) hipFree( cudageoms ); if (materialColours) { hipFree (materialColours); } if (textureArray) { hipFree (textureArray); } cudaFinalImage = NULL; cudageoms = NULL; materialColours = NULL; // make certain the kernel has completed hipDeviceSynchronize(); delete [] geomList; checkCUDAError("Kernel failed!"); }
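A note on the bounce loop above: every bounce copies the alive-ray flags to the host, scans them there, and copies the scan result back before compacting on the GPU. The same exclusive scan can run on the device, removing both transfers. The sketch below only illustrates that alternative and is not part of the renderer: the helper name is invented, it uses the CUDA spellings to match the .cu half of this pair, and rocThrust exposes the same interface for the HIP build.

#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/scan.h>

// Hypothetical replacement for the CPU scan: fills secondaryArrayOnDevice with each
// surviving ray's destination slot and returns the compacted pool length.
int scanAliveFlagsOnDevice(const bool *primaryArrayOnDevice,
                           int *secondaryArrayOnDevice,
                           int rayPoolLength)
{
    thrust::device_ptr<const bool> flagsIn =
        thrust::device_pointer_cast(primaryArrayOnDevice);

    // Promote bool -> int so the scan accumulates counts rather than booleans.
    thrust::device_vector<int> flags(rayPoolLength);
    thrust::copy(flagsIn, flagsIn + rayPoolLength, flags.begin());

    thrust::device_ptr<int> scanOut =
        thrust::device_pointer_cast(secondaryArrayOnDevice);
    thrust::exclusive_scan(flags.begin(), flags.end(), scanOut);

    // Compacted length = last exclusive-scan value + last flag.
    int lastIndex = scanOut[rayPoolLength - 1];
    int lastFlag  = flags[rayPoolLength - 1];
    return lastIndex + lastFlag;
}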
de401239e63ef1fcdf83e732c4dee8d34fa138e1.cu
// CIS565 CUDA Raytracer: A parallel raytracer for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania // Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania // This file includes code from: // Rob Farber for CUDA-GL interop, from CUDA Supercomputing For The Masses: http://www.drdobbs.com/architecture-and-design/cuda-supercomputing-for-the-masses-part/222600097 // Peter Kutz and Yining Karl Li's GPU Pathtracer: http://gpupathtracer.blogspot.com/ // Yining Karl Li's TAKUA Render, a massively parallel pathtracing renderer: http://www.yiningkarlli.com #include <stdio.h> #include <cuda.h> #include <cmath> #include <ctime> #include <random> #include "sceneStructs.h" #include "glm/glm.hpp" #include "utilities.h" #include "raytraceKernel.h" #include "intersections.h" #include "interactions.h" #if CUDA_VERSION >= 5000 #include <helper_math.h> #else #include <cutil_math.h> #endif const glm::vec3 bgColour = glm::vec3 (0.55, 0.25, 0); void checkCUDAError(const char *msg) { cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) ); std::cin.get (); exit(EXIT_FAILURE); } } //Sets up the projection half vectors. void setupProjection (projectionInfo &ProjectionParams, glm::vec3 eye, glm::vec3 view, glm::vec3 up, glm::vec2 fov) { //Set up the projection variables: float degToRad = 3.1415926 / 180.0; float radToDeg = 1.0 / degToRad; ProjectionParams.centreProj = eye+view; glm::vec3 eyeToProjCentre = ProjectionParams.centreProj - eye; glm::vec3 A = glm::cross (ProjectionParams.centreProj, up); glm::vec3 B = glm::cross (A, ProjectionParams.centreProj); float lenEyeToProjCentre = glm::length (eyeToProjCentre); ProjectionParams.halfVecH = glm::normalize (A) * lenEyeToProjCentre * (float)tan ((fov.x*degToRad)); ProjectionParams.halfVecV = glm::normalize (B) * lenEyeToProjCentre * (float)tan ((fov.y*degToRad)); } // Reflects the incidentRay around the normal. __host__ __device__ glm::vec3 reflectRay (glm::vec3 incidentRay, glm::vec3 normal) { glm::vec3 reflectedRay = incidentRay - (2.0f*glm::dot (incidentRay, normal))*normal; return reflectedRay; } //LOOK: This function demonstrates how to use thrust for random number generation on the GPU! //Function that generates static. 
__host__ __device__ glm::vec3 generateRandomNumberFromThread(glm::vec2 resolution, float time, int x, int y){ int index = x + (y * resolution.x); thrust::default_random_engine rng(hash(index*time)); thrust::uniform_real_distribution<float> u01(0,1); return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng)); } //Function that does the initial raycast from the camera __host__ __device__ ray raycastFromCameraKernel(glm::vec2 resolution, float time, int x, int y, glm::vec3 eye, glm::vec3 view, glm::vec3 up, glm::vec2 fov, glm::vec3 centreProj, glm::vec3 halfVecH, glm::vec3 halfVecV) { ray r; r.origin = eye; r.direction = glm::vec3(0,0,-1); float normDeviceX = (float)x / (resolution.x-1); float normDeviceY = 1 - ((float)y / (resolution.y-1)); glm::vec3 P = centreProj + (2*normDeviceX - 1)*halfVecH + (2*normDeviceY - 1)*halfVecV; r.direction = glm::normalize (P - r.origin); return r; } //Kernel that blacks out a given image buffer __global__ void clearImage(glm::vec2 resolution, glm::vec3* image){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ image[index] = glm::vec3(0,0,0); } } //Kernel that writes the image to the OpenGL PBO directly. __global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image, int nLights){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ image [index] /= nLights; glm::vec3 color; color.x = image[index].x*255.0; color.y = image[index].y*255.0; color.z = image[index].z*255.0; if(color.x>255){ color.x = 255; } if(color.y>255){ color.y = 255; } if(color.z>255){ color.z = 255; } // Each thread writes one pixel location in the texture (textel) PBOpos[index].w = 0; PBOpos[index].x = color.x; PBOpos[index].y = color.y; PBOpos[index].z = color.z; } } // Intersects the castRay with all the geometry in the scene (geoms) and returns the intercept information. __device__ interceptInfo getIntercept (staticGeom * geoms, sceneInfo objectCountInfo, ray castRay, material* textureArray) { glm::vec3 intrPoint = glm::vec3 (0, 0, 0); glm::vec3 intrNormal = glm::vec3 (0, 0, 0); glm::vec2 UVcoords = glm::vec2 (0, 0); float interceptValue = -32767; material newMaterial; newMaterial.color = glm::vec3 (0,0,0); newMaterial.specularExponent = 1.0; newMaterial.hasReflective = 0.0; newMaterial.hasRefractive = 0.0; interceptInfo theRightIntercept; // Stores the lowest intercept. theRightIntercept.interceptVal = interceptValue; // Initially, it is empty/invalid theRightIntercept.intrNormal = intrNormal; // Intially, Normal - 0,0,0 theRightIntercept.intrMaterial = newMaterial; float min = 1e6; // Two different loops to intersect ray with cubes and spheres. 
for (int i = 0; i < objectCountInfo.nCubes; ++i) { staticGeom currentGeom = geoms [i]; interceptValue = boxIntersectionTest(currentGeom, castRay, intrPoint, intrNormal, UVcoords); if (interceptValue > 0) { if (interceptValue < min) { min = interceptValue; theRightIntercept.interceptVal = min; theRightIntercept.intrNormal = intrNormal; theRightIntercept.intrMaterial = textureArray [currentGeom.materialid]; theRightIntercept.UV = UVcoords; } } } for (int i = objectCountInfo.nCubes; i <= (objectCountInfo.nCubes+objectCountInfo.nSpheres); ++i) { staticGeom currentGeom = geoms [i]; interceptValue = sphereIntersectionTest(currentGeom, castRay, intrPoint, intrNormal); if (interceptValue > 0) { if (interceptValue < min) { min = interceptValue; theRightIntercept.interceptVal = min; theRightIntercept.intrNormal = intrNormal; theRightIntercept.intrMaterial = textureArray [currentGeom.materialid]; } } } return theRightIntercept; } // Given MaxWidth of a 2D array, and the x and y co-ordinates or indices of an element, returns the equivalent 1D array index. __device__ unsigned long getIndex (int x, int y, int MaxWidth) { return (unsigned long) y*MaxWidth + x ; } // Check for approximate equality. __host__ __device__ bool isApproximate (float valToBeCompared, float valToBeCheckedAgainst) { if ((valToBeCompared >= valToBeCheckedAgainst-0.001) && (valToBeCompared <= valToBeCheckedAgainst+0.001)) return true; return false; } // Given the UV coordinates (UVcoords) and a Texture, this returns the bilinearly interpolated colour at that point. __device__ glm::vec3 getColour (mytexture &Texture, glm::vec2 UVcoords) { unsigned long texelXY, texelXPlusOneY, texelXYPlusOne, texelXPlusOneYPlusOne; float xInterp = (Texture.texelWidth * UVcoords.x) - floor (Texture.texelWidth * UVcoords.x); float yInterp = (Texture.texelHeight * UVcoords.y) - floor (Texture.texelHeight * UVcoords.y); texelXY = getIndex ((int)floor (Texture.texelWidth * UVcoords.x), (int)floor (Texture.texelHeight * UVcoords.y), Texture.texelWidth); texelXPlusOneY = getIndex ((int)ceil (Texture.texelWidth * UVcoords.x), (int)floor (Texture.texelHeight * UVcoords.y), Texture.texelWidth); texelXYPlusOne = getIndex ((int)floor (Texture.texelWidth * UVcoords.x), (int)ceil (Texture.texelHeight * UVcoords.y), Texture.texelWidth); texelXPlusOneYPlusOne = getIndex ((int)ceil (Texture.texelWidth * UVcoords.x), (int)ceil (Texture.texelHeight * UVcoords.y), Texture.texelWidth); glm::vec3 xInterpedColour1, xInterpedColour2, finalColour; xInterpedColour1 = xInterp * Texture.texels [texelXPlusOneY] + (1-xInterp)* Texture.texels [texelXY]; xInterpedColour2 = xInterp * Texture.texels [texelXPlusOneYPlusOne] + (1-xInterp)* Texture.texels [texelXYPlusOne]; finalColour = yInterp * xInterpedColour2 + (1-yInterp) * xInterpedColour1; return finalColour; } // Calclates the direct lighting at a given point, which is calculated from castRay and interceptVal of theRightIntercept. 
__device__ glm::vec3 calcShade (interceptInfo theRightIntercept, mytexture* textureArray) { glm::vec3 shadedColour = glm::vec3 (0,0,0); if ((theRightIntercept.interceptVal > 0)) { if ((theRightIntercept.intrMaterial.hasReflective >= 1.0) || (theRightIntercept.intrMaterial.hasRefractive >= 1.0)) shadedColour = theRightIntercept.intrMaterial.specularColor; // else if (theRightIntercept.intrMaterial.hasTexture) // shadedColour = getColour (textureArray [theRightIntercept.intrMaterial.textureid], theRightIntercept.UV); else shadedColour = theRightIntercept.intrMaterial.color; } return shadedColour; } //TODO: Done! //Core raytracer kernel __global__ void raytraceRay (float time, cameraData cam, int rayDepth, glm::vec3* colors, staticGeom* geoms, material* textureArray, mytexture * Textures, sceneInfo objectCountInfo, bool *primaryArrayOnDevice, ray *rayPoolOnDevice, int rayPoolLength) { extern __shared__ glm::vec3 arrayPool []; __shared__ glm::vec3 *colourBlock; __shared__ bool *primArrayBlock; __shared__ ray *rayPoolBlock; if ((threadIdx.x == 0) && (threadIdx.y == 0)) { colourBlock = arrayPool; primArrayBlock = (bool *) &colourBlock [blockDim.x * blockDim.y]; rayPoolBlock = (ray *) &primArrayBlock [blockDim.x * blockDim.y]; } __syncthreads (); // Block all threads until the colourBlock, rayPoolBlock // and primArrayBlock pointers have been bound properly. // We have a 1-D array of blocks in the grid. From a thread's perspective, it is a 2-D array. // Ray pool is a massive 1-D array, so we need to compute the index of the element of ray pool // that each thread will handle. int index = (blockIdx.x * blockDim.x) + threadIdx.x + // X-part: straightforward (threadIdx.y * (int)(blockDim.x * ceil ((float)rayPoolLength / (float)(blockDim.x*blockDim.y)))); // Y-part: as below: // No. of blocks in the grid = ceil (rayPoolLength / (blockDim.x*blockDim.y)) // Multiplying that with the no. threads in a block gives the no. of threads in a single row of grid. // Multiplying that with row number (threadIdx.y) and adding the x offset (X-part) gives the index. // threadID gives the index of the thread when the block of threads is flattened out into a 1D array. // We need this because we're using shared memory. int threadID = threadIdx.y*blockDim.x + threadIdx.x; int colourIndex; glm::vec3 shadedColour = glm::vec3 (0); if (index < rayPoolLength) { primArrayBlock [threadID] = primaryArrayOnDevice [index]; rayPoolBlock [threadID] = rayPoolOnDevice [index]; // We compute the index for the colour array separately since it represents a frame // and each index represents a pixel. If we don't, stream compaction would mess things up. colourIndex = rayPoolBlock [threadID].y*cam.resolution.x + rayPoolBlock [threadID].x; colourBlock [threadID] = colors [colourIndex]; // colourBlock [threadID] therefore represents colour computed by ray through the pixel (x,y) interceptInfo theRightIntercept = getIntercept (geoms, objectCountInfo, rayPoolBlock [threadID], textureArray); shadedColour += calcShade (theRightIntercept, Textures); if ((theRightIntercept.intrMaterial.emittance > 0) || (theRightIntercept.interceptVal < 0)) primArrayBlock [threadID] = false; // Ray did not hit anything or it hit light, so kill it. 
else calculateBSDF (rayPoolBlock [threadID], rayPoolBlock [threadID].origin + rayPoolBlock [threadID].direction * theRightIntercept.interceptVal, theRightIntercept.intrNormal, glm::vec3 (0), AbsorptionAndScatteringProperties (), index*time, theRightIntercept.intrMaterial.color, glm::vec3 (0), theRightIntercept.intrMaterial); if (glm::length (colourBlock [threadID]) > 0) colourBlock [threadID] *= shadedColour; // Add computed shade to shadedColour. else colourBlock [threadID] = shadedColour; } __syncthreads (); // Copy the rayPool, Colour and Primary arrays back to global memory. if (index < rayPoolLength) { primaryArrayOnDevice [index] = primArrayBlock [threadID]; rayPoolOnDevice [index] = rayPoolBlock [threadID]; colors [colourIndex] = colourBlock [threadID]; } } // Kernel to create the initial pool of rays. __global__ void createRayPool (ray *rayPool, bool *primaryArray, int *secondaryArray, cameraData cam, projectionInfo ProjectionParams) { int x = (blockDim.x * blockIdx.x) + threadIdx.x; int y = (blockDim.y * blockIdx.y) + threadIdx.y; int threadID = x + y * cam.resolution.y; if (threadID < cam.resolution.x*cam.resolution.y) { rayPool [threadID] = raycastFromCameraKernel (cam.resolution, 0, x, y, cam.position, cam.view, cam.up, cam.fov, ProjectionParams.centreProj, ProjectionParams.halfVecH, ProjectionParams.halfVecV); rayPool [threadID].x = (blockDim.x * blockIdx.x) + threadIdx.x; rayPool [threadID].y = (blockDim.y * blockIdx.y) + threadIdx.y; primaryArray [threadID] = true; secondaryArray [threadID] = 0; } } __global__ void copyArray (bool *from, int *to, int fromLength) { int globalIndex = blockDim.x*blockIdx.x + threadIdx.x; if (globalIndex < fromLength) to [globalIndex] = (int)from [globalIndex]; } __global__ void copyArray (ray *from, ray *to, int fromLength) { int globalIndex = blockDim.x*blockIdx.x + threadIdx.x; if (globalIndex < fromLength) to [globalIndex] = from [globalIndex]; } __global__ void copyArray (int *from, int *to, int fromLength) { int globalIndex = blockDim.x*blockIdx.x + threadIdx.x; if (globalIndex < fromLength) to [globalIndex] = from [globalIndex]; } // Kernel to do inclusive scan. // Do NOT copy the results back in the same kernel as threads in other blocks might be still accessing the same location in // global memory, causing a read/write conflict. Use copyArray or cudaMemcpy. __global__ void inclusiveScan (int *secondaryArray, int *tmpArray, int primArrayLength, int iteration) { unsigned long curIndex = blockDim.x*blockIdx.x + threadIdx.x; long prevIndex = curIndex - floor (pow ((float)2.0, (float)(iteration-1))); if (curIndex < primArrayLength) { if (/*curIndex >= floor (pow ((float)2.0, (float)(iteration-1)))*/prevIndex >= 0) tmpArray [curIndex] = secondaryArray [curIndex] + secondaryArray [prevIndex]; } } // Kernel to shift all elements of Array to the right. // The last element is thrown out in the process and the first element becomes 0. // Can convert an inclusive scan result to an exclusive scan. // Do NOT copy the results back in the same kernel as threads in other blocks might be still accessing the same location in // global memory, causing a read/write conflict and erroneous values. Use copyArray or cudaMemcpy. __global__ void shiftRight (int *Array, bool *primaryArray, int arrayLength) { unsigned long curIndex = blockDim.x*blockIdx.x + threadIdx.x; if (curIndex < arrayLength) { if (primaryArray [curIndex]) Array [curIndex] = Array [curIndex] - 1; } } // Kernel to do stream compaction. 
__global__ void compactStream (ray *rayPoolOnDevice, ray *tempRayPool, bool *primaryArrayOnDevice, int *secondaryArray, int rayPoolLengthOnDevice) { unsigned long curIndex = blockDim.x*blockIdx.x + threadIdx.x; if (curIndex < rayPoolLengthOnDevice) { int secondArrayIndex = secondaryArray [curIndex]; if (primaryArrayOnDevice [curIndex]) tempRayPool [secondArrayIndex] = rayPoolOnDevice [curIndex]; } } // This kernel will accumulate all the colours calculated in an iteration into the actual colour array. __global__ void accumulateIterationColour (glm::vec3* accumulator, glm::vec3* iterationColour, glm::vec2 resolution) { int index = (blockDim.y*blockIdx.y + threadIdx.y) * resolution.x + (blockDim.x*blockIdx.x + threadIdx.x); if (index < resolution.x*resolution.y) accumulator [index] += iterationColour [index]; } // This kernel replaces the colours of the respective pixels of all the rays in the ray pool with noise (0,0,0) __global__ void addNoise (glm::vec3 *localColours, ray *rayPoolOnDevice, int rayPoolLength, glm::vec2 resolution) { // Index calculation, as in raytraceRay int index = (blockIdx.x * blockDim.x) + threadIdx.x + // X-part (threadIdx.y * (int)(blockDim.x * ceil ((float)rayPoolLength / (float)(blockDim.x*blockDim.y)))); // Y-part if (index < rayPoolLength) { // Index re-calculation for colour array, as in raytraceRay ray currentRay = rayPoolOnDevice [index]; int colourIndex = currentRay.y * resolution.x + currentRay.x; localColours [colourIndex] = glm::vec3 (0); } } //TODO: Done! // Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management void cudaRaytraceCore(uchar4* PBOpos, camera* renderCam, int frame, int iterations, material* materials, int numberOfMaterials, geom* geoms, int numberOfGeoms, mytexture* textures, int numberOfTextures){ int traceDepth = 1; //determines how many bounces the raytracer traces projectionInfo ProjectionParams; float degToRad = 3.1415926 / 180.0; // Set up projection. ProjectionParams.centreProj = renderCam->positions [frame]+renderCam->views [frame]; glm::vec3 eyeToProjCentre = ProjectionParams.centreProj - renderCam->positions [frame]; glm::vec3 A = glm::cross (eyeToProjCentre, renderCam->ups [frame]); glm::vec3 B = glm::cross (A, eyeToProjCentre); float lenEyeToProjCentre = glm::length (eyeToProjCentre); ProjectionParams.halfVecH = glm::normalize (A) * lenEyeToProjCentre * (float)tan ((renderCam->fov.x*degToRad) / 2.0); ProjectionParams.halfVecV = glm::normalize (B) * lenEyeToProjCentre * (float)tan ((renderCam->fov.y*degToRad) / 2.0); // set up crucial magic int tileSize = 8; dim3 threadsPerBlock(tileSize, tileSize); dim3 fullBlocksPerGrid((int)ceil(float(renderCam->resolution.x)/float(tileSize))); //send image to GPU glm::vec3* cudaFinalImage = NULL; cudaMalloc((void**)&cudaFinalImage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3)); cudaMemcpy( cudaFinalImage, renderCam->image, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), cudaMemcpyHostToDevice); // package geometry to be sent to GPU global memory staticGeom* geomList = new staticGeom[numberOfGeoms]; sceneInfo primCounts; // Reorder geometry so that light is the first item in geomList, // followed by cubes and then spheres. Doing so reduces divergence. 
int count = 1; int lightIndex = 0; bool lightSet = false; for(int i=0; i<numberOfGeoms; i++) { if ((geoms [i].materialid == 8) && !lightSet) { staticGeom newStaticGeom; newStaticGeom.type = geoms[i].type; newStaticGeom.materialid = geoms[i].materialid; newStaticGeom.translation = geoms[i].translations[frame]; newStaticGeom.rotation = geoms[i].rotations[frame]; newStaticGeom.scale = geoms[i].scales[frame]; newStaticGeom.transform = geoms[i].transforms[frame]; newStaticGeom.inverseTransform = geoms[i].inverseTransforms[frame]; geomList[0] = newStaticGeom; lightSet = true; lightIndex = i; } else if (geoms [i].type == CUBE) { staticGeom newStaticGeom; newStaticGeom.type = geoms[i].type; newStaticGeom.materialid = geoms[i].materialid; newStaticGeom.translation = geoms[i].translations[frame]; newStaticGeom.rotation = geoms[i].rotations[frame]; newStaticGeom.scale = geoms[i].scales[frame]; newStaticGeom.transform = geoms[i].transforms[frame]; newStaticGeom.inverseTransform = geoms[i].inverseTransforms[frame]; geomList[count] = newStaticGeom; count ++; } } if (!lightSet) { geomList [0] = geomList [count-1]; count --; } // Lights may only be cubes. primCounts.nCubes = count; for(int i=0; i<numberOfGeoms; i++) { if (geoms [i].type == SPHERE) { staticGeom newStaticGeom; newStaticGeom.type = geoms[i].type; newStaticGeom.materialid = geoms[i].materialid; newStaticGeom.translation = geoms[i].translations[frame]; newStaticGeom.rotation = geoms[i].rotations[frame]; newStaticGeom.scale = geoms[i].scales[frame]; newStaticGeom.transform = geoms[i].transforms[frame]; newStaticGeom.inverseTransform = geoms[i].inverseTransforms[frame]; geomList[count] = newStaticGeom; count ++; } } primCounts.nSpheres = count - primCounts.nCubes; primCounts.nMeshes = 0; materials [geoms [lightIndex].materialid].color *= materials [geoms [lightIndex].materialid].emittance; // Allocate memory. We'll copy it later (because we're moving objects around for Motion blur). staticGeom* cudageoms = NULL; cudaMalloc((void**)&cudageoms, numberOfGeoms*sizeof(staticGeom)); cudaMemcpy( cudageoms, geomList, numberOfGeoms*sizeof(staticGeom), cudaMemcpyHostToDevice); // Copy materials to GPU global memory: material *materialColours = NULL; glm::vec3 *colourArray = NULL; int sizeOfMaterialsArr = numberOfMaterials * (sizeof (material)); cudaMalloc((void**)&materialColours, numberOfMaterials*sizeof(material)); checkCUDAError ("Could not create Materials Array!: "); cudaMemcpy (materialColours, materials, numberOfMaterials*sizeof(material), cudaMemcpyHostToDevice); // Package all the texture data into an array of structures. mytexture *textureList = new mytexture [numberOfTextures]; for (int i=0; i < numberOfTextures; i++) { textureList [i].texelWidth = textures [i].texelWidth; textureList [i].texelHeight = textures [i].texelHeight; // Malloc for texture data (RGB values) and store the pointer to device memory in texels. // So that when this structure is accessed from the device, the pointer reference is valid. int nTexelElements = textureList [i].texelWidth*textureList [i].texelHeight; cudaMalloc((void**)&textureList [i].texels, nTexelElements*sizeof(glm::vec3)); checkCUDAError ("Error allocing memory for texture data! "); cudaMemcpy (textureList [i].texels, textures [i].texels, nTexelElements*sizeof(glm::vec3), cudaMemcpyHostToDevice); } // Send the array of textures to the GPU. mytexture * textureArray = NULL; cudaMalloc((void**)&textureArray, numberOfTextures*sizeof(mytexture)); checkCUDAError ("Error allocing memory for texture array! 
"); cudaMemcpy (textureArray, textureList, numberOfTextures*sizeof(mytexture), cudaMemcpyHostToDevice); delete [] textureList; glm::vec3 lightPosInBodySpace = glm::vec3 (0, -0.6, 0); //package camera cameraData cam; cam.resolution = renderCam->resolution; cam.position = renderCam->positions[frame]; cam.view = renderCam->views[frame]; cam.up = renderCam->ups[frame]; cam.fov = renderCam->fov; unsigned int nIterations = renderCam->iterations; time_t startTime = time (NULL); std::default_random_engine randomNumGen (hash (startTime)); std::uniform_real_distribution<float> jitter ((float)0, (float)0.142); float movement = 3.0/nIterations; // For motion blur. int nBounces = 6; int oneEighthDivisor = nIterations / 8; // For antialiasing. int errCount = 0; // For each point sampled in the area light, launch the raytraceRay Kernel which will compute the diffuse, specular, ambient // and shadow colours. It will also compute reflected colours for reflective surfaces. for (int i = 0; i < nIterations; i ++) { glm::vec3* cudaimage = NULL; cudaMalloc((void**)&cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3)); cudaMemset (cudaimage, 0, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3)); float zAdd = jitter (randomNumGen); float xAdd = jitter (randomNumGen); glm::vec3 curLightSamplePos = lightPosInBodySpace; if (!(i%oneEighthDivisor)) // Supersampling at 8x! { cam.position.y += zAdd*0.002; cam.position.x += xAdd*0.002; } if (!((i*4)/(3*nIterations))) { // Motion blur! geomList [primCounts.nCubes].translation += glm::vec3 (movement, 0, 0); glm::mat4 transform = utilityCore::buildTransformationMatrix(geomList [primCounts.nCubes].translation, geomList [primCounts.nCubes].rotation, geomList [primCounts.nCubes].scale); geomList [primCounts.nCubes].transform = utilityCore::glmMat4ToCudaMat4(transform); geomList [primCounts.nCubes].inverseTransform = utilityCore::glmMat4ToCudaMat4(glm::inverse(transform)); } // Now copy the geometry list to the GPU global memory. cudaMemcpy( cudageoms, geomList, numberOfGeoms*sizeof(staticGeom), cudaMemcpyHostToDevice); // Create Ray Pool. int rayPoolLength = cam.resolution.x * cam.resolution.y; ray *rayPoolOnDevice = NULL; cudaMalloc ((void **)&rayPoolOnDevice, rayPoolLength * sizeof (ray)); // Primary Array -> Array holding the true/false value specifying whether the ray is alive (true) or dead (false). bool *primaryArrayOnHost = new bool [rayPoolLength]; memset (primaryArrayOnHost, true, rayPoolLength * sizeof(bool)); bool *primaryArrayOnDevice = NULL; cudaMalloc ((void **)&primaryArrayOnDevice, rayPoolLength * sizeof (bool)); // Secondary Array -> Array that will hold the indices of rays that are alive. Used in stream compaction. int *secondaryArrayOnDevice = NULL; cudaMalloc ((void **)&secondaryArrayOnDevice, rayPoolLength * sizeof (int)); int *secondaryArrayOnHost = new int [rayPoolLength]; // Launch createRayPool kernel to create the ray pool and populate the primary and secondary arrays. fullBlocksPerGrid = dim3 ((int)ceil(float(cam.resolution.x)/threadsPerBlock.x), (int)ceil(float(cam.resolution.y)/threadsPerBlock.y)); createRayPool<<<fullBlocksPerGrid, threadsPerBlock>>> (rayPoolOnDevice, primaryArrayOnDevice, secondaryArrayOnDevice, cam, ProjectionParams); dim3 threadsPerBlock1D (threadsPerBlock.x*threadsPerBlock.y); // Iterate until nBounces: launch kernel to trace each ray bounce. 
for (int j = 0; j < nBounces; ++j) { // The core raytraceRay kernel launch fullBlocksPerGrid = dim3 ((int)ceil(float(rayPoolLength)/(threadsPerBlock.x*threadsPerBlock.y))); raytraceRay<<<fullBlocksPerGrid, threadsPerBlock, threadsPerBlock.x*threadsPerBlock.y*(sizeof(glm::vec3) + sizeof (bool) + sizeof(ray))>>> ((float)j+(i*nBounces), cam, j, cudaimage, cudageoms, materialColours, textureArray, primCounts, primaryArrayOnDevice, rayPoolOnDevice, rayPoolLength); /// ----- CPU/GPU Hybrid Stream Compaction ----- /// // Scan is done on the CPU, the actual compaction happens on the GPU. // ------------------------------------------------------------------ // Copy the primary array from device to host. cudaMemcpy (primaryArrayOnHost, primaryArrayOnDevice, rayPoolLength * sizeof (bool), cudaMemcpyDeviceToHost); // Exclusive scan. secondaryArrayOnHost [0] = 0; for (int k = 1; k < rayPoolLength; ++ k) secondaryArrayOnHost [k] = secondaryArrayOnHost [k-1] + primaryArrayOnHost [k-1]; // This is because the compactStream kernel should run on the whole, uncompacted array. // We'll set this to rayPoolLength once compactStream has done its job. int compactedRayPoolLength = secondaryArrayOnHost [rayPoolLength-1] + primaryArrayOnHost [rayPoolLength-1]; // Stream compaction. Compact the ray pool into tmpRayPool. ray *tmpRayPool = NULL; cudaMalloc ((void **)&tmpRayPool, rayPoolLength * sizeof (ray)); cudaMemcpy (secondaryArrayOnDevice, secondaryArrayOnHost, rayPoolLength * sizeof (int), cudaMemcpyHostToDevice); compactStream<<<fullBlocksPerGrid, threadsPerBlock1D>>> (rayPoolOnDevice, tmpRayPool, primaryArrayOnDevice, secondaryArrayOnDevice, rayPoolLength); // Now set rayPoolLength to the compacted array size, compactedRayPoolLength. rayPoolLength = compactedRayPoolLength; // Copy the ray pool from tmpRayPool back into rayPoolOnDevice. copyArray<<<fullBlocksPerGrid, threadsPerBlock1D>>> (tmpRayPool, rayPoolOnDevice, rayPoolLength); cudaFree (tmpRayPool); // Set the primary array to all trues because all rays in the ray pool are alive, // now that stream compaction has already happened. cudaMemset (primaryArrayOnDevice, true, rayPoolLength * sizeof (bool)); } checkCUDAError ("One or more of the raytrace/stream compaction kernels failed. "); // At this point, since stream compaction has already taken place, // it means that rayPoolOnDevice contains only rays that are still alive. fullBlocksPerGrid = dim3 ((int)ceil(float(rayPoolLength)/(threadsPerBlock.x*threadsPerBlock.y))); addNoise<<<fullBlocksPerGrid,threadsPerBlock>>>(cudaimage, rayPoolOnDevice, rayPoolLength, cam.resolution); fullBlocksPerGrid = dim3 ((int)ceil(float(cam.resolution.x)/threadsPerBlock.x), (int)ceil(float(cam.resolution.y)/threadsPerBlock.y)); accumulateIterationColour<<<fullBlocksPerGrid, threadsPerBlock>>>(cudaFinalImage, cudaimage, cam.resolution); checkCUDAError("accumulateIterationColour Kernel failed!"); cudaFree (rayPoolOnDevice); cudaFree (primaryArrayOnDevice); cudaFree (secondaryArrayOnDevice); cudaFree (cudaimage); rayPoolOnDevice = NULL; primaryArrayOnDevice = NULL; cudaimage = NULL; delete [] primaryArrayOnHost; delete [] secondaryArrayOnHost; std::cout << "\rRendering.. " << ceil ((float)i/(nIterations-1) * 100) << " percent complete."; } // Accumulate all the colours in the cudaFinalImage memory block on the GPU, and divide // by the no. of light samples to get the final colour. 
sendImageToPBO<<<fullBlocksPerGrid, threadsPerBlock>>>(PBOpos, renderCam->resolution, cudaFinalImage, nIterations); std::cout.precision (4); std::cout << "\nRendered in " << difftime (time (NULL), startTime) << " seconds. \n\n"; //retrieve image from GPU cudaMemcpy( renderCam->image, cudaFinalImage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), cudaMemcpyDeviceToHost); //free up stuff, or else we'll leak memory like a madman if (cudaFinalImage) cudaFree( cudaFinalImage ); if (cudageoms) cudaFree( cudageoms ); if (materialColours) { cudaFree (materialColours); } if (textureArray) { cudaFree (textureArray); } cudaFinalImage = NULL; cudageoms = NULL; materialColours = NULL; // make certain the kernel has completed cudaThreadSynchronize(); delete [] geomList; checkCUDAError("Kernel failed!"); }
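One more optional tweak to the same loop, shown here against the CUDA version: primaryArrayOnHost is allocated with new[], i.e. pageable memory, so the per-bounce cudaMemcpy of the flags is staged through a driver bounce buffer. If the host-side scan is kept, a page-locked buffer makes that copy cheaper. This is a minimal sketch, assuming the buffer would be allocated once where new bool[rayPoolLength] is done today and freed with cudaFreeHost instead of delete[]; the wrapper function exists only for illustration.

#include <cuda_runtime.h>

// Sketch: page-locked staging buffer for the per-bounce copy of the alive-ray flags.
void copyAliveFlagsToHost(const bool *primaryArrayOnDevice, int rayPoolLength)
{
    bool *primaryArrayOnHost = NULL;
    // Pinned memory keeps the transfer at full bus bandwidth and would also let
    // cudaMemcpyAsync overlap with kernels if a non-default stream were used.
    cudaMallocHost((void **)&primaryArrayOnHost, rayPoolLength * sizeof(bool));

    cudaMemcpy(primaryArrayOnHost, primaryArrayOnDevice,
               rayPoolLength * sizeof(bool), cudaMemcpyDeviceToHost);

    // ... host-side exclusive scan over primaryArrayOnHost goes here ...

    cudaFreeHost(primaryArrayOnHost);
}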
58111d66fd137a9f9dd26a7ecf436aee2d5ae564.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
 * Copyright 2019 The Apollo Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *****************************************************************************/

#include <vector>

#include "modules/perception/inference/tensorrt/plugins/leakyReLU_plugin.h"

namespace apollo {
namespace perception {
namespace inference {

template <typename Dtype>
__global__ void ReLU(const int nthreads, const Dtype *in_data,
                     const float negative_slope, Dtype *out_data) {
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index < nthreads) {
    out_data[index] = in_data[index];
    if (out_data[index] < 0.0) {
      out_data[index] *= negative_slope;
    }
  }
}

int ReLUPlugin::enqueue(int batchSize, const void *const *inputs,
                        void **outputs, void *workspace, hipStream_t stream) {
  const int thread_size = 512;
  const int block_size =
      (input_dims_.d[0] * input_dims_.d[1] * input_dims_.d[2] * batchSize +
       thread_size - 1) /
      thread_size;
  const int nthreads =
      input_dims_.d[0] * input_dims_.d[1] * input_dims_.d[2] * batchSize;
  if (block_size <= 0) return 1;
  hipLaunchKernelGGL(( ReLU), dim3(block_size), dim3(thread_size), 0, stream,
      nthreads, (const float *)(inputs[0]), negative_slope_,
      reinterpret_cast<float *>(outputs[0]));
  return 1;
}

} // namespace inference
} // namespace perception
} // namespace apollo
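For readers comparing the two halves of this pair, the only functional difference is the launch syntax: hipLaunchKernelGGL(kernel, grid, block, sharedMemBytes, stream, args...) corresponds one-to-one to CUDA's triple-chevron launch. A side-by-side sketch, with in_ptr/out_ptr standing in for the casts applied to inputs[0]/outputs[0] in enqueue above:

// HIP spelling (as emitted by hipify above):
//   hipLaunchKernelGGL(ReLU, dim3(block_size), dim3(thread_size), 0, stream,
//                      nthreads, in_ptr, negative_slope_, out_ptr);
//
// Equivalent CUDA spelling (see the .cu half of this pair below):
//   ReLU<<<block_size, thread_size, 0, stream>>>(
//       nthreads, in_ptr, negative_slope_, out_ptr);
//
// The four arguments after the kernel name are exactly the launch-configuration
// slots of <<<grid, block, dynamicSharedBytes, stream>>>; everything after them
// is forwarded to the kernel unchanged.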
58111d66fd137a9f9dd26a7ecf436aee2d5ae564.cu
/******************************************************************************
 * Copyright 2019 The Apollo Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *****************************************************************************/

#include <vector>

#include "modules/perception/inference/tensorrt/plugins/leakyReLU_plugin.h"

namespace apollo {
namespace perception {
namespace inference {

template <typename Dtype>
__global__ void ReLU(const int nthreads, const Dtype *in_data,
                     const float negative_slope, Dtype *out_data) {
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index < nthreads) {
    out_data[index] = in_data[index];
    if (out_data[index] < 0.0) {
      out_data[index] *= negative_slope;
    }
  }
}

int ReLUPlugin::enqueue(int batchSize, const void *const *inputs,
                        void **outputs, void *workspace, cudaStream_t stream) {
  const int thread_size = 512;
  const int block_size =
      (input_dims_.d[0] * input_dims_.d[1] * input_dims_.d[2] * batchSize +
       thread_size - 1) /
      thread_size;
  const int nthreads =
      input_dims_.d[0] * input_dims_.d[1] * input_dims_.d[2] * batchSize;
  if (block_size <= 0) return 1;
  ReLU<<<block_size, thread_size, 0, stream>>>(
      nthreads, (const float *)(inputs[0]), negative_slope_,
      reinterpret_cast<float *>(outputs[0]));
  return 1;
}

} // namespace inference
} // namespace perception
} // namespace apollo
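The plugin's kernel is easy to sanity-check outside TensorRT. The harness below is a standalone sketch: it re-declares a non-templated copy of the leaky ReLU locally so the snippet compiles on its own, and reuses the 512-thread launch shape from ReLUPlugin::enqueue; nothing here is part of the plugin itself.

#include <cstdio>
#include <cuda_runtime.h>

// Local, non-templated copy of the element-wise leaky ReLU used by the plugin above.
__global__ void LeakyReluTest(const int n, const float *in, const float slope,
                              float *out) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    out[i] = in[i];
    if (out[i] < 0.0f) out[i] *= slope;
  }
}

int main() {
  const int n = 8;
  const float slope = 0.1f;
  float h_in[n] = {-4.f, -1.f, -0.5f, 0.f, 0.5f, 1.f, 2.f, 4.f};
  float h_out[n];

  float *d_in = NULL, *d_out = NULL;
  cudaMalloc(&d_in, n * sizeof(float));
  cudaMalloc(&d_out, n * sizeof(float));
  cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);

  // Same 512-thread launch shape as ReLUPlugin::enqueue above.
  LeakyReluTest<<<(n + 511) / 512, 512>>>(n, d_in, slope, d_out);
  cudaMemcpy(h_out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);

  for (int i = 0; i < n; ++i) {
    float expected = h_in[i] < 0.0f ? h_in[i] * slope : h_in[i];
    printf("%6.2f -> %6.2f (expected %6.2f)\n", h_in[i], h_out[i], expected);
  }
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}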
0aac155a298455eb45d84c25ca233a7fc15c8aea.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "GPUUtils.hpp"

// Huge props to wykvictor for this solution https://github.com/opencv/opencv/issues/6295#issuecomment-246647886
__global__ void inRangeCudaKernel(const cv::cuda::PtrStepSz<uchar3> src, cv::cuda::PtrStepSzb dst,
                                  int lbc0, int ubc0, int lbc1, int ubc1, int lbc2, int ubc2) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;

    if(x >= src.cols || y >= src.rows) return;

    uchar3 v = src(y, x);
    if(v.x >= lbc0 && v.x <= ubc0 && v.y >= lbc1 && v.y <= ubc1 && v.z >= lbc2 && v.z <= ubc2)
        dst(y, x) = 255;
    else
        dst(y, x) = 0;
}

void inRangeGPU(cv::cuda::GpuMat &src, cv::Scalar &lowerb, cv::Scalar &upperb, cv::cuda::GpuMat &dst) {
    const int m = 32;
    int numRows = src.rows, numCols = src.cols;
    if(numRows == 0 || numCols == 0) return;
    // Attention! Cols Vs. Rows are reversed
    const dim3 gridSize(ceil((float)numCols / m), ceil((float)numRows / m), 1);
    const dim3 blockSize(m, m, 1);

    hipLaunchKernelGGL(( inRangeCudaKernel), dim3(gridSize), dim3(blockSize), 0, 0, src, dst,
                       lowerb[0], upperb[0], lowerb[1], upperb[1], lowerb[2], upperb[2]);
}
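A quick sizing note on the launch in inRangeGPU above: blockSize is 32 x 32 = 1024 threads, the per-block maximum on most current GPUs, and the grid is rounded up per axis. For a hypothetical 1920 x 1080 frame, gridSize = (ceil(1920/32), ceil(1080/32), 1) = (60, 34, 1); the last row of blocks extends 8 rows past the image (34 * 32 = 1088), which is why the kernel's x >= src.cols || y >= src.rows guard must run before any access to src or dst.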
0aac155a298455eb45d84c25ca233a7fc15c8aea.cu
#include "GPUUtils.hpp"

// Huge props to wykvictor for this solution https://github.com/opencv/opencv/issues/6295#issuecomment-246647886
__global__ void inRangeCudaKernel(const cv::cuda::PtrStepSz<uchar3> src, cv::cuda::PtrStepSzb dst,
                                  int lbc0, int ubc0, int lbc1, int ubc1, int lbc2, int ubc2) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;

    if(x >= src.cols || y >= src.rows) return;

    uchar3 v = src(y, x);
    if(v.x >= lbc0 && v.x <= ubc0 && v.y >= lbc1 && v.y <= ubc1 && v.z >= lbc2 && v.z <= ubc2)
        dst(y, x) = 255;
    else
        dst(y, x) = 0;
}

void inRangeGPU(cv::cuda::GpuMat &src, cv::Scalar &lowerb, cv::Scalar &upperb, cv::cuda::GpuMat &dst) {
    const int m = 32;
    int numRows = src.rows, numCols = src.cols;
    if(numRows == 0 || numCols == 0) return;
    // Attention! Cols Vs. Rows are reversed
    const dim3 gridSize(ceil((float)numCols / m), ceil((float)numRows / m), 1);
    const dim3 blockSize(m, m, 1);

    inRangeCudaKernel<<<gridSize, blockSize>>>(src, dst,
                       lowerb[0], upperb[0], lowerb[1], upperb[1], lowerb[2], upperb[2]);
}
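A host-side usage sketch for the .cu version above, assuming OpenCV was built with its CUDA modules and that GPUUtils.hpp declares inRangeGPU; the file name and HSV bounds are placeholders. Note that dst is written in place by the kernel, so it has to be allocated as an 8-bit single-channel GpuMat before the call.

#include <opencv2/core/cuda.hpp>
#include <opencv2/cudaimgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include "GPUUtils.hpp"

int main() {
    cv::Mat bgr = cv::imread("frame.png");        // placeholder input image
    if (bgr.empty()) return 1;

    cv::cuda::GpuMat d_bgr, d_hsv, d_mask;
    d_bgr.upload(bgr);
    cv::cuda::cvtColor(d_bgr, d_hsv, cv::COLOR_BGR2HSV);

    // Pre-allocate the mask: inRangeGPU writes into dst but does not create it.
    d_mask.create(d_hsv.size(), CV_8UC1);

    cv::Scalar lower(35, 50, 50), upper(85, 255, 255);   // placeholder green range
    inRangeGPU(d_hsv, lower, upper, d_mask);

    cv::Mat mask;
    d_mask.download(mask);
    cv::imwrite("mask.png", mask);
    return 0;
}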
664a806d90cd315567d022be4cfc620c3a7d4fa1.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <hip/hip_runtime.h>

// kernels are C++ functions defined with CUDA
// They will be called with << >>()

// hipGetDeviceCount (int* count)
// Returns the number of compute-capable devices

// hipGetDeviceProperties (hipDeviceProp_t* prop, int device)
// Returns information about the compute-device.

// Program that gives the information of the GPUs on the boards
int main()
{
    int devices;
    hipDeviceProp_t prop;

    try
    {
        hipGetDeviceCount(&devices);

        // Get information of all the Nvidia devices on the computer
        for(int device = 0; device < devices; device++)
        {
            hipGetDeviceProperties(&prop, device);

            // using std::cout as a display function
            // using std::endl as a end of line character
            std::cout << "Device Number : " << device << std::endl;
            std::cout << "Device name : " << prop.name << std::endl;
            std::cout << "Memory Clock Rate (KHz) : " << prop.memoryClockRate << std::endl;
            std::cout << "Global Memory size (bits) : " << prop.memoryBusWidth << std::endl;
            // get the warp size, i.e. the number of threads in a warp
            std::cout << "Warp Size : " << prop.warpSize << std::endl;
            std::cout << "Peak Memory Bandwidth (GB/s) : " << 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6 << std::endl;
        }
    }
    catch (const hipError_t & e)
    {
        std::cerr << e;
    }

    return 0;
}

/*
Device Number : 0
Device name : GeForce RTX 2060 SUPER
Memory Clock Rate (KHz) : 7001000
Global Memory size (bits) : 256
Warp Size : 32
Peak Memory Bandwidth (GB/s) : 448.064
*/
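The bandwidth line in the sample output above can be reproduced by hand: memoryClockRate is reported in kHz, so 2 x 7,001,000 kHz x (256 bits / 8) = 448,064,000 kB/s, and dividing by 10^6 gives the printed 448.064 GB/s. The conventional factor of 2 accounts for the double data rate of GDDR memory, which transfers data on both clock edges.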
664a806d90cd315567d022be4cfc620c3a7d4fa1.cu
#include <stdio.h>
#include <iostream>
#include <cuda_runtime.h>

// kernels are C++ functions defined with CUDA
// They will be called with << >>()

// cudaGetDeviceCount (int* count)
// Returns the number of compute-capable devices

// cudaGetDeviceProperties (cudaDeviceProp* prop, int device)
// Returns information about the compute-device.

// Program that gives the information of the GPUs on the boards
int main()
{
    int devices;
    cudaDeviceProp prop;

    try
    {
        cudaGetDeviceCount(&devices);

        // Get information of all the Nvidia devices on the computer
        for(int device = 0; device < devices; device++)
        {
            cudaGetDeviceProperties(&prop, device);

            // using std::cout as a display function
            // using std::endl as a end of line character
            std::cout << "Device Number : " << device << std::endl;
            std::cout << "Device name : " << prop.name << std::endl;
            std::cout << "Memory Clock Rate (KHz) : " << prop.memoryClockRate << std::endl;
            std::cout << "Global Memory size (bits) : " << prop.memoryBusWidth << std::endl;
            // get the warp size, i.e. the number of threads in a warp
            std::cout << "Warp Size : " << prop.warpSize << std::endl;
            std::cout << "Peak Memory Bandwidth (GB/s) : " << 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6 << std::endl;
        }
    }
    catch (const cudaError_t & e)
    {
        std::cerr << e;
    }

    return 0;
}

/*
Device Number : 0
Device name : GeForce RTX 2060 SUPER
Memory Clock Rate (KHz) : 7001000
Global Memory size (bits) : 256
Warp Size : 32
Peak Memory Bandwidth (GB/s) : 448.064
*/
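One caveat that applies to both halves of this pair: the CUDA and HIP runtime APIs report failures through returned error codes, not C++ exceptions, so the catch (const cudaError_t &) / catch (const hipError_t &) blocks above can never fire. A minimal return-code check looks like the sketch below; the CHECK_CUDA macro name is an invention of this sketch, not an API.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Check the cudaError_t returned by every runtime-API call.
#define CHECK_CUDA(call)                                                \
  do {                                                                  \
    cudaError_t err_ = (call);                                          \
    if (err_ != cudaSuccess) {                                          \
      std::fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,           \
                   cudaGetErrorString(err_));                           \
      std::exit(EXIT_FAILURE);                                          \
    }                                                                   \
  } while (0)

int main() {
  int devices = 0;
  CHECK_CUDA(cudaGetDeviceCount(&devices));

  for (int device = 0; device < devices; ++device) {
    cudaDeviceProp prop;
    CHECK_CUDA(cudaGetDeviceProperties(&prop, device));
    std::printf("Device %d: %s, warp size %d\n", device, prop.name, prop.warpSize);
  }
  return 0;
}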
e473a1432a319b557670ff9420c4610b3547f604.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2017 Stanford, NVIDIA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "model.h" #include "cuda_helper.h" Tensor FFModel::batch_norm(const Tensor& input, bool relu, const char* name) { assert(input.numDim == 4); //Only support 4D BN for now BatchNorm *bn = new BatchNorm(*this, input, relu, name); layers.push_back(bn); return bn->outputs[0]; } /* locals[0] = scale locals[1] = bias */ BatchNorm::BatchNorm(FFModel& model, const Tensor& _input, bool _relu, const char* name) : Op(model, OP_BATCHNORM, name, _input), relu(_relu) { assert(_input.numDim == 4); numOutputs = 1; outputs[0].numDim = _input.numDim; for (int i = 0; i < outputs[0].numDim; i++) outputs[0].adim[i] = _input.adim[i]; numWeights = 2; weights[0].numDim = 1; weights[0].adim[0] = _input.adim[2]; weights[1].numDim = 1; weights[1].adim[0] = _input.adim[2]; return; #ifdef DEADCODE // Create output tensor int output_w = _input.adim[0]; int output_h = _input.adim[1]; int output_c = _input.adim[2]; int output_n = _input.adim[3]; FieldSpace fs = model.config.field_space; Rect<4> output_rect(Point<4>(0, 0, 0, 0), Point<4>(output_w-1, output_h-1, output_c-1, output_n-1)); IndexSpaceT<4> output_is = runtime->create_index_space(ctx, output_rect); LogicalRegion output_lr = runtime->create_logical_region(ctx, output_is, fs); LogicalRegion output_grad_lr = runtime->create_logical_region(ctx, output_is, fs); int extent_w = (output_w + num_par_w - 1) / num_par_w; int extent_h = (output_h + num_par_h - 1) / num_par_h; int extent_c = output_c / num_par_c; int extent_n = output_n / num_par_n; assert(output_c % num_par_c == 0); assert(output_n % num_par_n == 0); Rect<4> ext(Point<4>(0, 0, 0, 0), Point<4>(extent_w-1, extent_h-1, extent_c-1, extent_n-1)); Transform<4, 4, coord_t> trans; for (int i = 0; i < 4; i++) for (int j = 0; j < 4; j++) trans[i][j] = 0; trans[0][0] = extent_w; trans[1][1] = extent_h; trans[2][2] = extent_c; trans[3][3] = extent_n; IndexPartition output_ip = runtime->create_partition_by_restriction(ctx, output_is, task_is, trans, ext); assert(runtime->is_index_partition_disjoint(ctx, output_ip)); assert(runtime->is_index_partition_complete(ctx, output_ip)); LogicalPartition output_lp = runtime->get_logical_partition(ctx, output_lr, output_ip); LogicalPartition output_grad_lp = runtime->get_logical_partition(ctx, output_grad_lr, output_ip); int bias_nc = num_replica * _input.adim[2]; /*input_channels*/ Rect<1, coord_t> bias_grad_rect(0, bias_nc - 1); Rect<1, coord_t> bias_rect(0, _input.adim[2] - 1); IndexSpaceT<1> bias_is = runtime->create_index_space(ctx, bias_rect); IndexSpaceT<1> bias_grad_is = runtime->create_index_space(ctx, bias_grad_rect); LogicalRegion bias_lr = runtime->create_logical_region(ctx, bias_is, fs); LogicalRegion scale_lr = runtime->create_logical_region(ctx, bias_is, fs); LogicalRegion bias_grad_lr = runtime->create_logical_region(ctx, bias_grad_is, fs); LogicalRegion scale_grad_lr = 
runtime->create_logical_region(ctx, bias_grad_is, fs); IndexPartition bias_grad_ip = runtime->create_equal_partition(ctx, bias_grad_is, task_is); LogicalPartition bias_grad_lp = runtime->get_logical_partition(ctx, bias_grad_lr, bias_grad_ip); LogicalPartition scale_grad_lp = runtime->get_logical_partition(ctx, scale_grad_lr, bias_grad_ip); Parameter scale_tensor, bias_tensor; scale_tensor.region = scale_lr; scale_tensor.region_grad = scale_grad_lr; scale_tensor.part = LogicalPartition::NO_PART; scale_tensor.part_grad = scale_grad_lp; weights[0] = scale_tensor; bias_tensor.region = bias_lr; bias_tensor.region_grad = bias_grad_lr; bias_tensor.part = LogicalPartition::NO_PART; bias_tensor.part_grad = bias_grad_lp; weights[1] = bias_tensor; numWeights = 2; outputs[0] = _input; outputs[0].region = output_lr; outputs[0].part = output_lp; outputs[0].region_grad = output_grad_lr; outputs[0].part_grad = output_grad_lp; printf("Create bn layer: output(%d %d %d %d)\n", outputs[0].adim[3], outputs[0].adim[2], outputs[0].adim[1], outputs[0].adim[0]); input_lps[0] = _input.part; #endif } void BatchNorm::create_weights(FFModel& model) { // Retrive the task indexspace for the op std::string pcname = name; task_is = IndexSpaceT<4>(model.get_or_create_task_is(4, pcname)); // Create scale and bias Initializer* scale_initializer = new ConstantInitializer(1.0f); Initializer* bias_initializer = new ConstantInitializer(0.0f); const int dims[1] = {outputs[0].adim[2]}; weights[0] = model.create_conv_weight<1>(this, dims, DT_FLOAT, scale_initializer); weights[1] = model.create_conv_weight<1>(this, dims, DT_FLOAT, bias_initializer); } void BatchNorm::create_output_and_partition(FFModel& model) { // Retrive the task indexspace for the op std::string pcname = name; task_is = IndexSpaceT<4>(model.get_or_create_task_is(4, pcname)); Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; Rect<4> part_rect = runtime->get_index_space_domain(ctx, task_is); // Create output tensor int output_w = outputs[0].adim[0]; int output_h = outputs[0].adim[1]; int output_c = outputs[0].adim[2]; int output_n = outputs[0].adim[3]; int num_par_w = part_rect.hi[0] - part_rect.lo[0] + 1; int num_par_h = part_rect.hi[1] - part_rect.lo[1] + 1; int num_par_c = part_rect.hi[2] - part_rect.lo[2] + 1; int num_par_n = part_rect.hi[3] - part_rect.lo[3] + 1; { const int dims[4] = {output_n, output_c, output_h, output_w}; outputs[0] = model.create_tensor<4>(dims, DT_FLOAT, this); outputs[0].owner_op = this; outputs[0].owner_idx = 0; } // Currently assume data parallelism for batch norm assert(num_par_w == 1); assert(num_par_h == 1); assert(num_par_c == 1); // Compute partition bound for input Rect<4> input_rect = runtime->get_index_partition_color_space( ctx, inputs[0].part.get_index_partition()); if (input_rect == part_rect) { input_lps[0] = inputs[0].part; input_grad_lps[0] = inputs[0].part_grad; } else { model.create_disjoint_partition( inputs[0], (IndexSpaceT<4>)task_is, input_lps[0], input_grad_lps[0]); } } /* regions[0]: input regions[1]: output regions[2](I): scale regions[3](I): bias */ __host__ OpMeta* BatchNorm::init_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 4); assert(task->regions.size() == 4); const BatchNorm* bm = (BatchNorm*) task->args; FFHandler handle = *((const FFHandler*) task->local_args); TensorAccessorR<float, 4> acc_input( regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorW<float, 4> acc_output( regions[1], 
task->regions[1], FID_DATA, ctx, runtime); TensorAccessorR<float, 1> acc_scale( regions[2], task->regions[2], FID_DATA, ctx, runtime); TensorAccessorR<float, 1> acc_bias( regions[3], task->regions[3], FID_DATA, ctx, runtime); BatchNormMeta* m = new BatchNormMeta(handle); bm->init_meta(m, acc_input.rect, acc_output.rect, acc_scale.rect, acc_bias.rect); m->numChannels = acc_output.rect.hi[2] - acc_output.rect.lo[2] + 1; m->profiling = bm->profiling; return m; } /* regions[0](O): scale, initilized to ones regions[1](O): bias, initilized to zeros */ __host__ void BatchNorm::init_para_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 2); assert(task->regions.size() == 2); //const BatchNorm* bm = (BatchNorm*) task->args; const AccessorWO<float, 1> acc_scale(regions[0], FID_DATA); const AccessorWO<float, 1> acc_bias(regions[1], FID_DATA); Rect<1> rect_scale, rect_bias; rect_scale = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space()); rect_bias = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space()); assert(acc_scale.accessor.is_dense_arbitrary(rect_scale)); assert(acc_bias.accessor.is_dense_arbitrary(rect_bias)); float *scale_ptr = acc_scale.ptr(rect_scale.lo); float *bias_ptr = acc_bias.ptr(rect_bias.lo); // init kernel and bias #ifdef PARAMETER_ALL_ONES hipLaunchKernelGGL(( ones_kernel), dim3(GET_BLOCKS(rect_scale.volume())), dim3(CUDA_NUM_THREADS), 0, 0, scale_ptr, rect_scale.volume()); hipLaunchKernelGGL(( ones_kernel), dim3(GET_BLOCKS(rect_bias.volume())), dim3(CUDA_NUM_THREADS), 0, 0, bias_ptr, rect_bias.volume()); #else //hipStream_t stream; //checkCUDA(hipStreamCreate(&stream)); //hiprandGenerator_t genGPU; //hiprandCreateGenerator(&genGPU, HIPRAND_RNG_PSEUDO_DEFAULT); //hiprandSetStream(genGPU, stream); //hiprandSetPseudoRandomGeneratorSeed(genGPU, 1234ULL); //hiprandGenerateUniform(genGPU, scale_ptr, rect_scale.volume()); hipLaunchKernelGGL(( assign_kernel), dim3(GET_BLOCKS(rect_scale.volume())), dim3(CUDA_NUM_THREADS), 0, 0, scale_ptr, rect_scale.volume(), 1.0f); hipLaunchKernelGGL(( assign_kernel), dim3(GET_BLOCKS(rect_bias.volume())), dim3(CUDA_NUM_THREADS), 0, 0, bias_ptr, rect_bias.volume(), 0.0f); //hiprandDestroyGenerator(genGPU); #endif } __host__ void BatchNorm::init(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<4> rect = runtime->get_index_space_domain(ctx, task_is); ParallelConfig pc; std::string pcname = name; ff.config.find_parallel_config(4, pcname, pc); int idx = 0; for (PointInRectIterator<4> it(rect); it(); it++) { FFHandler handle = ff.handlers[pc.device_ids[idx++]]; argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler))); } IndexLauncher launcher(BATCHNORM_INIT_TASK_ID, task_is, TaskArgument(this, sizeof(BatchNorm)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(weights[0].region, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[0].region)); launcher.add_field(2, FID_DATA); launcher.add_region_requirement( 
RegionRequirement(weights[1].region, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[1].region)); launcher.add_field(3, FID_DATA); FutureMap fm = runtime->execute_index_space(ctx, launcher); fm.wait_all_results(); idx = 0; for (PointInRectIterator<4> it(rect); it(); it++) { meta[idx++] = fm.get_result<OpMeta*>(*it); } } /*static*/ void BatchNorm::forward_kernel(BatchNormMeta *m, float const *input_ptr, float *output_ptr, float const *scale_ptr, float const *bias_ptr) { float alpha = 1.0f, beta = 0.0f; coord_t numChannels = m->numChannels; hipLaunchKernelGGL(( assign_kernel), dim3(GET_BLOCKS(numChannels)), dim3(CUDA_NUM_THREADS), 0, 0, m->runningMean, numChannels, 0.0f); hipLaunchKernelGGL(( assign_kernel), dim3(GET_BLOCKS(numChannels)), dim3(CUDA_NUM_THREADS), 0, 0, m->runningVar, numChannels, 0.0f); checkCUDNN(cudnnBatchNormalizationForwardTraining( m->handle.dnn, m->mode, &alpha, &beta, m->inputTensor, input_ptr, m->outputTensor, output_ptr, m->biasTensor, scale_ptr, bias_ptr, 1.0, m->runningMean, m->runningVar, CUDNN_BN_MIN_EPSILON, m->saveMean, m->saveVar)); } /* regions[0](I): input regions[1](O): ouptut regions[2](I): scale regions[3](I): bias */ __host__ void BatchNorm::forward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 4); assert(task->regions.size() == 4); //const BatchNorm* bm = (BatchNorm*) task->args; BatchNormMeta* m = *((BatchNormMeta**) task->local_args); TensorAccessorR<float, 4> acc_input( regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorW<float, 4> acc_output( regions[1], task->regions[1], FID_DATA, ctx, runtime); TensorAccessorR<float, 1> acc_scale( regions[2], task->regions[2], FID_DATA, ctx, runtime); TensorAccessorR<float, 1> acc_bias( regions[3], task->regions[3], FID_DATA, ctx, runtime); hipEvent_t t_start, t_end; if (m->profiling) { hipEventCreate(&t_start); hipEventCreate(&t_end); hipEventRecord(t_start); } #ifndef DISABLE_LEGION_CUDA_HIJACK hipStream_t stream; checkCUDA(hipStreamCreate(&stream)); checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); #endif forward_kernel(m, acc_input.ptr, acc_output.ptr, acc_scale.ptr, acc_bias.ptr); if (m->profiling) { hipEventRecord(t_end); checkCUDA(hipEventSynchronize(t_end)); float elapsed = 0; checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end)); hipEventDestroy(t_start); hipEventDestroy(t_end); printf("BatchNorm forward time (BF) = %.2fms\n", elapsed); } } __host__ void BatchNorm::forward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<4> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<4> it(rect); it(); it++) { OpMeta* mp = meta[idx++]; argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); } IndexLauncher launcher(BATCHNORM_FWD_TASK_ID, task_is, TaskArgument(NULL, 0), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_DISCARD, EXCLUSIVE, outputs[0].region)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(weights[0].region, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[0].region)); launcher.add_field(2, FID_DATA); launcher.add_region_requirement( 
RegionRequirement(weights[1].region, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[1].region)); launcher.add_field(3, FID_DATA); runtime->execute_index_space(ctx, launcher); } /*static*/ void BatchNorm::backward_kernel(BatchNormMeta *m, float const *input_ptr, float *output_grad_ptr, float const *output_ptr, float *input_grad_ptr, float const *scale_ptr, float *scale_grad_ptr, float *bias_grad_ptr, size_t numElements) { float alpha = 1.0f; if (m->relu) { hipLaunchKernelGGL(( reluBackward), dim3(GET_BLOCKS(numElements)), dim3(CUDA_NUM_THREADS), 0, 0, output_grad_ptr, output_ptr, numElements); } checkCUDNN(cudnnBatchNormalizationBackward( m->handle.dnn, m->mode, &alpha, &alpha, &alpha, &alpha, m->inputTensor, input_ptr, m->outputTensor, output_grad_ptr, m->inputTensor, input_grad_ptr, m->biasTensor, scale_ptr, scale_grad_ptr, bias_grad_ptr, CUDNN_BN_MIN_EPSILON, m->saveMean, m->saveVar)); } /* regions[0](I): input regions[1](I/O): input_grad regions[2](I): output regions[3](I/O): output_grad regions[4](I): scale regions[5](I/O): scale_grad regions[6](I/O): bias_grad */ __host__ void BatchNorm::backward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 7); assert(task->regions.size() == 7); //float beta = 0.0f; //const BatchNorm* bm = (BatchNorm*) task->args; BatchNormMeta* m = *((BatchNormMeta**) task->local_args); TensorAccessorR<float, 4> acc_input( regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorW<float, 4> acc_input_grad( regions[1], task->regions[1], FID_DATA, ctx, runtime, true/*readOutput*/); TensorAccessorR<float, 4> acc_output( regions[2], task->regions[2], FID_DATA, ctx, runtime); TensorAccessorW<float, 4> acc_output_grad( regions[3], task->regions[3], FID_DATA, ctx, runtime, true/*readOutput*/); TensorAccessorR<float, 1> acc_scale( regions[4], task->regions[4], FID_DATA, ctx, runtime); TensorAccessorW<float, 1> acc_scale_grad( regions[5], task->regions[5], FID_DATA, ctx, runtime, true/*readOutput*/); TensorAccessorW<float, 1> acc_bias_grad( regions[6], task->regions[6], FID_DATA, ctx, runtime, true/*readOutput*/); hipEvent_t t_start, t_end; if (m->profiling) { hipEventCreate(&t_start); hipEventCreate(&t_end); hipEventRecord(t_start); } #ifndef DISABLE_LEGION_CUDA_HIJACK hipStream_t stream; checkCUDA(hipStreamCreate(&stream)); checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); #endif backward_kernel(m, acc_input.ptr, acc_output_grad.ptr, acc_output.ptr, acc_input_grad.ptr, acc_scale.ptr, acc_scale_grad.ptr, acc_bias_grad.ptr, acc_output.rect.volume()); if (m->profiling) { hipEventRecord(t_end); checkCUDA(hipEventSynchronize(t_end)); float elapsed = 0; checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end)); hipEventDestroy(t_start); hipEventDestroy(t_end); printf("BatchNorm backward time = %.2fms\n", elapsed); } } __host__ void BatchNorm::backward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<4> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<4> it(rect); it(); it++) { OpMeta* mp = meta[idx++]; argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); } IndexLauncher launcher(BATCHNORM_BWD_TASK_ID, task_is, TaskArgument(NULL, 0), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); // regions[0](I): input launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, 
inputs[0].region)); launcher.add_field(0, FID_DATA); // regions[1](I/O): input_grad (we only need grad tensors) launcher.add_region_requirement( RegionRequirement(inputs[0].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[0].region_grad)); launcher.add_field(1, FID_DATA); // regions[2](I): output launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(2, FID_DATA); // regions[3](I/O): output_grad launcher.add_region_requirement( RegionRequirement(outputs[0].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, outputs[0].region_grad)); launcher.add_field(3, FID_DATA); // regions[4](I): filter launcher.add_region_requirement( RegionRequirement(weights[0].region, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[0].region)); launcher.add_field(4, FID_DATA); // regions[5](I/O): filter_grad launcher.add_region_requirement( RegionRequirement(weights[0].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, weights[0].region_grad)); launcher.add_field(5, FID_DATA); // regions[6](I/O): bias_grad launcher.add_region_requirement( RegionRequirement(weights[1].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, weights[1].region_grad)); launcher.add_field(6, FID_DATA); FutureMap fm = runtime->execute_index_space(ctx, launcher); } void BatchNorm::init_meta(BatchNormMeta *m, Rect<4> const &input, Rect<4> const &output, Rect<1> const &scale, Rect<1> const &bias) const { m->relu = this->relu; m->mode = CUDNN_BATCHNORM_SPATIAL; #if CUDNN_VERSION >= 7000 m->mode = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; #endif assert (input == output); int input_w = input.hi[0] - input.lo[0] + 1; int input_h = input.hi[1] - input.lo[1] + 1; int input_c = input.hi[2] - input.lo[2] + 1; int input_n = input.hi[3] - input.lo[3] + 1; int output_w = output.hi[0] - output.lo[0] + 1; int output_h = output.hi[1] - output.lo[1] + 1; int output_c = output.hi[2] - output.lo[2] + 1; int output_n = output.hi[3] - output.lo[3] + 1; checkCUDNN(cudnnSetTensor4dDescriptor(m->inputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_n, input_c, input_h, input_w)); checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, output_n, output_c, output_h, output_w)); checkCUDNN(cudnnSetTensor4dDescriptor(m->biasTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, output_c, 1, 1)); checkCUDA(hipMalloc(&m->runningMean, sizeof(float) * output_c)); checkCUDA(hipMalloc(&m->runningVar, sizeof(float) * output_c)); checkCUDA(hipMalloc(&m->saveMean, sizeof(float) * output_c)); checkCUDA(hipMalloc(&m->saveVar, sizeof(float) * output_c)); if (m->relu) { checkCUDNN(cudnnCreateActivationDescriptor(&m->actiDesc)); checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0.0)); } } BatchNormMeta::BatchNormMeta(FFHandler handler) : OpMeta(handler) { checkCUDNN(cudnnCreateTensorDescriptor(&inputTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&biasTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&outputTensor)); checkCUDNN(cudnnCreateActivationDescriptor(&actiDesc)); } bool BatchNorm::measure_operator_cost(Simulator* sim, const ParallelConfig& pc, CostMetrics& cost_metrics) { Tensor sub_input, sub_output; if (!outputs[0].get_output_sub_tensor(pc, sub_output, op_type)) { return false; } if (!inputs[0].get_input_sub_tensor(pc, sub_input, op_type)) { return false; } BatchNormMeta *m = sim->batch_norm_meta; m->numChannels = sub_output.adim[2]; Rect<1> scale_rect(Point<1>(0), 
Point<1>(m->numChannels-1)); // scale and bias have same dimensions this->init_meta(m, sub_input.get_domain(), sub_output.get_domain(), scale_rect, scale_rect); sim->free_all(); float *input_ptr = (float *)sim->allocate(sub_input.get_volume(), DT_FLOAT); assert (input_ptr != NULL); float *output_ptr = (float *)sim->allocate(sub_output.get_volume(), DT_FLOAT); assert (output_ptr != NULL); float *bias_ptr = (float *)sim->allocate(m->numChannels, DT_FLOAT); assert (bias_ptr != NULL); float *scale_ptr = (float *)sim->allocate(m->numChannels, DT_FLOAT); assert (scale_ptr != NULL); std::function<void()> forward, backward; forward = [&] { forward_kernel(m, input_ptr, output_ptr, scale_ptr, bias_ptr); }; if (sim->computationMode == COMP_MODE_TRAINING) { float *input_grad_ptr = (float *)sim->allocate(sub_input.get_volume(), DT_FLOAT); assert (input_grad_ptr != NULL); float *output_grad_ptr = (float *)sim->allocate(sub_output.get_volume(), DT_FLOAT); assert (output_grad_ptr != NULL); float *scale_grad_ptr = (float *)sim->allocate(m->numChannels, DT_FLOAT); assert (scale_grad_ptr != NULL); float *bias_grad_ptr = (float *)sim->allocate(m->numChannels, DT_FLOAT); assert (bias_grad_ptr != NULL); backward = [&] { backward_kernel(m, input_ptr, output_grad_ptr, output_ptr, input_grad_ptr, scale_ptr, scale_grad_ptr, bias_grad_ptr, sub_output.get_volume()); }; } inner_measure_operator_cost(sim, forward, backward, cost_metrics); if (sim->computationMode == COMP_MODE_TRAINING) { printf("[Measure BatchNorm] name(%s) size(%zu) forward_time(%.4lf) backward_time(%.4lf)\n", name, sub_input.get_volume(), cost_metrics.forward_time, cost_metrics.backward_time); } else { printf("[Measure BatchNorm] name(%s) size(%zu) forward_time(%.4lf)\n", name, sub_input.get_volume(), cost_metrics.forward_time); } return true; }
e473a1432a319b557670ff9420c4610b3547f604.cu
/* Copyright 2017 Stanford, NVIDIA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "model.h" #include "cuda_helper.h" Tensor FFModel::batch_norm(const Tensor& input, bool relu, const char* name) { assert(input.numDim == 4); //Only support 4D BN for now BatchNorm *bn = new BatchNorm(*this, input, relu, name); layers.push_back(bn); return bn->outputs[0]; } /* locals[0] = scale locals[1] = bias */ BatchNorm::BatchNorm(FFModel& model, const Tensor& _input, bool _relu, const char* name) : Op(model, OP_BATCHNORM, name, _input), relu(_relu) { assert(_input.numDim == 4); numOutputs = 1; outputs[0].numDim = _input.numDim; for (int i = 0; i < outputs[0].numDim; i++) outputs[0].adim[i] = _input.adim[i]; numWeights = 2; weights[0].numDim = 1; weights[0].adim[0] = _input.adim[2]; weights[1].numDim = 1; weights[1].adim[0] = _input.adim[2]; return; #ifdef DEADCODE // Create output tensor int output_w = _input.adim[0]; int output_h = _input.adim[1]; int output_c = _input.adim[2]; int output_n = _input.adim[3]; FieldSpace fs = model.config.field_space; Rect<4> output_rect(Point<4>(0, 0, 0, 0), Point<4>(output_w-1, output_h-1, output_c-1, output_n-1)); IndexSpaceT<4> output_is = runtime->create_index_space(ctx, output_rect); LogicalRegion output_lr = runtime->create_logical_region(ctx, output_is, fs); LogicalRegion output_grad_lr = runtime->create_logical_region(ctx, output_is, fs); int extent_w = (output_w + num_par_w - 1) / num_par_w; int extent_h = (output_h + num_par_h - 1) / num_par_h; int extent_c = output_c / num_par_c; int extent_n = output_n / num_par_n; assert(output_c % num_par_c == 0); assert(output_n % num_par_n == 0); Rect<4> ext(Point<4>(0, 0, 0, 0), Point<4>(extent_w-1, extent_h-1, extent_c-1, extent_n-1)); Transform<4, 4, coord_t> trans; for (int i = 0; i < 4; i++) for (int j = 0; j < 4; j++) trans[i][j] = 0; trans[0][0] = extent_w; trans[1][1] = extent_h; trans[2][2] = extent_c; trans[3][3] = extent_n; IndexPartition output_ip = runtime->create_partition_by_restriction(ctx, output_is, task_is, trans, ext); assert(runtime->is_index_partition_disjoint(ctx, output_ip)); assert(runtime->is_index_partition_complete(ctx, output_ip)); LogicalPartition output_lp = runtime->get_logical_partition(ctx, output_lr, output_ip); LogicalPartition output_grad_lp = runtime->get_logical_partition(ctx, output_grad_lr, output_ip); int bias_nc = num_replica * _input.adim[2]; /*input_channels*/ Rect<1, coord_t> bias_grad_rect(0, bias_nc - 1); Rect<1, coord_t> bias_rect(0, _input.adim[2] - 1); IndexSpaceT<1> bias_is = runtime->create_index_space(ctx, bias_rect); IndexSpaceT<1> bias_grad_is = runtime->create_index_space(ctx, bias_grad_rect); LogicalRegion bias_lr = runtime->create_logical_region(ctx, bias_is, fs); LogicalRegion scale_lr = runtime->create_logical_region(ctx, bias_is, fs); LogicalRegion bias_grad_lr = runtime->create_logical_region(ctx, bias_grad_is, fs); LogicalRegion scale_grad_lr = runtime->create_logical_region(ctx, bias_grad_is, fs); IndexPartition bias_grad_ip = 
runtime->create_equal_partition(ctx, bias_grad_is, task_is); LogicalPartition bias_grad_lp = runtime->get_logical_partition(ctx, bias_grad_lr, bias_grad_ip); LogicalPartition scale_grad_lp = runtime->get_logical_partition(ctx, scale_grad_lr, bias_grad_ip); Parameter scale_tensor, bias_tensor; scale_tensor.region = scale_lr; scale_tensor.region_grad = scale_grad_lr; scale_tensor.part = LogicalPartition::NO_PART; scale_tensor.part_grad = scale_grad_lp; weights[0] = scale_tensor; bias_tensor.region = bias_lr; bias_tensor.region_grad = bias_grad_lr; bias_tensor.part = LogicalPartition::NO_PART; bias_tensor.part_grad = bias_grad_lp; weights[1] = bias_tensor; numWeights = 2; outputs[0] = _input; outputs[0].region = output_lr; outputs[0].part = output_lp; outputs[0].region_grad = output_grad_lr; outputs[0].part_grad = output_grad_lp; printf("Create bn layer: output(%d %d %d %d)\n", outputs[0].adim[3], outputs[0].adim[2], outputs[0].adim[1], outputs[0].adim[0]); input_lps[0] = _input.part; #endif } void BatchNorm::create_weights(FFModel& model) { // Retrive the task indexspace for the op std::string pcname = name; task_is = IndexSpaceT<4>(model.get_or_create_task_is(4, pcname)); // Create scale and bias Initializer* scale_initializer = new ConstantInitializer(1.0f); Initializer* bias_initializer = new ConstantInitializer(0.0f); const int dims[1] = {outputs[0].adim[2]}; weights[0] = model.create_conv_weight<1>(this, dims, DT_FLOAT, scale_initializer); weights[1] = model.create_conv_weight<1>(this, dims, DT_FLOAT, bias_initializer); } void BatchNorm::create_output_and_partition(FFModel& model) { // Retrive the task indexspace for the op std::string pcname = name; task_is = IndexSpaceT<4>(model.get_or_create_task_is(4, pcname)); Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; Rect<4> part_rect = runtime->get_index_space_domain(ctx, task_is); // Create output tensor int output_w = outputs[0].adim[0]; int output_h = outputs[0].adim[1]; int output_c = outputs[0].adim[2]; int output_n = outputs[0].adim[3]; int num_par_w = part_rect.hi[0] - part_rect.lo[0] + 1; int num_par_h = part_rect.hi[1] - part_rect.lo[1] + 1; int num_par_c = part_rect.hi[2] - part_rect.lo[2] + 1; int num_par_n = part_rect.hi[3] - part_rect.lo[3] + 1; { const int dims[4] = {output_n, output_c, output_h, output_w}; outputs[0] = model.create_tensor<4>(dims, DT_FLOAT, this); outputs[0].owner_op = this; outputs[0].owner_idx = 0; } // Currently assume data parallelism for batch norm assert(num_par_w == 1); assert(num_par_h == 1); assert(num_par_c == 1); // Compute partition bound for input Rect<4> input_rect = runtime->get_index_partition_color_space( ctx, inputs[0].part.get_index_partition()); if (input_rect == part_rect) { input_lps[0] = inputs[0].part; input_grad_lps[0] = inputs[0].part_grad; } else { model.create_disjoint_partition( inputs[0], (IndexSpaceT<4>)task_is, input_lps[0], input_grad_lps[0]); } } /* regions[0]: input regions[1]: output regions[2](I): scale regions[3](I): bias */ __host__ OpMeta* BatchNorm::init_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 4); assert(task->regions.size() == 4); const BatchNorm* bm = (BatchNorm*) task->args; FFHandler handle = *((const FFHandler*) task->local_args); TensorAccessorR<float, 4> acc_input( regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorW<float, 4> acc_output( regions[1], task->regions[1], FID_DATA, ctx, runtime); TensorAccessorR<float, 1> acc_scale( 
regions[2], task->regions[2], FID_DATA, ctx, runtime); TensorAccessorR<float, 1> acc_bias( regions[3], task->regions[3], FID_DATA, ctx, runtime); BatchNormMeta* m = new BatchNormMeta(handle); bm->init_meta(m, acc_input.rect, acc_output.rect, acc_scale.rect, acc_bias.rect); m->numChannels = acc_output.rect.hi[2] - acc_output.rect.lo[2] + 1; m->profiling = bm->profiling; return m; } /* regions[0](O): scale, initilized to ones regions[1](O): bias, initilized to zeros */ __host__ void BatchNorm::init_para_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 2); assert(task->regions.size() == 2); //const BatchNorm* bm = (BatchNorm*) task->args; const AccessorWO<float, 1> acc_scale(regions[0], FID_DATA); const AccessorWO<float, 1> acc_bias(regions[1], FID_DATA); Rect<1> rect_scale, rect_bias; rect_scale = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space()); rect_bias = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space()); assert(acc_scale.accessor.is_dense_arbitrary(rect_scale)); assert(acc_bias.accessor.is_dense_arbitrary(rect_bias)); float *scale_ptr = acc_scale.ptr(rect_scale.lo); float *bias_ptr = acc_bias.ptr(rect_bias.lo); // init kernel and bias #ifdef PARAMETER_ALL_ONES ones_kernel<<<GET_BLOCKS(rect_scale.volume()), CUDA_NUM_THREADS>>>( scale_ptr, rect_scale.volume()); ones_kernel<<<GET_BLOCKS(rect_bias.volume()), CUDA_NUM_THREADS>>>( bias_ptr, rect_bias.volume()); #else //cudaStream_t stream; //checkCUDA(cudaStreamCreate(&stream)); //curandGenerator_t genGPU; //curandCreateGenerator(&genGPU, CURAND_RNG_PSEUDO_DEFAULT); //curandSetStream(genGPU, stream); //curandSetPseudoRandomGeneratorSeed(genGPU, 1234ULL); //curandGenerateUniform(genGPU, scale_ptr, rect_scale.volume()); assign_kernel<<<GET_BLOCKS(rect_scale.volume()), CUDA_NUM_THREADS>>>( scale_ptr, rect_scale.volume(), 1.0f); assign_kernel<<<GET_BLOCKS(rect_bias.volume()), CUDA_NUM_THREADS>>>( bias_ptr, rect_bias.volume(), 0.0f); //curandDestroyGenerator(genGPU); #endif } __host__ void BatchNorm::init(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<4> rect = runtime->get_index_space_domain(ctx, task_is); ParallelConfig pc; std::string pcname = name; ff.config.find_parallel_config(4, pcname, pc); int idx = 0; for (PointInRectIterator<4> it(rect); it(); it++) { FFHandler handle = ff.handlers[pc.device_ids[idx++]]; argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler))); } IndexLauncher launcher(BATCHNORM_INIT_TASK_ID, task_is, TaskArgument(this, sizeof(BatchNorm)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(weights[0].region, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[0].region)); launcher.add_field(2, FID_DATA); launcher.add_region_requirement( RegionRequirement(weights[1].region, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[1].region)); launcher.add_field(3, FID_DATA); FutureMap fm = runtime->execute_index_space(ctx, launcher); fm.wait_all_results(); idx = 0; for (PointInRectIterator<4> it(rect); 
it(); it++) { meta[idx++] = fm.get_result<OpMeta*>(*it); } } /*static*/ void BatchNorm::forward_kernel(BatchNormMeta *m, float const *input_ptr, float *output_ptr, float const *scale_ptr, float const *bias_ptr) { float alpha = 1.0f, beta = 0.0f; coord_t numChannels = m->numChannels; assign_kernel<<<GET_BLOCKS(numChannels), CUDA_NUM_THREADS>>>(m->runningMean, numChannels, 0.0f); assign_kernel<<<GET_BLOCKS(numChannels), CUDA_NUM_THREADS>>>(m->runningVar, numChannels, 0.0f); checkCUDNN(cudnnBatchNormalizationForwardTraining( m->handle.dnn, m->mode, &alpha, &beta, m->inputTensor, input_ptr, m->outputTensor, output_ptr, m->biasTensor, scale_ptr, bias_ptr, 1.0, m->runningMean, m->runningVar, CUDNN_BN_MIN_EPSILON, m->saveMean, m->saveVar)); } /* regions[0](I): input regions[1](O): ouptut regions[2](I): scale regions[3](I): bias */ __host__ void BatchNorm::forward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 4); assert(task->regions.size() == 4); //const BatchNorm* bm = (BatchNorm*) task->args; BatchNormMeta* m = *((BatchNormMeta**) task->local_args); TensorAccessorR<float, 4> acc_input( regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorW<float, 4> acc_output( regions[1], task->regions[1], FID_DATA, ctx, runtime); TensorAccessorR<float, 1> acc_scale( regions[2], task->regions[2], FID_DATA, ctx, runtime); TensorAccessorR<float, 1> acc_bias( regions[3], task->regions[3], FID_DATA, ctx, runtime); cudaEvent_t t_start, t_end; if (m->profiling) { cudaEventCreate(&t_start); cudaEventCreate(&t_end); cudaEventRecord(t_start); } #ifndef DISABLE_LEGION_CUDA_HIJACK cudaStream_t stream; checkCUDA(cudaStreamCreate(&stream)); checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); #endif forward_kernel(m, acc_input.ptr, acc_output.ptr, acc_scale.ptr, acc_bias.ptr); if (m->profiling) { cudaEventRecord(t_end); checkCUDA(cudaEventSynchronize(t_end)); float elapsed = 0; checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end)); cudaEventDestroy(t_start); cudaEventDestroy(t_end); printf("BatchNorm forward time (BF) = %.2fms\n", elapsed); } } __host__ void BatchNorm::forward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<4> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<4> it(rect); it(); it++) { OpMeta* mp = meta[idx++]; argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); } IndexLauncher launcher(BATCHNORM_FWD_TASK_ID, task_is, TaskArgument(NULL, 0), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_DISCARD, EXCLUSIVE, outputs[0].region)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(weights[0].region, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[0].region)); launcher.add_field(2, FID_DATA); launcher.add_region_requirement( RegionRequirement(weights[1].region, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[1].region)); launcher.add_field(3, FID_DATA); runtime->execute_index_space(ctx, launcher); } /*static*/ void BatchNorm::backward_kernel(BatchNormMeta *m, float const *input_ptr, float *output_grad_ptr, float const *output_ptr, float *input_grad_ptr, float 
const *scale_ptr, float *scale_grad_ptr, float *bias_grad_ptr, size_t numElements) { float alpha = 1.0f; if (m->relu) { reluBackward<<<GET_BLOCKS(numElements), CUDA_NUM_THREADS>>>(output_grad_ptr, output_ptr, numElements); } checkCUDNN(cudnnBatchNormalizationBackward( m->handle.dnn, m->mode, &alpha, &alpha, &alpha, &alpha, m->inputTensor, input_ptr, m->outputTensor, output_grad_ptr, m->inputTensor, input_grad_ptr, m->biasTensor, scale_ptr, scale_grad_ptr, bias_grad_ptr, CUDNN_BN_MIN_EPSILON, m->saveMean, m->saveVar)); } /* regions[0](I): input regions[1](I/O): input_grad regions[2](I): output regions[3](I/O): output_grad regions[4](I): scale regions[5](I/O): scale_grad regions[6](I/O): bias_grad */ __host__ void BatchNorm::backward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 7); assert(task->regions.size() == 7); //float beta = 0.0f; //const BatchNorm* bm = (BatchNorm*) task->args; BatchNormMeta* m = *((BatchNormMeta**) task->local_args); TensorAccessorR<float, 4> acc_input( regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorW<float, 4> acc_input_grad( regions[1], task->regions[1], FID_DATA, ctx, runtime, true/*readOutput*/); TensorAccessorR<float, 4> acc_output( regions[2], task->regions[2], FID_DATA, ctx, runtime); TensorAccessorW<float, 4> acc_output_grad( regions[3], task->regions[3], FID_DATA, ctx, runtime, true/*readOutput*/); TensorAccessorR<float, 1> acc_scale( regions[4], task->regions[4], FID_DATA, ctx, runtime); TensorAccessorW<float, 1> acc_scale_grad( regions[5], task->regions[5], FID_DATA, ctx, runtime, true/*readOutput*/); TensorAccessorW<float, 1> acc_bias_grad( regions[6], task->regions[6], FID_DATA, ctx, runtime, true/*readOutput*/); cudaEvent_t t_start, t_end; if (m->profiling) { cudaEventCreate(&t_start); cudaEventCreate(&t_end); cudaEventRecord(t_start); } #ifndef DISABLE_LEGION_CUDA_HIJACK cudaStream_t stream; checkCUDA(cudaStreamCreate(&stream)); checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); #endif backward_kernel(m, acc_input.ptr, acc_output_grad.ptr, acc_output.ptr, acc_input_grad.ptr, acc_scale.ptr, acc_scale_grad.ptr, acc_bias_grad.ptr, acc_output.rect.volume()); if (m->profiling) { cudaEventRecord(t_end); checkCUDA(cudaEventSynchronize(t_end)); float elapsed = 0; checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end)); cudaEventDestroy(t_start); cudaEventDestroy(t_end); printf("BatchNorm backward time = %.2fms\n", elapsed); } } __host__ void BatchNorm::backward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<4> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<4> it(rect); it(); it++) { OpMeta* mp = meta[idx++]; argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); } IndexLauncher launcher(BATCHNORM_BWD_TASK_ID, task_is, TaskArgument(NULL, 0), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); // regions[0](I): input launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); // regions[1](I/O): input_grad (we only need grad tensors) launcher.add_region_requirement( RegionRequirement(inputs[0].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[0].region_grad)); launcher.add_field(1, FID_DATA); // regions[2](I): output launcher.add_region_requirement( RegionRequirement(outputs[0].part, 
0/*projection id*/, READ_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(2, FID_DATA); // regions[3](I/O): output_grad launcher.add_region_requirement( RegionRequirement(outputs[0].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, outputs[0].region_grad)); launcher.add_field(3, FID_DATA); // regions[4](I): filter launcher.add_region_requirement( RegionRequirement(weights[0].region, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[0].region)); launcher.add_field(4, FID_DATA); // regions[5](I/O): filter_grad launcher.add_region_requirement( RegionRequirement(weights[0].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, weights[0].region_grad)); launcher.add_field(5, FID_DATA); // regions[6](I/O): bias_grad launcher.add_region_requirement( RegionRequirement(weights[1].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, weights[1].region_grad)); launcher.add_field(6, FID_DATA); FutureMap fm = runtime->execute_index_space(ctx, launcher); } void BatchNorm::init_meta(BatchNormMeta *m, Rect<4> const &input, Rect<4> const &output, Rect<1> const &scale, Rect<1> const &bias) const { m->relu = this->relu; m->mode = CUDNN_BATCHNORM_SPATIAL; #if CUDNN_VERSION >= 7000 m->mode = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; #endif assert (input == output); int input_w = input.hi[0] - input.lo[0] + 1; int input_h = input.hi[1] - input.lo[1] + 1; int input_c = input.hi[2] - input.lo[2] + 1; int input_n = input.hi[3] - input.lo[3] + 1; int output_w = output.hi[0] - output.lo[0] + 1; int output_h = output.hi[1] - output.lo[1] + 1; int output_c = output.hi[2] - output.lo[2] + 1; int output_n = output.hi[3] - output.lo[3] + 1; checkCUDNN(cudnnSetTensor4dDescriptor(m->inputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_n, input_c, input_h, input_w)); checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, output_n, output_c, output_h, output_w)); checkCUDNN(cudnnSetTensor4dDescriptor(m->biasTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, output_c, 1, 1)); checkCUDA(cudaMalloc(&m->runningMean, sizeof(float) * output_c)); checkCUDA(cudaMalloc(&m->runningVar, sizeof(float) * output_c)); checkCUDA(cudaMalloc(&m->saveMean, sizeof(float) * output_c)); checkCUDA(cudaMalloc(&m->saveVar, sizeof(float) * output_c)); if (m->relu) { checkCUDNN(cudnnCreateActivationDescriptor(&m->actiDesc)); checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0.0)); } } BatchNormMeta::BatchNormMeta(FFHandler handler) : OpMeta(handler) { checkCUDNN(cudnnCreateTensorDescriptor(&inputTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&biasTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&outputTensor)); checkCUDNN(cudnnCreateActivationDescriptor(&actiDesc)); } bool BatchNorm::measure_operator_cost(Simulator* sim, const ParallelConfig& pc, CostMetrics& cost_metrics) { Tensor sub_input, sub_output; if (!outputs[0].get_output_sub_tensor(pc, sub_output, op_type)) { return false; } if (!inputs[0].get_input_sub_tensor(pc, sub_input, op_type)) { return false; } BatchNormMeta *m = sim->batch_norm_meta; m->numChannels = sub_output.adim[2]; Rect<1> scale_rect(Point<1>(0), Point<1>(m->numChannels-1)); // scale and bias have same dimensions this->init_meta(m, sub_input.get_domain(), sub_output.get_domain(), scale_rect, scale_rect); sim->free_all(); float *input_ptr = (float *)sim->allocate(sub_input.get_volume(), DT_FLOAT); assert (input_ptr != NULL); float *output_ptr = (float *)sim->allocate(sub_output.get_volume(), DT_FLOAT); assert (output_ptr != NULL); float 
*bias_ptr = (float *)sim->allocate(m->numChannels, DT_FLOAT); assert (bias_ptr != NULL); float *scale_ptr = (float *)sim->allocate(m->numChannels, DT_FLOAT); assert (scale_ptr != NULL); std::function<void()> forward, backward; forward = [&] { forward_kernel(m, input_ptr, output_ptr, scale_ptr, bias_ptr); }; if (sim->computationMode == COMP_MODE_TRAINING) { float *input_grad_ptr = (float *)sim->allocate(sub_input.get_volume(), DT_FLOAT); assert (input_grad_ptr != NULL); float *output_grad_ptr = (float *)sim->allocate(sub_output.get_volume(), DT_FLOAT); assert (output_grad_ptr != NULL); float *scale_grad_ptr = (float *)sim->allocate(m->numChannels, DT_FLOAT); assert (scale_grad_ptr != NULL); float *bias_grad_ptr = (float *)sim->allocate(m->numChannels, DT_FLOAT); assert (bias_grad_ptr != NULL); backward = [&] { backward_kernel(m, input_ptr, output_grad_ptr, output_ptr, input_grad_ptr, scale_ptr, scale_grad_ptr, bias_grad_ptr, sub_output.get_volume()); }; } inner_measure_operator_cost(sim, forward, backward, cost_metrics); if (sim->computationMode == COMP_MODE_TRAINING) { printf("[Measure BatchNorm] name(%s) size(%zu) forward_time(%.4lf) backward_time(%.4lf)\n", name, sub_input.get_volume(), cost_metrics.forward_time, cost_metrics.backward_time); } else { printf("[Measure BatchNorm] name(%s) size(%zu) forward_time(%.4lf)\n", name, sub_input.get_volume(), cost_metrics.forward_time); } return true; }
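// ---------------------------------------------------------------------------
// Minimal illustrative sketch (not part of either BatchNorm source above) of
// the mechanical renames that separate the .hip and .cu versions of the pair:
// cuda* runtime calls become hip* calls, and the <<<grid, block>>> launch
// becomes hipLaunchKernelGGL(kernel, dim3(grid), dim3(block), sharedMem,
// stream, args...). The kernel name fill_value is a made-up stand-in for
// helpers such as assign_kernel; the HIP equivalents are shown as comments.
#include <cuda_runtime.h>

__global__ void fill_value(float* ptr, int n, float value) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) ptr[i] = value;            // kernel bodies compile unchanged under HIP
}

int main() {
  const int n = 1024;
  float* d_ptr = nullptr;
  cudaMalloc(&d_ptr, n * sizeof(float));       // HIP: hipMalloc(&d_ptr, ...)
  fill_value<<<(n + 255) / 256, 256>>>(d_ptr, n, 1.0f);
  // HIP: hipLaunchKernelGGL(fill_value, dim3((n + 255) / 256), dim3(256),
  //                         0, 0, d_ptr, n, 1.0f);
  cudaDeviceSynchronize();                     // HIP: hipDeviceSynchronize()
  cudaFree(d_ptr);                             // HIP: hipFree(d_ptr)
  return 0;
}
// ---------------------------------------------------------------------------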
a4aa0de0a6ccd19e2064fe384a5b2b298b6e5885.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // source: http://www.cl.cam.ac.uk/research/rainbow/projects/dcbgrid/DCBGrid-preprint.pdf #include "GpuImage.h" #include "helper_cuda.h" #include "AmCudaHelper.cuh" #include <iostream> void GpuImage::Create( GpuImageType type, int width, int height ) { if ( type != imageType || width != w || height != h ) { Destroy(); //LOG_EVENT("Creating image"); switch ( type ) { case IMAGE_TYPE_XRGB32: checkCudaErrors(hipMallocPitch((void**)&gpuImage, &gpuImagePitch, width * sizeof(unsigned int), height)); break; /*case IMAGE_TYPE_FLOAT2: checkCudaErrors(hipMallocPitch((void**)&gpuImage, &gpuImagePitch, width * sizeof(float2), height)); break;*/ } w = width; h = height; imageType = type; } } void GpuImage::Destroy() { //LOG_EVENT("Destroying image"); CUDA_FREE( gpuImage ); w = h = 0; } void GpuImage::SwapData(GpuImage & other) { std::swap<unsigned int*>(gpuImage, other.gpuImage); std::swap<GpuImageType>(imageType, other.imageType); std::swap<size_t>(gpuImagePitch, other.gpuImagePitch); std::swap<int>(w, other.w); std::swap<int>(h, other.h); } void GpuImage::CopyDataIn(unsigned int const* hostData) { checkCudaErrors(hipMemcpy2D(gpuImage, gpuImagePitch, hostData, w * sizeof(unsigned int), w * sizeof(unsigned int), h, hipMemcpyHostToDevice)); } #if 0 void GpuImage::AsyncCopyFrom(const GpuImage* const other, const GpuExecutionStream & stream) { // Resize image if needed, then copy in the relevant data SizeToMatch(*other); checkCudaErrors(hipMemcpy2DAsync(gpuImage, gpuImagePitch, other->gpuImage, other->gpuImagePitch, w, h, hipMemcpyDeviceToDevice, stream.Get())); } #endif void GpuImage::SizeToMatch(const GpuImage & other) { Create(other.imageType, other.GetWidth(), other.GetHeight()); } __global__ void TransformImage(unsigned int* imageIn, unsigned int inStride, int width, int height, float* imageOut, unsigned int outStride) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; if(x < width && y < height) { unsigned int input = imageIn[inStride * y + x]; imageOut[outStride * y + x] = (float)((input & 0xFF) + ((input >> 8) & 0xFF) + ((input >> 16) & 0xFF)) / (255.0f * 3); } } void GpuImage::CopyDataOut(float* hostData) { // Create device memory for output of the transformation kernel float* transImage; size_t transImagePitch; checkCudaErrors( hipMallocPitch(&transImage, &transImagePitch, w * sizeof(float), h) ); dim3 blockDimension(32, 8); dim3 gridDimension((w - 1) / blockDimension.x + 1, (h - 1) / blockDimension.y + 1); //RECORD_KERNEL_LAUNCH("Transform image kernel", gridDimension, blockDimension); hipLaunchKernelGGL(( TransformImage), dim3(gridDimension), dim3(blockDimension), 0, 0, gpuImage, gpuImagePitch / sizeof(unsigned int), w, h, transImage, transImagePitch / sizeof(float)); //CHECK_KERNEL_ERROR("Transform image kernel"); // Copy out the transformed result checkCudaErrors(hipMemcpy2D(hostData, w * sizeof(float), transImage, transImagePitch, w * sizeof(float), h, hipMemcpyDeviceToHost)); CUDA_FREE(transImage); } #pragma region Downsampling __global__ void DownSampleImage(const unsigned int* const imgIn, const int imgInStride, const int outWidth, const int outHeight, const int dsFactor, unsigned int* const imgOut, const int imgOutStride) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; if(x < outWidth && y < outHeight) imgOut[imgOutStride * y + x] = imgIn[imgInStride * y * dsFactor + x * dsFactor]; } void 
GpuImage::DownsampleFrom(const GpuImage* const other, int downsampleFactor) { Create(other->imageType, other->w / downsampleFactor, other->h / downsampleFactor); dim3 blockDimension(32, 8); dim3 gridDimension((w - 1) / blockDimension.x + 1, (h - 1) / blockDimension.y + 1); //RECORD_KERNEL_LAUNCH("Down-sample image kernel", gridDimension, blockDimension); hipLaunchKernelGGL(( DownSampleImage), dim3(gridDimension), dim3(blockDimension), 0, 0, other->gpuImage, other->gpuImagePitch / sizeof(unsigned int), w, h, downsampleFactor, gpuImage, gpuImagePitch / sizeof(unsigned int)); //CHECK_KERNEL_ERROR("Down-sample image kernel"); } #pragma endregion
a4aa0de0a6ccd19e2064fe384a5b2b298b6e5885.cu
// source: http://www.cl.cam.ac.uk/research/rainbow/projects/dcbgrid/DCBGrid-preprint.pdf #include "GpuImage.h" #include "helper_cuda.h" #include "AmCudaHelper.cuh" #include <iostream> void GpuImage::Create( GpuImageType type, int width, int height ) { if ( type != imageType || width != w || height != h ) { Destroy(); //LOG_EVENT("Creating image"); switch ( type ) { case IMAGE_TYPE_XRGB32: checkCudaErrors(cudaMallocPitch((void**)&gpuImage, &gpuImagePitch, width * sizeof(unsigned int), height)); break; /*case IMAGE_TYPE_FLOAT2: checkCudaErrors(cudaMallocPitch((void**)&gpuImage, &gpuImagePitch, width * sizeof(float2), height)); break;*/ } w = width; h = height; imageType = type; } } void GpuImage::Destroy() { //LOG_EVENT("Destroying image"); CUDA_FREE( gpuImage ); w = h = 0; } void GpuImage::SwapData(GpuImage & other) { std::swap<unsigned int*>(gpuImage, other.gpuImage); std::swap<GpuImageType>(imageType, other.imageType); std::swap<size_t>(gpuImagePitch, other.gpuImagePitch); std::swap<int>(w, other.w); std::swap<int>(h, other.h); } void GpuImage::CopyDataIn(unsigned int const* hostData) { checkCudaErrors(cudaMemcpy2D(gpuImage, gpuImagePitch, hostData, w * sizeof(unsigned int), w * sizeof(unsigned int), h, cudaMemcpyHostToDevice)); } #if 0 void GpuImage::AsyncCopyFrom(const GpuImage* const other, const GpuExecutionStream & stream) { // Resize image if needed, then copy in the relevant data SizeToMatch(*other); checkCudaErrors(cudaMemcpy2DAsync(gpuImage, gpuImagePitch, other->gpuImage, other->gpuImagePitch, w, h, cudaMemcpyDeviceToDevice, stream.Get())); } #endif void GpuImage::SizeToMatch(const GpuImage & other) { Create(other.imageType, other.GetWidth(), other.GetHeight()); } __global__ void TransformImage(unsigned int* imageIn, unsigned int inStride, int width, int height, float* imageOut, unsigned int outStride) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; if(x < width && y < height) { unsigned int input = imageIn[inStride * y + x]; imageOut[outStride * y + x] = (float)((input & 0xFF) + ((input >> 8) & 0xFF) + ((input >> 16) & 0xFF)) / (255.0f * 3); } } void GpuImage::CopyDataOut(float* hostData) { // Create device memory for output of the transformation kernel float* transImage; size_t transImagePitch; checkCudaErrors( cudaMallocPitch(&transImage, &transImagePitch, w * sizeof(float), h) ); dim3 blockDimension(32, 8); dim3 gridDimension((w - 1) / blockDimension.x + 1, (h - 1) / blockDimension.y + 1); //RECORD_KERNEL_LAUNCH("Transform image kernel", gridDimension, blockDimension); TransformImage<<<gridDimension, blockDimension>>>(gpuImage, gpuImagePitch / sizeof(unsigned int), w, h, transImage, transImagePitch / sizeof(float)); //CHECK_KERNEL_ERROR("Transform image kernel"); // Copy out the transformed result checkCudaErrors(cudaMemcpy2D(hostData, w * sizeof(float), transImage, transImagePitch, w * sizeof(float), h, cudaMemcpyDeviceToHost)); CUDA_FREE(transImage); } #pragma region Downsampling __global__ void DownSampleImage(const unsigned int* const imgIn, const int imgInStride, const int outWidth, const int outHeight, const int dsFactor, unsigned int* const imgOut, const int imgOutStride) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; if(x < outWidth && y < outHeight) imgOut[imgOutStride * y + x] = imgIn[imgInStride * y * dsFactor + x * dsFactor]; } void GpuImage::DownsampleFrom(const GpuImage* const other, int downsampleFactor) { Create(other->imageType, other->w / downsampleFactor, 
other->h / downsampleFactor); dim3 blockDimension(32, 8); dim3 gridDimension((w - 1) / blockDimension.x + 1, (h - 1) / blockDimension.y + 1); //RECORD_KERNEL_LAUNCH("Down-sample image kernel", gridDimension, blockDimension); DownSampleImage<<<gridDimension, blockDimension>>>(other->gpuImage, other->gpuImagePitch / sizeof(unsigned int), w, h, downsampleFactor, gpuImage, gpuImagePitch / sizeof(unsigned int)); //CHECK_KERNEL_ERROR("Down-sample image kernel"); } #pragma endregion
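// ---------------------------------------------------------------------------
// Small self-contained sketch (image size and kernel name are illustrative,
// not taken from GpuImage above) of the pitched-allocation pattern both
// GpuImage versions rely on: cudaMallocPitch returns a row pitch in bytes,
// the kernels receive pitch / sizeof(element) as an element stride, and each
// pixel is addressed as img[y * stride + x].
#include <cuda_runtime.h>

__global__ void invert_pixels(unsigned int* img, int strideElems,
                              int width, int height) {
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x < width && y < height)
    img[y * strideElems + x] = ~img[y * strideElems + x];  // row-major walk with pitch stride
}

int main() {
  const int w = 640, h = 480;
  unsigned int* d_img = nullptr;
  size_t pitchBytes = 0;
  cudaMallocPitch(reinterpret_cast<void**>(&d_img), &pitchBytes,
                  w * sizeof(unsigned int), h);
  dim3 blockDimension(32, 8);                  // same block shape GpuImage uses
  dim3 gridDimension((w - 1) / blockDimension.x + 1,
                     (h - 1) / blockDimension.y + 1);
  invert_pixels<<<gridDimension, blockDimension>>>(
      d_img, pitchBytes / sizeof(unsigned int), w, h);
  cudaDeviceSynchronize();
  cudaFree(d_img);
  return 0;
}
// ---------------------------------------------------------------------------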
08db76dc964acf6c946aa9e52f496ad38081c6e1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef USE_OPENCV #include <math.h> #include <math_constants.h> #include <opencv2/core/core.hpp> #include <vector> #include "caffe/layers/detectnet_transform_layer.hpp" #include "caffe/util/detectnet_coverage.hpp" #include "caffe/util/gpu_memory.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { // Calculate the location in the image from the loop index __device__ void get_pixel_indices(const int loop_index, const uint4 shape, int* x, int* y, int* n ) { int idx = loop_index; *n = idx / (shape.y * shape.x); idx -= *n * shape.y * shape.x; *y = idx / shape.x; idx -= *y * shape.x; *x = idx; } // https://www.cs.rit.edu/~ncs/color/t_convert.html template <typename Dtype> __device__ void convert_rgb_to_hsv( Dtype r, Dtype g, Dtype b, Dtype* h, Dtype* s, Dtype* v ) { Dtype min_v = min(min(r, g), b); Dtype max_v = max(max(r, g), b); // NOLINT(build/include_what_you_use) Dtype delta = max_v - min_v; if (max_v == 0 || delta == 0) { *h = 0; *s = 0; *v = max_v; return; } if (r == max_v) { *h = (g - b) / delta; } else if (g == max_v) { *h = 2 + (b - r) / delta; } else { *h = 4 + (r - g) / delta; } *h *= 60; if (h < 0) { *h += 360; } *s = delta / max_v; *v = max_v; } // https://www.cs.rit.edu/~ncs/color/t_convert.html template <typename Dtype> __device__ void convert_hsv_to_rgb( Dtype h, Dtype s, Dtype v, Dtype* r, Dtype* g, Dtype* b ) { int i; Dtype f, p, q, t; if (s == 0) { *r = v; *g = v; *b = v; return; } h /= 60; // sector 0 to 5 i = floor(h); f = h - i; // factorial part of h p = v * (1 - s); q = v * (1 - s * f); t = v * (1 - s * (1 - f)); switch (i) { case 0: *r = v; *g = t; *b = p; break; case 1: *r = q; *g = v; *b = p; break; case 2: *r = p; *g = v; *b = t; break; case 3: *r = p; *g = q; *b = v; break; case 4: *r = t; *g = p; *b = v; break; default: // case 5: *r = v; *g = p; *b = q; break; } } template <typename Dtype> __global__ void color_transformations( const Dtype* src_data, Dtype* dst_data, const uint4 shape, const AugmentSelection* aug_data ) { CUDA_KERNEL_LOOP(loop_index, shape.x * shape.y * shape.w) { int x, y, n; get_pixel_indices(loop_index, shape, &x, &y, &n); // check what needs doing const AugmentSelection& as = aug_data[n]; const bool doHueRotation = (abs(as.hue_rotation) > FLT_EPSILON); const bool doDesaturation = (as.saturation < (1.0 - 1.0/UINT8_MAX)); // N*cs*hs*ws + H*ws + W int index = n * shape.z * shape.y * shape.x + y * shape.x + x; // hs*ws const int channel_stride = shape.y * shape.x; // read Dtype r = src_data[index + 0 * channel_stride]; Dtype g = src_data[index + 1 * channel_stride]; Dtype b = src_data[index + 2 * channel_stride]; if (doHueRotation || doDesaturation) { // transform Dtype h, s, v; convert_rgb_to_hsv(r, g, b, &h, &s, &v); if (doHueRotation) { h -= aug_data[n].hue_rotation; } if (doDesaturation) { s *= aug_data[n].saturation; } convert_hsv_to_rgb(h, s, v, &r, &g, &b); } // write dst_data[index + 0 * channel_stride] = r; dst_data[index + 1 * channel_stride] = g; dst_data[index + 2 * channel_stride] = b; } } // Mean is WxHxC // For each pixel in the current image, subtract the corresponding pixel // from the mean image template <typename Dtype> __global__ void pixel_mean_subtraction( Dtype* data, const Dtype* mean_data, const uint4 shape ) { CUDA_KERNEL_LOOP(loop_index, shape.x * shape.y * shape.w) { int x, y, n; get_pixel_indices(loop_index, shape, &x, &y, &n); for (int c = 0; c < shape.z; c++) { // N*cs*hs*ws + C*hs*ws + H*ws + W const int data_idx = (n 
* shape.z * shape.y * shape.x) + (c * shape.y * shape.x) + (y * shape.x) + x; // C*hs*ws + H*ws + W const int mean_idx = (c * shape.y * shape.x) + (y * shape.x) + x; data[data_idx] -= mean_data[mean_idx]; } } } // Mean is 1x1xC // For each pixel in the current image, subtract the mean pixel template <typename Dtype> __global__ void channel_mean_subtraction( Dtype* data, const uint4 shape, const Dtype mean_value1, const Dtype mean_value2, const Dtype mean_value3 ) { CUDA_KERNEL_LOOP(loop_index, shape.x * shape.y * shape.w) { int x, y, n; get_pixel_indices(loop_index, shape, &x, &y, &n); // N*cs*hs*ws + C*hs*ws + H*ws + W const int data_idx = (n * shape.z * shape.y * shape.x) +(y * shape.x) + x; // hs*ws const int channel_stride = shape.y * shape.x; data[data_idx + 0 * channel_stride] -= mean_value1; data[data_idx + 1 * channel_stride] -= mean_value2; data[data_idx + 2 * channel_stride] -= mean_value3; } } template <typename Dtype> __device__ void rotate_point( const Dtype ax, const Dtype ay, // original point const Dtype cx, const Dtype cy, // center point float angle, Dtype* bx, Dtype* by // destination point ) { const Dtype s = sin(angle); const Dtype c = cos(angle); // translate to origin const Dtype tx = ax - cx; const Dtype ty = ay - cy; *bx = (tx * c) - (ty * s) + cx; *by = (tx * s) + (ty * c) + cy; } template <typename Dtype> __device__ Dtype get_value( const Dtype* data, const uint4& shape, const unsigned int n, const unsigned int c, int y, int x ) { // Replicate border for 1 pixel if (x == -1) x = 0; if (x == shape.x) x = shape.x - 1; if (y == -1) y = 0; if (y == shape.y) y = shape.y - 1; if (x >= 0 && x < shape.x && y >= 0 && y < shape.y) { // N*cs*hs*ws + C*hs*ws + H*ws + W return data[(n * shape.z * shape.y * shape.x) + (c * shape.y * shape.x) + (y * shape.x) + x]; } else { return 0; } } template <typename Dtype> __device__ Dtype cubic_interpolation(const Dtype& d, const Dtype& v1, const Dtype& v2, const Dtype& v3, const Dtype& v4 ) { // d is [0,1], marking the distance from v2 towards v3 return v2 + d * ( -2.0 * v1 - 3.0 * v2 + 6.0 * v3 - 1.0 * v4 + d * ( 3.0 * v1 - 6.0 * v2 + 3.0 * v3 + 0.0 * v4 + d * ( -1.0 * v1 + 3.0 * v2 - 3.0 * v3 + 1.0 * v4))) / 6.0; } // Interpolate in 1D space template <typename Dtype> __device__ Dtype interpolate_x( const Dtype* data, const uint4& shape, const unsigned int n, const unsigned int c, const int y, const Dtype x ) { Dtype dx = x - floor(x); return cubic_interpolation(dx, get_value(data, shape, n, c, y, floor(x) - 1), get_value(data, shape, n, c, y, floor(x)), get_value(data, shape, n, c, y, ceil(x)), get_value(data, shape, n, c, y, ceil(x) + 1)); } // Interpolate in 2D space template <typename Dtype> __device__ Dtype interpolate_xy( const Dtype* data, const uint4& shape, const unsigned int n, const unsigned int c, const Dtype y, const Dtype x ) { Dtype dy = y - floor(y); return cubic_interpolation(dy, interpolate_x(data, shape, n, c, floor(y) - 1, x), interpolate_x(data, shape, n, c, floor(y), x), interpolate_x(data, shape, n, c, ceil(y), x), interpolate_x(data, shape, n, c, ceil(y) + 1, x)); } template <typename Dtype> __global__ void spatial_transformations( const Dtype* src_data, const uint4 src_shape, const AugmentSelection* aug_data, Dtype* dst_data, const uint4 dst_shape ) { CUDA_KERNEL_LOOP(loop_index, dst_shape.x * dst_shape.y * dst_shape.w) { int dst_x, dst_y, n; get_pixel_indices(loop_index, dst_shape, &dst_x, &dst_y, &n); const AugmentSelection& as = aug_data[n]; // calculate src pixel indices for this thread Dtype x = dst_x; 
Dtype y = dst_y; // crop x += as.crop_offset.x; y += as.crop_offset.y; // rotate if (abs(as.rotation) > FLT_EPSILON) { const Dtype w_before = as.scale.width - 1; const Dtype h_before = as.scale.height - 1; const float angle = as.rotation * CUDART_PI_F / 180.0f; const Dtype w_after = abs(w_before * cos(angle)) + abs(h_before * sin(angle)); const Dtype h_after = abs(w_before * sin(angle)) + abs(h_before * cos(angle)); rotate_point(x, y, w_after / 2.0f, h_after / 2.0f, -angle, &x, &y); x -= (w_after - w_before) / 2.0f; y -= (h_after - h_before) / 2.0f; } // scale if (src_shape.x != as.scale.width) { x *= Dtype(src_shape.x - 1) / (as.scale.width - 1); } if (src_shape.y != as.scale.height) { y *= Dtype(src_shape.y - 1) / (as.scale.height - 1); } // flip if (as.flip) { x = (src_shape.x - x - 1.0); } for (int c = 0; c < dst_shape.z; c++) { // N*cs*hs*ws + C*hs*ws + H*ws + W const int dst_idx = (n * dst_shape.z * dst_shape.y * dst_shape.x) + (c * dst_shape.y * dst_shape.x) + (dst_y * dst_shape.x) + dst_x; dst_data[dst_idx] = interpolate_xy(src_data, src_shape, n, c, y, x); } } } template <typename Dtype> void DetectNetTransformationLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); AugmentSelection* aug_data = reinterpret_cast<AugmentSelection*>( gpu_workspace_augmentations_.data()); Dtype* tmp_data = reinterpret_cast<Dtype*>( gpu_workspace_tmpdata_.data()); const uint4 bottom_shape = make_uint4( bottom[0]->shape(3), // x = W bottom[0]->shape(2), // y = H bottom[0]->shape(1), // z = C bottom[0]->shape(0)); // w = N const int bottom_count = bottom[0]->count(); const int bottom_pixels = bottom_shape.x * bottom_shape.y * bottom_shape.w; const uint4 top_shape = make_uint4( top[0]->shape(3), // x = W top[0]->shape(2), // y = H top[0]->shape(1), // z = C top[0]->shape(0)); // w = N const int top_count = top[0]->count(); const int top_pixels = top_shape.x * top_shape.y * top_shape.w; // Get current stream int device; CUDA_CHECK(hipGetDevice(&device)); hipStream_t stream = GPUMemory::device_stream(device); // Make augmentation selections for each image vector<AugmentSelection> augmentations; for (int i = 0; i < bottom_shape.w; i++) { augmentations.push_back(get_augmentations( cv::Point(bottom_shape.x, bottom_shape.y))); } // Copy augmentation selections to GPU size_t aug_data_sz = sizeof(AugmentSelection) * augmentations.size(); caffe_gpu_memcpy(aug_data_sz, &augmentations[0], aug_data); // Color transformations // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( color_transformations), dim3(CAFFE_GET_BLOCKS(bottom_pixels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, bottom_data, tmp_data, bottom_shape, aug_data); // Mean subtraction if (t_param_.has_mean_file()) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( pixel_mean_subtraction), dim3(CAFFE_GET_BLOCKS(bottom_pixels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, tmp_data, mean_blob_.gpu_data(), bottom_shape); } else if (t_param_.mean_value_size() != 0) { CHECK_EQ(bottom_shape.z, 3) << "Data must have 3 channels when " "using transform_param.mean_value."; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( channel_mean_subtraction), dim3(CAFFE_GET_BLOCKS(bottom_pixels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, tmp_data, bottom_shape, mean_values_[0] * UINT8_MAX, mean_values_[1] * UINT8_MAX, mean_values_[2] * UINT8_MAX); } // Spatial transformations // NOLINT_NEXT_LINE(whitespace/operators) 
hipLaunchKernelGGL(( spatial_transformations), dim3(CAFFE_GET_BLOCKS(top_pixels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, tmp_data, bottom_shape, aug_data, top_data, top_shape); // Use CPU to transform labels const vector<vector<BboxLabel> > list_list_bboxes = blobToLabels(*bottom[1]); for (size_t i = 0; i < bottom[1]->num(); i++) { const vector<BboxLabel>& list_bboxes = list_list_bboxes[i]; Dtype* output_label = &top[1]->mutable_cpu_data()[ top[1]->offset(i, 0, 0, 0) ]; transform_label_cpu(list_bboxes, output_label, augmentations[i], cv::Size(bottom_shape.x, bottom_shape.y)); } } INSTANTIATE_LAYER_GPU_FUNCS(DetectNetTransformationLayer); } // namespace caffe #endif
08db76dc964acf6c946aa9e52f496ad38081c6e1.cu
#ifdef USE_OPENCV #include <math.h> #include <math_constants.h> #include <opencv2/core/core.hpp> #include <vector> #include "caffe/layers/detectnet_transform_layer.hpp" #include "caffe/util/detectnet_coverage.hpp" #include "caffe/util/gpu_memory.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { // Calculate the location in the image from the loop index __device__ void get_pixel_indices(const int loop_index, const uint4 shape, int* x, int* y, int* n ) { int idx = loop_index; *n = idx / (shape.y * shape.x); idx -= *n * shape.y * shape.x; *y = idx / shape.x; idx -= *y * shape.x; *x = idx; } // https://www.cs.rit.edu/~ncs/color/t_convert.html template <typename Dtype> __device__ void convert_rgb_to_hsv( Dtype r, Dtype g, Dtype b, Dtype* h, Dtype* s, Dtype* v ) { Dtype min_v = min(min(r, g), b); Dtype max_v = max(max(r, g), b); // NOLINT(build/include_what_you_use) Dtype delta = max_v - min_v; if (max_v == 0 || delta == 0) { *h = 0; *s = 0; *v = max_v; return; } if (r == max_v) { *h = (g - b) / delta; } else if (g == max_v) { *h = 2 + (b - r) / delta; } else { *h = 4 + (r - g) / delta; } *h *= 60; if (h < 0) { *h += 360; } *s = delta / max_v; *v = max_v; } // https://www.cs.rit.edu/~ncs/color/t_convert.html template <typename Dtype> __device__ void convert_hsv_to_rgb( Dtype h, Dtype s, Dtype v, Dtype* r, Dtype* g, Dtype* b ) { int i; Dtype f, p, q, t; if (s == 0) { *r = v; *g = v; *b = v; return; } h /= 60; // sector 0 to 5 i = floor(h); f = h - i; // factorial part of h p = v * (1 - s); q = v * (1 - s * f); t = v * (1 - s * (1 - f)); switch (i) { case 0: *r = v; *g = t; *b = p; break; case 1: *r = q; *g = v; *b = p; break; case 2: *r = p; *g = v; *b = t; break; case 3: *r = p; *g = q; *b = v; break; case 4: *r = t; *g = p; *b = v; break; default: // case 5: *r = v; *g = p; *b = q; break; } } template <typename Dtype> __global__ void color_transformations( const Dtype* src_data, Dtype* dst_data, const uint4 shape, const AugmentSelection* aug_data ) { CUDA_KERNEL_LOOP(loop_index, shape.x * shape.y * shape.w) { int x, y, n; get_pixel_indices(loop_index, shape, &x, &y, &n); // check what needs doing const AugmentSelection& as = aug_data[n]; const bool doHueRotation = (abs(as.hue_rotation) > FLT_EPSILON); const bool doDesaturation = (as.saturation < (1.0 - 1.0/UINT8_MAX)); // N*cs*hs*ws + H*ws + W int index = n * shape.z * shape.y * shape.x + y * shape.x + x; // hs*ws const int channel_stride = shape.y * shape.x; // read Dtype r = src_data[index + 0 * channel_stride]; Dtype g = src_data[index + 1 * channel_stride]; Dtype b = src_data[index + 2 * channel_stride]; if (doHueRotation || doDesaturation) { // transform Dtype h, s, v; convert_rgb_to_hsv(r, g, b, &h, &s, &v); if (doHueRotation) { h -= aug_data[n].hue_rotation; } if (doDesaturation) { s *= aug_data[n].saturation; } convert_hsv_to_rgb(h, s, v, &r, &g, &b); } // write dst_data[index + 0 * channel_stride] = r; dst_data[index + 1 * channel_stride] = g; dst_data[index + 2 * channel_stride] = b; } } // Mean is WxHxC // For each pixel in the current image, subtract the corresponding pixel // from the mean image template <typename Dtype> __global__ void pixel_mean_subtraction( Dtype* data, const Dtype* mean_data, const uint4 shape ) { CUDA_KERNEL_LOOP(loop_index, shape.x * shape.y * shape.w) { int x, y, n; get_pixel_indices(loop_index, shape, &x, &y, &n); for (int c = 0; c < shape.z; c++) { // N*cs*hs*ws + C*hs*ws + H*ws + W const int data_idx = (n * shape.z * shape.y * shape.x) + (c * shape.y * shape.x) + (y * shape.x) + x; // 
C*hs*ws + H*ws + W const int mean_idx = (c * shape.y * shape.x) + (y * shape.x) + x; data[data_idx] -= mean_data[mean_idx]; } } } // Mean is 1x1xC // For each pixel in the current image, subtract the mean pixel template <typename Dtype> __global__ void channel_mean_subtraction( Dtype* data, const uint4 shape, const Dtype mean_value1, const Dtype mean_value2, const Dtype mean_value3 ) { CUDA_KERNEL_LOOP(loop_index, shape.x * shape.y * shape.w) { int x, y, n; get_pixel_indices(loop_index, shape, &x, &y, &n); // N*cs*hs*ws + C*hs*ws + H*ws + W const int data_idx = (n * shape.z * shape.y * shape.x) +(y * shape.x) + x; // hs*ws const int channel_stride = shape.y * shape.x; data[data_idx + 0 * channel_stride] -= mean_value1; data[data_idx + 1 * channel_stride] -= mean_value2; data[data_idx + 2 * channel_stride] -= mean_value3; } } template <typename Dtype> __device__ void rotate_point( const Dtype ax, const Dtype ay, // original point const Dtype cx, const Dtype cy, // center point float angle, Dtype* bx, Dtype* by // destination point ) { const Dtype s = sin(angle); const Dtype c = cos(angle); // translate to origin const Dtype tx = ax - cx; const Dtype ty = ay - cy; *bx = (tx * c) - (ty * s) + cx; *by = (tx * s) + (ty * c) + cy; } template <typename Dtype> __device__ Dtype get_value( const Dtype* data, const uint4& shape, const unsigned int n, const unsigned int c, int y, int x ) { // Replicate border for 1 pixel if (x == -1) x = 0; if (x == shape.x) x = shape.x - 1; if (y == -1) y = 0; if (y == shape.y) y = shape.y - 1; if (x >= 0 && x < shape.x && y >= 0 && y < shape.y) { // N*cs*hs*ws + C*hs*ws + H*ws + W return data[(n * shape.z * shape.y * shape.x) + (c * shape.y * shape.x) + (y * shape.x) + x]; } else { return 0; } } template <typename Dtype> __device__ Dtype cubic_interpolation(const Dtype& d, const Dtype& v1, const Dtype& v2, const Dtype& v3, const Dtype& v4 ) { // d is [0,1], marking the distance from v2 towards v3 return v2 + d * ( -2.0 * v1 - 3.0 * v2 + 6.0 * v3 - 1.0 * v4 + d * ( 3.0 * v1 - 6.0 * v2 + 3.0 * v3 + 0.0 * v4 + d * ( -1.0 * v1 + 3.0 * v2 - 3.0 * v3 + 1.0 * v4))) / 6.0; } // Interpolate in 1D space template <typename Dtype> __device__ Dtype interpolate_x( const Dtype* data, const uint4& shape, const unsigned int n, const unsigned int c, const int y, const Dtype x ) { Dtype dx = x - floor(x); return cubic_interpolation(dx, get_value(data, shape, n, c, y, floor(x) - 1), get_value(data, shape, n, c, y, floor(x)), get_value(data, shape, n, c, y, ceil(x)), get_value(data, shape, n, c, y, ceil(x) + 1)); } // Interpolate in 2D space template <typename Dtype> __device__ Dtype interpolate_xy( const Dtype* data, const uint4& shape, const unsigned int n, const unsigned int c, const Dtype y, const Dtype x ) { Dtype dy = y - floor(y); return cubic_interpolation(dy, interpolate_x(data, shape, n, c, floor(y) - 1, x), interpolate_x(data, shape, n, c, floor(y), x), interpolate_x(data, shape, n, c, ceil(y), x), interpolate_x(data, shape, n, c, ceil(y) + 1, x)); } template <typename Dtype> __global__ void spatial_transformations( const Dtype* src_data, const uint4 src_shape, const AugmentSelection* aug_data, Dtype* dst_data, const uint4 dst_shape ) { CUDA_KERNEL_LOOP(loop_index, dst_shape.x * dst_shape.y * dst_shape.w) { int dst_x, dst_y, n; get_pixel_indices(loop_index, dst_shape, &dst_x, &dst_y, &n); const AugmentSelection& as = aug_data[n]; // calculate src pixel indices for this thread Dtype x = dst_x; Dtype y = dst_y; // crop x += as.crop_offset.x; y += as.crop_offset.y; // rotate if 
(abs(as.rotation) > FLT_EPSILON) { const Dtype w_before = as.scale.width - 1; const Dtype h_before = as.scale.height - 1; const float angle = as.rotation * CUDART_PI_F / 180.0f; const Dtype w_after = abs(w_before * cos(angle)) + abs(h_before * sin(angle)); const Dtype h_after = abs(w_before * sin(angle)) + abs(h_before * cos(angle)); rotate_point(x, y, w_after / 2.0f, h_after / 2.0f, -angle, &x, &y); x -= (w_after - w_before) / 2.0f; y -= (h_after - h_before) / 2.0f; } // scale if (src_shape.x != as.scale.width) { x *= Dtype(src_shape.x - 1) / (as.scale.width - 1); } if (src_shape.y != as.scale.height) { y *= Dtype(src_shape.y - 1) / (as.scale.height - 1); } // flip if (as.flip) { x = (src_shape.x - x - 1.0); } for (int c = 0; c < dst_shape.z; c++) { // N*cs*hs*ws + C*hs*ws + H*ws + W const int dst_idx = (n * dst_shape.z * dst_shape.y * dst_shape.x) + (c * dst_shape.y * dst_shape.x) + (dst_y * dst_shape.x) + dst_x; dst_data[dst_idx] = interpolate_xy(src_data, src_shape, n, c, y, x); } } } template <typename Dtype> void DetectNetTransformationLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); AugmentSelection* aug_data = reinterpret_cast<AugmentSelection*>( gpu_workspace_augmentations_.data()); Dtype* tmp_data = reinterpret_cast<Dtype*>( gpu_workspace_tmpdata_.data()); const uint4 bottom_shape = make_uint4( bottom[0]->shape(3), // x = W bottom[0]->shape(2), // y = H bottom[0]->shape(1), // z = C bottom[0]->shape(0)); // w = N const int bottom_count = bottom[0]->count(); const int bottom_pixels = bottom_shape.x * bottom_shape.y * bottom_shape.w; const uint4 top_shape = make_uint4( top[0]->shape(3), // x = W top[0]->shape(2), // y = H top[0]->shape(1), // z = C top[0]->shape(0)); // w = N const int top_count = top[0]->count(); const int top_pixels = top_shape.x * top_shape.y * top_shape.w; // Get current stream int device; CUDA_CHECK(cudaGetDevice(&device)); cudaStream_t stream = GPUMemory::device_stream(device); // Make augmentation selections for each image vector<AugmentSelection> augmentations; for (int i = 0; i < bottom_shape.w; i++) { augmentations.push_back(get_augmentations( cv::Point(bottom_shape.x, bottom_shape.y))); } // Copy augmentation selections to GPU size_t aug_data_sz = sizeof(AugmentSelection) * augmentations.size(); caffe_gpu_memcpy(aug_data_sz, &augmentations[0], aug_data); // Color transformations // NOLINT_NEXT_LINE(whitespace/operators) color_transformations<<<CAFFE_GET_BLOCKS(bottom_pixels), CAFFE_CUDA_NUM_THREADS>>>(bottom_data, tmp_data, bottom_shape, aug_data); // Mean subtraction if (t_param_.has_mean_file()) { // NOLINT_NEXT_LINE(whitespace/operators) pixel_mean_subtraction<<<CAFFE_GET_BLOCKS(bottom_pixels), CAFFE_CUDA_NUM_THREADS>>>(tmp_data, mean_blob_.gpu_data(), bottom_shape); } else if (t_param_.mean_value_size() != 0) { CHECK_EQ(bottom_shape.z, 3) << "Data must have 3 channels when " "using transform_param.mean_value."; // NOLINT_NEXT_LINE(whitespace/operators) channel_mean_subtraction<<<CAFFE_GET_BLOCKS(bottom_pixels), CAFFE_CUDA_NUM_THREADS>>>(tmp_data, bottom_shape, mean_values_[0] * UINT8_MAX, mean_values_[1] * UINT8_MAX, mean_values_[2] * UINT8_MAX); } // Spatial transformations // NOLINT_NEXT_LINE(whitespace/operators) spatial_transformations<<<CAFFE_GET_BLOCKS(top_pixels), CAFFE_CUDA_NUM_THREADS>>>(tmp_data, bottom_shape, aug_data, top_data, top_shape); // Use CPU to transform labels const 
vector<vector<BboxLabel> > list_list_bboxes = blobToLabels(*bottom[1]); for (size_t i = 0; i < bottom[1]->num(); i++) { const vector<BboxLabel>& list_bboxes = list_list_bboxes[i]; Dtype* output_label = &top[1]->mutable_cpu_data()[ top[1]->offset(i, 0, 0, 0) ]; transform_label_cpu(list_bboxes, output_label, augmentations[i], cv::Size(bottom_shape.x, bottom_shape.y)); } } INSTANTIATE_LAYER_GPU_FUNCS(DetectNetTransformationLayer); } // namespace caffe #endif
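A quick host-side sanity check of the cubic interpolation used above (a minimal sketch; cubic_interpolation_host and the sample values are mine, not part of the layer): at d = 0 the polynomial returns v2 and at d = 1 it returns v3, so the spatial resampling reproduces source pixels exactly at integer coordinates.

#include <cassert>
#include <cmath>

// Host-side mirror of the device helper above (hypothetical name, same polynomial).
static float cubic_interpolation_host(float d, float v1, float v2, float v3, float v4) {
    // Same Horner-style nesting and final division by 6 as the kernel code.
    return v2 + d * (-2.0f * v1 - 3.0f * v2 + 6.0f * v3 - 1.0f * v4
                 + d * (3.0f * v1 - 6.0f * v2 + 3.0f * v3
                 + d * (-1.0f * v1 + 3.0f * v2 - 3.0f * v3 + 1.0f * v4))) / 6.0f;
}

int main() {
    const float v1 = 1.0f, v2 = 2.0f, v3 = 5.0f, v4 = 7.0f;
    // d = 0 yields v2 and d = 1 yields v3, i.e. interpolation is exact at grid points.
    assert(std::fabs(cubic_interpolation_host(0.0f, v1, v2, v3, v4) - v2) < 1e-5f);
    assert(std::fabs(cubic_interpolation_host(1.0f, v1, v2, v3, v4) - v3) < 1e-5f);
    return 0;
}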
073d5f50c9be7f739977b906dc8e0f00636067eb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "hd_block_size.h" #define TODEV(A,s) float *A##_d;hipMalloc((void**)&A##_d,((s))*sizeof(float));hipMemcpy(A##_d,A,(s)*sizeof(float),hipMemcpyHostToDevice); #define FROMDEV(A,s) hipMemcpy(A,A##_d,(s)*sizeof(float),hipMemcpyDeviceToHost); #define CLNUP(A) hipFree(A##_d) #define TODEV3(A) TODEV(A,d3) #define TODEV2(A) TODEV(A,d2) #define FROMDEV3(A) FROMDEV(A,d3) #define FROMDEV2(A) FROMDEV(A,d2) //extern __constant__ float rdx_[1]; //extern __constant__ float rdy_[1]; __global__ void horizontal_diffusion_gpu(const int ids, const int ide, const int jds, const int jde, const int kds, const int kde, const int ims, const int ime, const int jms, const int jme, const int kms, const int kme, const int its, const int ite, const int jts, const int jte, const int kts, const int kte, const int cf_specified, const int cf_nested, const int cf_open_xs, const int cf_open_xe, const int cf_open_ys, const int cf_open_ye, const int cf_periodic_x, const int cf_polar, const char name, const float *field, float *tendency, const float *mu, const float *msfux, const float *msfuy, const float *msfvx, const float *msfvx_inv, const float *msfvy, const float *msftx, const float *msfty, const float khdif, const float *xkmhd, const float rdx, const float rdy); extern "C" int gethostname(char * name, size_t len); extern "C" { /** * Gets some basic device information, * sets the device for the task, * and performs a simply alloc and transfer operation on GPU */ int horizontal_diffusion_gpu_init_(int *myproc, int *nproc, int *mydevice) { float x, *x_d; int i, dc; hipError_t cerr; char hostname[64]; hipDeviceProp_t dp; hipEvent_t tS, tE; float timer = 0.0f; hipEventCreate(&tS); hipEventCreate(&tE); // Get some GPU device info hipGetDeviceCount(&dc); if (dc > 4) { fprintf(stderr, "Warning: more than %d devices on node (%d)\n", 4, dc); dc = 4; } fprintf(stderr, "Number of devices on this node: %d\n", dc); i = (*mydevice); if (dc > 0) { if ((cerr = hipSetDevice(i))) { fprintf(stderr, "Non-zero cerr %d\n", cerr); } } gethostname(hostname, 64); fprintf(stderr, "Setting device %02d for task %03d on host %s\n", i, *myproc, hostname); if ((cerr = hipGetDeviceProperties(&dp, i))) { fprintf(stderr, "Device %02d: cerr = %d\n", i, cerr); } else { fprintf(stderr, "Device %02d: name %s\n", i, dp.name); fprintf(stderr, "Device %02d: mem %lu\n", i, dp.totalGlobalMem); fprintf(stderr, "Device %02d: smem %lu\n", i, dp.sharedMemPerBlock); fprintf(stderr, "Device %02d: nreg %d\n", i, dp.regsPerBlock); fprintf(stderr, "Device %02d: warp %d\n", i, dp.warpSize); fprintf(stderr, "Device %02d: pitch %lu\n", i, dp.memPitch); fprintf(stderr, "Device %02d: maxthrds %d\n", i, dp.maxThreadsPerBlock); fprintf(stderr, "Device %02d: maxtdim %d %d %d\n", i, (dp.maxThreadsDim)[0], (dp.maxThreadsDim)[1], (dp.maxThreadsDim)[2]); fprintf(stderr, "Device %02d: maxgdim %d %d %d\n", i, (dp.maxGridSize)[0], (dp.maxGridSize)[1], (dp.maxGridSize)[2]); fprintf(stderr, "Device %02d: clock %d\n", i, dp.clockRate); fprintf(stderr, "Device %02d: talign %lu\n", i, dp.textureAlignment); } hipEventRecord(tS, NULL); hipMalloc((void **) (&x_d), sizeof(float)); hipMemcpy(x_d, &x, sizeof(float), hipMemcpyHostToDevice); hipFree(x_d); hipEventRecord(tE, NULL); hipEventSynchronize(tE); hipEventElapsedTime(&timer, tS, tE); fprintf(stderr, "horizontal_diffusion_gpu_init: %.3f\n", timer); return 0; } /** * Convert fortran index to c index */ int indexI(int fi) { return fi 
+ 4; } int indexJ(int fj) { return fj + 4; } int indexK(int fk) { return fk - 1; } // Dimensiones de las variables int IMS = -4; int IME = 430; int JMS = -4; int JME = 305; int KMS = 1; int KME = 35; int IX = 435; int JX = 310; int KX = 35; /** * Print variable to console */ void printVariable(const char name[], float *var, int ims, int ime, int kms, int kme, int jms, int jme) { printf("%s:\n", name); for (int k = indexK(kms); k <= indexK(kme); k++) { for (int j = indexJ(jms); j <= indexJ(jme); j++) { for (int i = indexI(ims); i <= indexI(ime); i++) { printf("%7.2f\t", var[i + k * IX + j * IX * KX]); } printf("\n"); } printf("*\n"); } } //[435][35][310] int horizontal_diffusion_host_(int *ids, int *ide, int *jds, int *jde, int *kds, int *kde, int *ims, int *ime, int *jms, int *jme, int *kms, int *kme, int *its, int *ite, int *jts, int *jte, int *kts, int *kte, int *cf_specified, int *cf_nested, int *cf_open_xs, int *cf_open_xe, int *cf_open_ys, int *cf_open_ye, int *cf_periodic_x, int *cf_polar, char *name, float *field, float *tendency, float *mu, float *msfux, float *msfuy, float *msfvx, float *msfvx_inv, float *msfvy, float *msftx, float *msfty, float *khdif, float *xkmhd, float *rdx, float *rdy) { // Dimensions int d3 = (*ime - *ims + 1) * (*jme - *jms + 1) * (*kme - *kms + 1); int d2 = (*ime - *ims + 1) * (*jme - *jms + 1); // Timing data hipEvent_t tS0, tE0, tS1, tE1; hipEventCreate(&tS0); hipEventCreate(&tS1); hipEventCreate(&tE0); hipEventCreate(&tE1); float timer = 0.0f; printf("Dimensions:\n"); printf("ids,ide,jds,jde,kds,kde: %d,%d,%d,%d,%d,%d\n", *ids, *ide, *jds, *jde, *kds, *kde); printf("ims,ime,jms,jme,kms,kme: %d,%d,%d,%d,%d,%d\n", *ims, *ime, *jms, *jme, *kms, *kme); printf("its,ite,jts,jte,kts,kte: %d,%d,%d,%d,%d,%d\n", *its, *ite, *jts, *jte, *kts, *kte); /*printf("Input variables: \n"); printf("Boolean: %d %d %d %d %d %d %d %d\n", *cf_specified, *cf_nested, *cf_open_xs, *cf_open_xe, *cf_open_ys, *cf_open_ye, *cf_periodic_x, *cf_polar); printf("String: %c\n", *name); printf("Float: %f %f %f\n", *khdif, *rdx, *rdy);*/ // Starting transference of data to device memory hipEventRecord(tS0, NULL); TODEV3(field); TODEV3(tendency); TODEV3(xkmhd); TODEV2(mu); TODEV2(msfux); TODEV2(msfuy); TODEV2(msfvx); TODEV2(msfvx_inv); TODEV2(msfvy); TODEV2(msftx); TODEV2(msfty); // Main variable - before //printVariable("Tendency (input)", tendency, 100, 120, 1, 1, 200, 200); int remx, remy; remx = (*ime - *ims + 1) % XXX != 0 ? 1 : 0; remy = (*jme - *jms + 1) % YYY != 0 ? 
1 : 0; dim3 dimBlock(XXX, YYY, 1); dim3 dimGrid(((*ime - *ims + 1) / XXX) + remx, ((*jme - *jms + 1) / YYY) + remy, MKX); printf("Call to kernel: block dims %d %d %d\n", dimBlock.x, dimBlock.y, dimBlock.z); printf("Call to kernel: grid dims %d %d %d\n", dimGrid.x, dimGrid.y, dimGrid.z); printf("Calling kernel \n"); hipEventRecord(tS1, NULL); // Changing cache configuration hipFuncSetCacheConfig(horizontal_diffusion_gpu, hipFuncCachePreferL1); // Set constant memory //hipMemcpyToSymbol(&rdx_, &rdx, sizeof(float)); //hipMemcpyToSymbol(&rdy_, &rdy, sizeof(float)); //for( int i=0; i<10; i++){ hipError_t err = hipSuccess; hipLaunchKernelGGL(( horizontal_diffusion_gpu), dim3(dimGrid), dim3(dimBlock), 0, 0, *ids, *ide, *jds, *jde, *kds, *kde, *ims, *ime, *jms, *jme, *kms, *kme, *its, *ite, *jts, *jte, *kts, *kte, *cf_specified, *cf_nested, *cf_open_xs, *cf_open_xe, *cf_open_ys, *cf_open_ye, *cf_periodic_x, *cf_polar, *name, field_d, tendency_d, mu_d, msfux_d, msfuy_d, msfvx_d, msfvx_inv_d, msfvy_d, msftx_d, msfty_d, *khdif, xkmhd_d, *rdx, *rdy); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed to launch kernel (error code %s)!\n", hipGetErrorString(err)); exit (EXIT_FAILURE); } hipDeviceSynchronize(); //} hipEventRecord(tE1, NULL); hipEventSynchronize(tE1); float timerRun; hipEventElapsedTime(&timerRun, tS1, tE1); fprintf(stderr, "Call to kernel (not including data xfer): %.3f msec\n", timerRun); //fprintf(stderr, "Call to kernel (not including data xfer): %.3f msec\n", timerRun/10); // Starting transference of output data from device FROMDEV3(tendency); hipEventRecord(tE0, NULL); hipEventSynchronize(tE0); hipEventElapsedTime(&timer, tS0, tE0); printf("Call to kernel (including data xfer): %.3f msec\n", timer); //printf("Call to kernel (including data xfer): %.3f msec\n", timer - timerRun + (timerRun/10)); //printVariable("(hd.cu): Tendency (output)", tendency, 100, 120, 1, 1, 200, 200); CLNUP(field); CLNUP(tendency); CLNUP(xkmhd); CLNUP(mu); CLNUP(msfux); CLNUP(msfuy); CLNUP(msfvx); CLNUP(msfvx_inv); CLNUP(msfvy); CLNUP(msftx); CLNUP(msfty); return 0; } /* int get_horizontal_diffusion_gpu_levels(int *retval) { (*retval) = MKX; } */ }
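The remx/remy logic above is simply ceiling division when sizing the launch grid. A minimal standalone sketch of that arithmetic, assuming illustrative block dimensions (XXX and YYY are defined in hd_block_size.h, which is not shown):

#include <cstdio>

// Number of blocks needed to cover n elements with blocks of the given width.
static inline int div_up(int n, int block) { return (n + block - 1) / block; }

int main() {
    const int ni = 435;   // ime - ims + 1 for the sample domain above (-4..430)
    const int nj = 310;   // jme - jms + 1 for the sample domain above (-4..305)
    const int bx = 32;    // stand-in for XXX (actual value not shown)
    const int by = 8;     // stand-in for YYY (actual value not shown)
    std::printf("grid: %d x %d blocks of %d x %d threads\n",
                div_up(ni, bx), div_up(nj, by), bx, by);
    return 0;
}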
073d5f50c9be7f739977b906dc8e0f00636067eb.cu
#include <stdio.h> #include "hd_block_size.h" #define TODEV(A,s) float *A##_d;cudaMalloc((void**)&A##_d,((s))*sizeof(float));cudaMemcpy(A##_d,A,(s)*sizeof(float),cudaMemcpyHostToDevice); #define FROMDEV(A,s) cudaMemcpy(A,A##_d,(s)*sizeof(float),cudaMemcpyDeviceToHost); #define CLNUP(A) cudaFree(A##_d) #define TODEV3(A) TODEV(A,d3) #define TODEV2(A) TODEV(A,d2) #define FROMDEV3(A) FROMDEV(A,d3) #define FROMDEV2(A) FROMDEV(A,d2) //extern __constant__ float rdx_[1]; //extern __constant__ float rdy_[1]; __global__ void horizontal_diffusion_gpu(const int ids, const int ide, const int jds, const int jde, const int kds, const int kde, const int ims, const int ime, const int jms, const int jme, const int kms, const int kme, const int its, const int ite, const int jts, const int jte, const int kts, const int kte, const int cf_specified, const int cf_nested, const int cf_open_xs, const int cf_open_xe, const int cf_open_ys, const int cf_open_ye, const int cf_periodic_x, const int cf_polar, const char name, const float *field, float *tendency, const float *mu, const float *msfux, const float *msfuy, const float *msfvx, const float *msfvx_inv, const float *msfvy, const float *msftx, const float *msfty, const float khdif, const float *xkmhd, const float rdx, const float rdy); extern "C" int gethostname(char * name, size_t len); extern "C" { /** * Gets some basic device information, * sets the device for the task, * and performs a simply alloc and transfer operation on GPU */ int horizontal_diffusion_gpu_init_(int *myproc, int *nproc, int *mydevice) { float x, *x_d; int i, dc; cudaError_t cerr; char hostname[64]; cudaDeviceProp dp; cudaEvent_t tS, tE; float timer = 0.0f; cudaEventCreate(&tS); cudaEventCreate(&tE); // Get some GPU device info cudaGetDeviceCount(&dc); if (dc > 4) { fprintf(stderr, "Warning: more than %d devices on node (%d)\n", 4, dc); dc = 4; } fprintf(stderr, "Number of devices on this node: %d\n", dc); i = (*mydevice); if (dc > 0) { if ((cerr = cudaSetDevice(i))) { fprintf(stderr, "Non-zero cerr %d\n", cerr); } } gethostname(hostname, 64); fprintf(stderr, "Setting device %02d for task %03d on host %s\n", i, *myproc, hostname); if ((cerr = cudaGetDeviceProperties(&dp, i))) { fprintf(stderr, "Device %02d: cerr = %d\n", i, cerr); } else { fprintf(stderr, "Device %02d: name %s\n", i, dp.name); fprintf(stderr, "Device %02d: mem %lu\n", i, dp.totalGlobalMem); fprintf(stderr, "Device %02d: smem %lu\n", i, dp.sharedMemPerBlock); fprintf(stderr, "Device %02d: nreg %d\n", i, dp.regsPerBlock); fprintf(stderr, "Device %02d: warp %d\n", i, dp.warpSize); fprintf(stderr, "Device %02d: pitch %lu\n", i, dp.memPitch); fprintf(stderr, "Device %02d: maxthrds %d\n", i, dp.maxThreadsPerBlock); fprintf(stderr, "Device %02d: maxtdim %d %d %d\n", i, (dp.maxThreadsDim)[0], (dp.maxThreadsDim)[1], (dp.maxThreadsDim)[2]); fprintf(stderr, "Device %02d: maxgdim %d %d %d\n", i, (dp.maxGridSize)[0], (dp.maxGridSize)[1], (dp.maxGridSize)[2]); fprintf(stderr, "Device %02d: clock %d\n", i, dp.clockRate); fprintf(stderr, "Device %02d: talign %lu\n", i, dp.textureAlignment); } cudaEventRecord(tS, NULL); cudaMalloc((void **) (&x_d), sizeof(float)); cudaMemcpy(x_d, &x, sizeof(float), cudaMemcpyHostToDevice); cudaFree(x_d); cudaEventRecord(tE, NULL); cudaEventSynchronize(tE); cudaEventElapsedTime(&timer, tS, tE); fprintf(stderr, "horizontal_diffusion_gpu_init: %.3f\n", timer); return 0; } /** * Convert fortran index to c index */ int indexI(int fi) { return fi + 4; } int indexJ(int fj) { return fj + 4; } int indexK(int fk) { 
return fk - 1; } // Dimensiones de las variables int IMS = -4; int IME = 430; int JMS = -4; int JME = 305; int KMS = 1; int KME = 35; int IX = 435; int JX = 310; int KX = 35; /** * Print variable to console */ void printVariable(const char name[], float *var, int ims, int ime, int kms, int kme, int jms, int jme) { printf("%s:\n", name); for (int k = indexK(kms); k <= indexK(kme); k++) { for (int j = indexJ(jms); j <= indexJ(jme); j++) { for (int i = indexI(ims); i <= indexI(ime); i++) { printf("%7.2f\t", var[i + k * IX + j * IX * KX]); } printf("\n"); } printf("*\n"); } } //[435][35][310] int horizontal_diffusion_host_(int *ids, int *ide, int *jds, int *jde, int *kds, int *kde, int *ims, int *ime, int *jms, int *jme, int *kms, int *kme, int *its, int *ite, int *jts, int *jte, int *kts, int *kte, int *cf_specified, int *cf_nested, int *cf_open_xs, int *cf_open_xe, int *cf_open_ys, int *cf_open_ye, int *cf_periodic_x, int *cf_polar, char *name, float *field, float *tendency, float *mu, float *msfux, float *msfuy, float *msfvx, float *msfvx_inv, float *msfvy, float *msftx, float *msfty, float *khdif, float *xkmhd, float *rdx, float *rdy) { // Dimensions int d3 = (*ime - *ims + 1) * (*jme - *jms + 1) * (*kme - *kms + 1); int d2 = (*ime - *ims + 1) * (*jme - *jms + 1); // Timing data cudaEvent_t tS0, tE0, tS1, tE1; cudaEventCreate(&tS0); cudaEventCreate(&tS1); cudaEventCreate(&tE0); cudaEventCreate(&tE1); float timer = 0.0f; printf("Dimensions:\n"); printf("ids,ide,jds,jde,kds,kde: %d,%d,%d,%d,%d,%d\n", *ids, *ide, *jds, *jde, *kds, *kde); printf("ims,ime,jms,jme,kms,kme: %d,%d,%d,%d,%d,%d\n", *ims, *ime, *jms, *jme, *kms, *kme); printf("its,ite,jts,jte,kts,kte: %d,%d,%d,%d,%d,%d\n", *its, *ite, *jts, *jte, *kts, *kte); /*printf("Input variables: \n"); printf("Boolean: %d %d %d %d %d %d %d %d\n", *cf_specified, *cf_nested, *cf_open_xs, *cf_open_xe, *cf_open_ys, *cf_open_ye, *cf_periodic_x, *cf_polar); printf("String: %c\n", *name); printf("Float: %f %f %f\n", *khdif, *rdx, *rdy);*/ // Starting transference of data to device memory cudaEventRecord(tS0, NULL); TODEV3(field); TODEV3(tendency); TODEV3(xkmhd); TODEV2(mu); TODEV2(msfux); TODEV2(msfuy); TODEV2(msfvx); TODEV2(msfvx_inv); TODEV2(msfvy); TODEV2(msftx); TODEV2(msfty); // Main variable - before //printVariable("Tendency (input)", tendency, 100, 120, 1, 1, 200, 200); int remx, remy; remx = (*ime - *ims + 1) % XXX != 0 ? 1 : 0; remy = (*jme - *jms + 1) % YYY != 0 ? 
1 : 0; dim3 dimBlock(XXX, YYY, 1); dim3 dimGrid(((*ime - *ims + 1) / XXX) + remx, ((*jme - *jms + 1) / YYY) + remy, MKX); printf("Call to kernel: block dims %d %d %d\n", dimBlock.x, dimBlock.y, dimBlock.z); printf("Call to kernel: grid dims %d %d %d\n", dimGrid.x, dimGrid.y, dimGrid.z); printf("Calling kernel \n"); cudaEventRecord(tS1, NULL); // Changing cache configuration cudaFuncSetCacheConfig(horizontal_diffusion_gpu, cudaFuncCachePreferL1); // Set constant memory //cudaMemcpyToSymbol(&rdx_, &rdx, sizeof(float)); //cudaMemcpyToSymbol(&rdy_, &rdy, sizeof(float)); //for( int i=0; i<10; i++){ cudaError_t err = cudaSuccess; horizontal_diffusion_gpu<<<dimGrid, dimBlock>>>(*ids, *ide, *jds, *jde, *kds, *kde, *ims, *ime, *jms, *jme, *kms, *kme, *its, *ite, *jts, *jte, *kts, *kte, *cf_specified, *cf_nested, *cf_open_xs, *cf_open_xe, *cf_open_ys, *cf_open_ye, *cf_periodic_x, *cf_polar, *name, field_d, tendency_d, mu_d, msfux_d, msfuy_d, msfvx_d, msfvx_inv_d, msfvy_d, msftx_d, msfty_d, *khdif, xkmhd_d, *rdx, *rdy); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed to launch kernel (error code %s)!\n", cudaGetErrorString(err)); exit (EXIT_FAILURE); } cudaThreadSynchronize(); //} cudaEventRecord(tE1, NULL); cudaEventSynchronize(tE1); float timerRun; cudaEventElapsedTime(&timerRun, tS1, tE1); fprintf(stderr, "Call to kernel (not including data xfer): %.3f msec\n", timerRun); //fprintf(stderr, "Call to kernel (not including data xfer): %.3f msec\n", timerRun/10); // Starting transference of output data from device FROMDEV3(tendency); cudaEventRecord(tE0, NULL); cudaEventSynchronize(tE0); cudaEventElapsedTime(&timer, tS0, tE0); printf("Call to kernel (including data xfer): %.3f msec\n", timer); //printf("Call to kernel (including data xfer): %.3f msec\n", timer - timerRun + (timerRun/10)); //printVariable("(hd.cu): Tendency (output)", tendency, 100, 120, 1, 1, 200, 200); CLNUP(field); CLNUP(tendency); CLNUP(xkmhd); CLNUP(mu); CLNUP(msfux); CLNUP(msfuy); CLNUP(msfvx); CLNUP(msfvx_inv); CLNUP(msfvy); CLNUP(msftx); CLNUP(msfty); return 0; } /* int get_horizontal_diffusion_gpu_levels(int *retval) { (*retval) = MKX; } */ }
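The constant-memory path for rdx/rdy is left commented out above. If it were enabled, a typical wiring (a sketch with illustrative symbol names, not this file's actual code) declares the __constant__ symbols in the kernel's translation unit and copies by symbol rather than by the symbol's address:

#include <cuda_runtime.h>

__constant__ float rdx_c;   // illustrative symbols; the file itself mentions rdx_[1]/rdy_[1]
__constant__ float rdy_c;

__global__ void use_constants(float* out) { out[0] = rdx_c + rdy_c; }

int main() {
    float rdx = 0.25f, rdy = 0.5f, h_out = 0.0f, *d_out = nullptr;
    cudaMalloc(&d_out, sizeof(float));
    // Copy by symbol: pass the __constant__ variable itself, not its address.
    cudaMemcpyToSymbol(rdx_c, &rdx, sizeof(float));
    cudaMemcpyToSymbol(rdy_c, &rdy, sizeof(float));
    use_constants<<<1, 1>>>(d_out);
    cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_out);
    return (h_out == 0.75f) ? 0 : 1;
}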
fe8c7908b9cd7eef487998e29491f7cb4788086a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2022 The Microsoft DeepSpeed Team */ #include "dequantization_utils.h" #include "memory_access_utils.h" namespace cg = cooperative_groups; template <typename T, int numBits, dequantize::Type qType, int unroll, int threads> __global__ void dequantize_kernel(T* __restrict__ dequant_data, const int8_t* __restrict__ q_data, const float* __restrict__ q_params, int elems_per_group, int total_elems) { dequantize::to_global<T, numBits, qType, unroll, threads>( dequant_data, q_data, q_params, elems_per_group, total_elems); } #define LAUNCH_DEQUANT_KERNEL(num_bits, q_type) \ hipLaunchKernelGGL(( dequantize_kernel<T, num_bits, q_type, unroll, threads>), dim3(grid), dim3(block), 0, stream, \ dequant_data, q_data, q_params, elems_per_group, total_elems); template <typename T> void launch_dequantize_kernel(T* dequant_data, const int8_t* q_data, const float* q_params, quantize::Type q_type, int num_bits, int elems_per_group, int total_elems, hipStream_t stream) { constexpr int unroll = 8; constexpr int threads = 512; constexpr int elems_per_block = unroll * threads * dequantize::granularity / (sizeof(T)); const dim3 block(threads); const dim3 grid((total_elems + elems_per_block - 1) / elems_per_block); // TODO(cmikeh2): It may make sense to tune unroll, there is perf benefit for large // problem sizes with this large unroll value. if (num_bits == 8 && q_type == quantize::Type::Symmetric) { LAUNCH_DEQUANT_KERNEL(8, quantize::Type::Symmetric); } else if (num_bits == 8 && q_type == quantize::Type::Asymmetric) { LAUNCH_DEQUANT_KERNEL(8, quantize::Type::Asymmetric); } else if (num_bits == 4 && q_type == quantize::Type::Symmetric) { LAUNCH_DEQUANT_KERNEL(4, quantize::Type::Symmetric); } else if (num_bits == 4 && q_type == quantize::Type::Asymmetric) { LAUNCH_DEQUANT_KERNEL(4, quantize::Type::Asymmetric); } } template void launch_dequantize_kernel(__half* dequant_data, const int8_t* q_data, const float* q_params, quantize::Type q_type, int num_bits, int elems_per_group, int total_elems, hipStream_t stream); template void launch_dequantize_kernel(float* dequant_data, const int8_t* q_data, const float* q_params, quantize::Type q_type, int num_bits, int elems_per_group, int total_elems, hipStream_t stream);
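Grid sizing in the launcher above follows from unroll, threads, and dequantize::granularity. A minimal sketch of that arithmetic for T = __half; the 16-byte granularity is an assumption used purely for illustration (the real constant lives in dequantization_utils.h, not shown):

#include <cstdio>

int main() {
    constexpr int unroll = 8;
    constexpr int threads = 512;
    constexpr int granularity_bytes = 16;   // assumed value, see note above
    constexpr int sizeof_half = 2;          // sizeof(__half)
    constexpr int elems_per_block = unroll * threads * granularity_bytes / sizeof_half;
    const int total_elems = 1 << 20;
    const int grid_blocks = (total_elems + elems_per_block - 1) / elems_per_block;
    std::printf("elems_per_block = %d, grid = %d blocks\n", elems_per_block, grid_blocks);
    return 0;
}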
fe8c7908b9cd7eef487998e29491f7cb4788086a.cu
/* Copyright 2022 The Microsoft DeepSpeed Team */ #include "dequantization_utils.h" #include "memory_access_utils.h" namespace cg = cooperative_groups; template <typename T, int numBits, dequantize::Type qType, int unroll, int threads> __global__ void dequantize_kernel(T* __restrict__ dequant_data, const int8_t* __restrict__ q_data, const float* __restrict__ q_params, int elems_per_group, int total_elems) { dequantize::to_global<T, numBits, qType, unroll, threads>( dequant_data, q_data, q_params, elems_per_group, total_elems); } #define LAUNCH_DEQUANT_KERNEL(num_bits, q_type) \ dequantize_kernel<T, num_bits, q_type, unroll, threads><<<grid, block, 0, stream>>>( \ dequant_data, q_data, q_params, elems_per_group, total_elems); template <typename T> void launch_dequantize_kernel(T* dequant_data, const int8_t* q_data, const float* q_params, quantize::Type q_type, int num_bits, int elems_per_group, int total_elems, cudaStream_t stream) { constexpr int unroll = 8; constexpr int threads = 512; constexpr int elems_per_block = unroll * threads * dequantize::granularity / (sizeof(T)); const dim3 block(threads); const dim3 grid((total_elems + elems_per_block - 1) / elems_per_block); // TODO(cmikeh2): It may make sense to tune unroll, there is perf benefit for large // problem sizes with this large unroll value. if (num_bits == 8 && q_type == quantize::Type::Symmetric) { LAUNCH_DEQUANT_KERNEL(8, quantize::Type::Symmetric); } else if (num_bits == 8 && q_type == quantize::Type::Asymmetric) { LAUNCH_DEQUANT_KERNEL(8, quantize::Type::Asymmetric); } else if (num_bits == 4 && q_type == quantize::Type::Symmetric) { LAUNCH_DEQUANT_KERNEL(4, quantize::Type::Symmetric); } else if (num_bits == 4 && q_type == quantize::Type::Asymmetric) { LAUNCH_DEQUANT_KERNEL(4, quantize::Type::Asymmetric); } } template void launch_dequantize_kernel(__half* dequant_data, const int8_t* q_data, const float* q_params, quantize::Type q_type, int num_bits, int elems_per_group, int total_elems, cudaStream_t stream); template void launch_dequantize_kernel(float* dequant_data, const int8_t* q_data, const float* q_params, quantize::Type q_type, int num_bits, int elems_per_group, int total_elems, cudaStream_t stream);
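The per-element math is delegated to dequantize::to_global in dequantization_utils.h (not shown). Purely as a generic illustration of the two quantize::Type branches dispatched above, and not necessarily this library's exact parameter packing, int8 dequantization recovers values as scale * q in the symmetric case and scale * q + offset in the asymmetric case, with one parameter set per group:

#include <cstdint>
#include <cstdio>

// Generic int8 dequantization forms (illustrative helpers, not from the codebase).
static float dequant_symmetric(int8_t q, float scale) {
    return scale * static_cast<float>(q);
}
static float dequant_asymmetric(int8_t q, float scale, float offset) {
    return scale * static_cast<float>(q) + offset;
}

int main() {
    std::printf("sym: %f  asym: %f\n",
                dequant_symmetric(-64, 0.05f),
                dequant_asymmetric(-64, 0.05f, 3.2f));
    return 0;
}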
c258f058e917497c2f0493b9bf70047bac7b649d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "testMultiGPU_Jacobi2D_Decom.cuh" #include <iostream> #include <chrono> #include <memory> #include <vector> #include <fstream> using namespace std; using namespace std::chrono; #define IMUL(a,b) __mul24(a,b) //hipError_t performMultiGPUJacobi(); //Support for below c++14 on *nix template<typename T, typename ...Args> std::unique_ptr<T> make_unique(Args&& ...args) { return std::unique_ptr<T>(new T(std::forward<Args>(args)...)); } struct create_Device { int deviceID; //In a GPU topology set the GPU position int devicePosition_X; int devicePosition_Y; int devicePosition_Z; vector<float> eHalo; vector<float> wHalo; vector<float> nHalo; vector<float> sHalo; //Flags check the halos needed by the device int eHalo_flag = 0; int wHalo_flag = 0; int nHalo_flag = 0; int sHalo_flag = 0; }; //Simple Jacobi iteration __global__ void jacobi_Simple(const float *A0, const float *A1, const float *A2, const float *A3, const float *A4, float *x_in, float *x_out, const float *rhs, const int ehalo_flag, const int whalo_flag, const int nhalo_flag, const int shalo_flag, float *ehalo, float *whalo, float *nhalo, float *shalo, const int deviceID, const int numDevices, const int domain_Decom) { int index = threadIdx.x + blockDim.x * blockIdx.x; float result = rhs[index]; int dim_x = blockDim.x;// dim across x int dim_y = gridDim.x; int x_pos = blockIdx.x; int y_pos = threadIdx.x; //result = nhalo[y_pos]; //x_out[index] = result; //Get the boundaries int leftBoundaryElem = x_pos * (dim_x); int rightBoundaryElem = (x_pos * dim_x) + (dim_x - 1); int topBoundaryElem = y_pos + ((dim_y - 1) * (dim_x)); int bottomBoundaryElem = y_pos; //Halo computation for 1D Decompostion: For the First and Last GPU Halo computation on both the sides(nhalo and shalo wont be needed) if (domain_Decom == 1) { if (numDevices > 1) { //First GPU if (deviceID == 0) { //We need to use nhalos //Carry out computations for boundary elements if (index != leftBoundaryElem) //Left result -= A1[index] * x_in[index - 1]; if (index != rightBoundaryElem) //Right result -= A3[index] * x_in[index + 1]; if (index != bottomBoundaryElem) //Bottom result -= A0[index] * x_in[index - dim_x]; if (index != topBoundaryElem) //Top result -= A4[index] * x_in[index + dim_x]; //The top boundary needs element from nhalo if (index == topBoundaryElem) //nHalos result -= A4[index] * nhalo[y_pos]; result /= A2[index]; x_out[index] = result; //Update Halo at the end of computation if (index == topBoundaryElem) //nHalos updated nhalo[y_pos] = result; return; } //Last GPU else if (deviceID == (numDevices - 1)) { //We need to use shalos //Carry out computations for boundary elements if (index != leftBoundaryElem) //Left result -= A1[index] * x_in[index - 1]; if (index != rightBoundaryElem) //Right result -= A3[index] * x_in[index + 1]; if (index != bottomBoundaryElem) //Bottom result -= A0[index] * x_in[index - dim_x]; //The Bottom boundary needs elements from shalo if (index == bottomBoundaryElem) //nHalos result -= A0[index] * shalo[y_pos]; if (index != topBoundaryElem) //Top result -= A4[index] * x_in[index + dim_x]; result /= A2[index]; x_out[index] = result; //Update Halo at the end of computation if (index == bottomBoundaryElem) //sHalos updated shalo[y_pos] = result; return; } //For all the middle GPUs else { //We need to use both shalos and nhalos //Carry out computations for boundary elements if (index != leftBoundaryElem) //Left result 
-= A1[index] * x_in[index - 1]; if (index != rightBoundaryElem) //Right result -= A3[index] * x_in[index + 1]; if (index != bottomBoundaryElem) //Bottom result -= A0[index] * x_in[index - dim_x]; //The Bottom boundary needs elements from shalo if (index == bottomBoundaryElem) //nHalos result -= A0[index] * shalo[y_pos]; if (index != topBoundaryElem) //Top result -= A4[index] * x_in[index + dim_x]; //The top boundary needs element from nhalo if (index == topBoundaryElem) //nHalos result -= A4[index] * nhalo[y_pos]; result /= A2[index]; x_out[index] = result; //Update Halo at the end of computation if (index == bottomBoundaryElem) //sHalos updated shalo[y_pos] = result; //Update Halo at the end of computation if (index == topBoundaryElem) //nHalos updated nhalo[y_pos] = result; return; } } } else if (domain_Decom == 2) { //======Left Bounday Elem if (index != leftBoundaryElem) //Left result -= A1[index] * x_in[index - 1]; //Computation using the Halos if (index == leftBoundaryElem) { if (whalo_flag == 1) { result -= A1[index] * whalo[x_pos]; } } //======Right Bounday Elem if (index != rightBoundaryElem) //Right result -= A3[index] * x_in[index + 1]; if (index == rightBoundaryElem) { if (ehalo_flag == 1) { result -= A3[index] * ehalo[x_pos]; } } //======Bottom Bounday Elem if (index != bottomBoundaryElem) //Bottom result -= A0[index] * x_in[index - dim_x]; if (index == bottomBoundaryElem) { if (shalo_flag == 1) { result -= A0[index] * shalo[y_pos]; } } //======Top Bounday Elem if (index != topBoundaryElem) //Top result -= A4[index] * x_in[index + dim_x]; if (index == topBoundaryElem) { if (nhalo_flag == 1) { result -= A4[index] * nhalo[y_pos]; } } result /= A2[index]; x_out[index] = result; //Updating Halos at the End of the computation if (index == topBoundaryElem) { if (nhalo_flag == 1) { nhalo[y_pos] = result; } } if (index == bottomBoundaryElem) { if (shalo_flag == 1) { shalo[y_pos] = result; } } if (index == leftBoundaryElem) { if (whalo_flag == 1) { whalo[x_pos] = result; } } if (index == rightBoundaryElem) { if (ehalo_flag == 1) { ehalo[x_pos] = result; } } return; } //For computations on a Machine with a single GPU else { {//For some reason order of computation (left,right,top and bottom) gives a different result //Carry out computations for boundary elements if (index != leftBoundaryElem) //Left result -= A1[index] * x_in[index - 1]; if (index != rightBoundaryElem) //Right result -= A3[index] * x_in[index + 1]; if (index != bottomBoundaryElem) //Bottom result -= A0[index] * x_in[index - dim_x]; if (index != topBoundaryElem) //Top result -= A4[index] * x_in[index + dim_x]; result /= A2[index]; x_out[index] = result; return; } } } //========================MultiGPU utility functions============================================================================ void checkP2Paccess(int numGPUs) { for (int i = 0; i<numGPUs; i++) { hipSetDevice(i); for (int j = 0; j<numGPUs; j++) { int access; if (i != j) { hipDeviceCanAccessPeer(&access, i, j); if (auto err = hipGetLastError()) { cout << "P2P Operations failed : " << hipGetErrorString(err) << endl; return; } } } } cout << "\n***NOTE: In case a device doesn't have P2P access to other one, it falls back to normal memcopy procedure.\nSo you can see lesser Bandwidth (GB/s) in those cases.\n\n"; } bool enableP2P(int numGPUs) { for (int i = 0; i<numGPUs; i++) { hipSetDevice(i); for (int j = 0; j<numGPUs; j++) { int access; hipDeviceCanAccessPeer(&access, i, j); if (auto err = hipGetLastError()) { cout << "P2P Operations failed while enabling: " << 
hipGetErrorString(err) << endl; return false; } if (access) { hipDeviceEnablePeerAccess(j, 0); if (auto err = hipGetLastError()) { cout << "P2P Operations failed while enabling: " << hipGetErrorString(err) << endl; return false; } } } } return true; } void disableP2P(int numGPUs) { for (int i = 0; i<numGPUs; i++) { hipSetDevice(i); for (int j = 0; j<numGPUs; j++) { int access; hipDeviceCanAccessPeer(&access, i, j); if (auto err = hipGetLastError()) { cout << "P2P Operations failed while disabling : " << hipGetErrorString(err) << endl; return; } if (access) { hipDeviceDisablePeerAccess(j); if (auto err = hipGetLastError()) { cout << "P2P Operations failed while disabling: " << hipGetErrorString(err) << endl; return; } } } } } //=============================================================================================================================== //====================================Creating Topology with the number of Devices available==================================== void generateGPUGRID(int numDevices, int &numberOfDevicesAlong_X, int &numberOfDevicesAlong_Y) { //Finding GPU topology along x and y //Assumuing total number of devices is a perfect square(To be changed later) numberOfDevicesAlong_X = (int)sqrt(numDevices); numberOfDevicesAlong_Y = (int)numberOfDevicesAlong_X; } /* Creates a topology for a number of devices in a system for ex. The devices are aware of left, right, top and bottom neigbours in 2D 1. It also decides the chunk per devices by determining x-dimension and y-dimensions for per chunk of data per device. 2. It also initializes halos for each devices which can be exchanged with the neighbours */ void createTopology(int numDevices, vector<create_Device> &deviceArray, int numberOfDevicesAlong_X, int numberOfDevicesAlong_Y) { deviceArray.resize(numDevices); unsigned int deviceCount = 0; for (int gridCount_X = 0; gridCount_X < numberOfDevicesAlong_X; gridCount_X++) { for (int gridCount_Y = 0; gridCount_Y < numberOfDevicesAlong_Y; gridCount_Y++) { deviceArray[deviceCount].deviceID = deviceCount; deviceArray[deviceCount].devicePosition_X = gridCount_X; deviceArray[deviceCount].devicePosition_Y = gridCount_Y; //devicePosition_Z to be changed later deviceArray[deviceCount].devicePosition_Z = 1; deviceCount++; } } } //============================================================================================================================== //Init Halos: In 1D decomposition only North and South Halos are used. In 2D decomposition North, South, East and West Halo need to be initialized and computed //TODO:Create a Halo Exchange Mechanism for 2D Multi GPU topology void initHalos2D(create_Device &device, int chunk_X, int chunk_Y, float *vec_in, int maxdevicesAlong_X, int maxDevicesAlong_Y, int rowStartPos, int rowEndPos, int dim) { /*cout << endl << "Inside Halo Computation 2D. printing Details"; cout << endl << "Device ID " << device.deviceID; cout << endl << "Device position X " << device.devicePosition_X; cout << endl << "Device position Y " << device.devicePosition_Y; cout << endl << "Row Start " << rowStartPos; cout << endl << "Row End " << rowEndPos;*/ //Assigning counter for each individual Halos. 
To prevent update of the same counter //int rowStartPosEast = rowStartPos; int rowStartPosWest = rowStartPos; int rowStartPosNorth = rowStartPos; int rowStartPosSouth = rowStartPos; int rowEndPosEast = rowEndPos; //int rowEndPosWest = rowEndPos; //int rowEndPosNorth = rowEndPos; //int rowEndPosSouth = rowEndPos; //Checks provided for Boundary devices in GPU topology if ((device.devicePosition_Y - 1) >= 0) { //cout << "West Halo needed "; device.wHalo_flag = 1; device.wHalo.resize(chunk_Y); for (int rowNum = 0; rowNum < chunk_Y; rowNum++) { device.wHalo[rowNum] = vec_in[rowStartPosWest]; //cout << rowStartPosWest << " "; rowStartPosWest += dim; } } if ((device.devicePosition_Y + 1) < maxdevicesAlong_X) { //cout << "East Halo needed "; device.eHalo_flag = 1; device.eHalo.resize(chunk_Y); for (int rowNum = 0; rowNum < chunk_Y; rowNum++) { device.eHalo[rowNum] = vec_in[rowEndPosEast]; //cout << rowEndPosEast << " "; rowEndPosEast += dim; } } if ((device.devicePosition_X - 1) >= 0) { //cout << "South Halo needed "; device.sHalo_flag = 1; device.sHalo.resize(chunk_X); for (int rowNum = 0; rowNum < chunk_X; rowNum++) { device.sHalo[rowNum] = vec_in[rowStartPosSouth]; //cout << rowStartPosSouth << " "; rowStartPosSouth++; } } if ((device.devicePosition_X + 1) < maxDevicesAlong_Y) { //cout << "North Halo needed "; device.nHalo_flag = 1; device.nHalo.resize(chunk_X); rowStartPosNorth = rowStartPosNorth + (dim * (chunk_Y - 1)); for (int rowNum = 0; rowNum < chunk_X; rowNum++) { device.nHalo[rowNum] = vec_in[rowStartPosNorth]; //cout << rowStartPosNorth << " "; rowStartPosNorth++; } } } //======================================Exchange Halos: on Host============================================== int getDeviceIDfromCoord(int devCoord_x, int devCoord_y, int numberofDevicesAlong_X) { int devID = (devCoord_x * numberofDevicesAlong_X) + devCoord_y; return devID; } void exchangehalos_onHost(int numDevices, vector<create_Device> &deviceArray, int numberofDevicesAlong_X) { //Halos exist in pairs so: //Important: A device exchanges North-to-South Pairs and East-to-West Pairs only. Not South-to-North pairs and West-to-East pairs //That way the number of exchanges are kept to minimum for (int dev = 0;dev < numDevices;dev++) { int getDevCoord_X = deviceArray[dev].devicePosition_X; int getDevCoord_Y = deviceArray[dev].devicePosition_Y; //Check if device is having a north Halo buffer if (deviceArray[dev].nHalo_flag == 1) { int devIDtoNorth = getDeviceIDfromCoord(getDevCoord_X + 1, getDevCoord_Y, numberofDevicesAlong_X); //Exchange Halos (deviceArray[dev].nHalo).swap(deviceArray[devIDtoNorth].sHalo); } //Check if device is having a east Halo buffer if (deviceArray[dev].eHalo_flag == 1) { int devIDtoEast = getDeviceIDfromCoord(getDevCoord_X, getDevCoord_Y + 1, numberofDevicesAlong_X); //Exchange Halos (deviceArray[dev].eHalo).swap(deviceArray[devIDtoEast].wHalo); } } } bool exchangehalos_onHostPinned(int numDevices, vector<create_Device> &deviceArray, int numberofDevicesAlong_X, vector<float*> &nHalosPinned, vector<float*> &sHalosPinned, vector<float*> &eHalosPinned, vector<float*> &wHalosPinned) { //Halos exist in pairs so: //Important: A device exchanges North-to-South Pairs and East-to-West Pairs only. 
Not South-to-North pairs and West-to-East pairs //That way the number of exchanges are kept to minimum for (int dev = 0;dev < numDevices;dev++) { int getDevCoord_X = deviceArray[dev].devicePosition_X; int getDevCoord_Y = deviceArray[dev].devicePosition_Y; //Check if device is having a north Halo buffer if (deviceArray[dev].nHalo_flag == 1) { int devIDtoNorth = getDeviceIDfromCoord(getDevCoord_X + 1, getDevCoord_Y, numberofDevicesAlong_X); //Exchange Halos //(deviceArray[dev].nHalo).swap(deviceArray[devIDtoNorth].sHalo); swap(nHalosPinned[dev], sHalosPinned[devIDtoNorth]); } //Check if device is having a east Halo buffer if (deviceArray[dev].eHalo_flag == 1) { int devIDtoEast = getDeviceIDfromCoord(getDevCoord_X, getDevCoord_Y + 1, numberofDevicesAlong_X); //Exchange Halos //(deviceArray[dev].eHalo).swap(deviceArray[devIDtoEast].wHalo); swap(eHalosPinned[dev], wHalosPinned[devIDtoEast]); } } return true; } //===========================Exchange Halos: on Host Ends===================================================== //Init matrix Diagonals A0, A1, A2, A3, A4 void copyValues(float *A0, float *A1, float *A2, float *A3, float *A4, float *rhs, float *vec_in, float *vec_out, int dim, float *val_A0, float *val_A1, float *val_A2, float *val_A3, float *val_A4, float *val_rhs, float *val_x_in) { unsigned int size = dim * dim; for (unsigned int i = 0; i < size; i++) { A0[i] = val_A0[i]; A1[i] = val_A1[i]; A2[i] = val_A2[i]; A3[i] = val_A3[i]; A4[i] = val_A4[i]; rhs[i] = val_rhs[i]; vec_in[i] = val_x_in[i]; vec_out[i] = 0.0f; } } void getAllDeviceProperties() { int nDevices; hipGetDeviceCount(&nDevices); for (int i = 0; i < nDevices; i++) { hipDeviceProp_t prop; hipGetDeviceProperties(&prop, i); cout << " Device Number: " << i << endl; cout << " Device name: " << prop.name << endl; cout << " Memory Clock Rate (KHz): " << prop.memoryClockRate << endl; cout << " Memory Bus Width (bits): " << prop.memoryBusWidth << endl;; cout << " Peak Memory Bandwidth (GB/s): " << 2.0*prop.memoryClockRate*(prop.memoryBusWidth / 8) / 1.0e6 << endl << endl << endl; } } /* Prints an output file for checking results */ void sendToPrint(float *partial_result, int devicePosition_X, int devicePosition_Y, int numberOfDevicesAlong_X, int chunk_X, int chunk_Y, int dim, int totalSize, vector<float> &result, int numDevices, int currentIteration, int numberOfTotalIterations) { int devicePosX = devicePosition_X; int devicePosY = devicePosition_Y; //Calculating data position based on device coords //numberOfDevicesAlong_X * Chunk_X * Chunk_Y : finds out the total data per row of GPUs allocated int dataStartPos_X = (devicePosX * numberOfDevicesAlong_X * chunk_X * chunk_Y) + (devicePosY * chunk_X); int dataEndPos_X = dataStartPos_X + chunk_X; //One complete row across all GPU is dim in order to get the next element above an element we add (currentPosition + dim ) int rowStartPos = dataStartPos_X; int rowEndPos = dataEndPos_X; int indexCounter = 0; //cout << endl; for (int rowNum = 0; rowNum < chunk_Y; rowNum++) { //Get one complete row for the GPU for (int pos = rowStartPos; pos < rowEndPos; pos++) { result[pos] = partial_result[indexCounter]; indexCounter++; } //cout << endl; rowStartPos += dim; rowEndPos += dim; } //Printing when the last device computation is done: Remove the check to check computation for each device int deviceID = getDeviceIDfromCoord(devicePosition_X, devicePosition_Y, numberOfDevicesAlong_X); if ((deviceID == (numDevices - 1)) && (currentIteration == (numberOfTotalIterations - 1))) { ofstream myfile; 
myfile.open("data2.txt"); //Printing the values here for (int i = totalSize;i > 0; i--) { if (i%dim == 0) { myfile << endl; } myfile << result[i - 1] << " "; } myfile.close(); } } hipError_t performMultiGPUJacobi(unsigned int val_dim, unsigned int numJacobiIt, float* val_A0, float* val_A1, float* val_A2, float* val_A3, float* val_A4, float* val_rhs, float* val_x_in) { //Fixed value changed later int dim = 8; if (val_dim != 0) { dim = val_dim; } //TODO: write a 2D domain decomposition method for more than 2 GPUs int size = dim * dim; //auto result = make_unique<float[]>(size); //Create Diagonal Vectors std::vector<float> a0(size); std::vector<float> a1(size); std::vector<float> a2(size); std::vector<float> a3(size); std::vector<float> a4(size); std::vector<float> vec_in(size); std::vector<float> vec_out(size); std::vector<float> rhs(size); std::vector<float> result(size); //Get the total number of devices int numDevices; hipGetDeviceCount(&numDevices); cout << endl << "Total number of Devices in the System are : " << numDevices << endl; getAllDeviceProperties(); //Enable Peer-to-Peer access across all GPUs : Done on phase 2 of development bool p2penabled = false; p2penabled = enableP2P(numDevices); //Configuring the number of GPU's manually //numDevices=2; copyValues(&a0[0], &a1[0], &a2[0], &a3[0], &a4[0], &rhs[0], &vec_in[0], &vec_out[0], dim, &val_A0[0], &val_A1[0], &val_A2[0], &val_A3[0], &val_A4[0], &val_rhs[0], &val_x_in[0]); vector<create_Device> deviceArray; /* Distributed Compuation using Halos: Algorithm 1. Init Halos. 1.a) In 1D decomposition nhalo and shalo intialized from vector x_in 1.b) In 2D decompsition nhalo,shalo, ehalo and whalo initialozed from vector x_in 2. Pass the halos to Jacobi_kernal. 3. Store the result computed at the boundary into the halo boundary positions. 4. Swap nhalo and shalo pairs in 1D decompostion. Swap (nhalo,shalo) and (ehalo,whalo) in 2D. */ //=================================Domain Decomposition Logic Starts================================================================= /*Generating a GPU Grid with multiple GPUs and creating a Topology*/ int numberOfDevicesAlong_X = 1; int numberOfDevicesAlong_Y = 1; generateGPUGRID(numDevices, numberOfDevicesAlong_X, numberOfDevicesAlong_Y); cout << "GPU grid structure is : " << numberOfDevicesAlong_X << " X " << numberOfDevicesAlong_Y << endl; //Set Decomposition dimension 1D or 2D: when decomposition is 0. 
Computation happens on a single GPU int decom_Dim = 2; //Total elements along each dim in 2D int chunk_X = dim / numberOfDevicesAlong_X; int chunk_Y = dim / numberOfDevicesAlong_Y; /* Creating a GPU topology with multiple devices*/ createTopology(numDevices, deviceArray, numberOfDevicesAlong_X, numberOfDevicesAlong_Y); //Let the total number of GPU be 2 : has to be changed later //Computation divided into (size/2) on first and size-(size/2) on second std::vector<int> domainDivision(numDevices); //Logic for total chunk per device (Domain distribution) for (int i = 0; i < numDevices; i++) { //Chunk per GPU will be same irrepective of 1D or 2D decomposition domainDivision[i] = size / numDevices; } //For use on Device std::vector<float*>d_A0(numDevices); std::vector<float*>d_A1(numDevices); std::vector<float*>d_A2(numDevices); std::vector<float*>d_A3(numDevices); std::vector<float*>d_A4(numDevices); std::vector<float*>d_Vec_In(numDevices); std::vector<float*>d_Vec_Out(numDevices); std::vector<float*>d_nhalos(numDevices); std::vector<float*>d_shalos(numDevices); std::vector<float*>d_ehalos(numDevices); std::vector<float*>d_whalos(numDevices); std::vector<float*>d_Rhs(numDevices); std::vector<float*>x_buffer(numDevices); std::vector<float*>y_buffer(numDevices); //Note: Using Pinned memory on Host for Halos -> Performance Approach 1 vector<float*>nHalo_pinned(numDevices); vector<float*>sHalo_pinned(numDevices); vector<float*>wHalo_pinned(numDevices); vector<float*>eHalo_pinned(numDevices); for (int dev = 0;dev < numDevices;dev++) { hipSetDevice(dev); hipHostMalloc((void**)&nHalo_pinned[dev], (chunk_X) * sizeof(float)); hipHostMalloc((void**)&sHalo_pinned[dev], (chunk_X) * sizeof(float)); hipHostMalloc((void**)&wHalo_pinned[dev], (chunk_Y) * sizeof(float)); hipHostMalloc((void**)&eHalo_pinned[dev], (chunk_Y) * sizeof(float)); } for (int dev = 0; dev < numDevices; dev++) { //Setting the device before allocation hipSetDevice(dev); //cudamalloc the Diagonals hipMalloc((void**)&d_A0[dev], domainDivision[dev] * sizeof(float)); hipMalloc((void**)&d_A1[dev], domainDivision[dev] * sizeof(float)); hipMalloc((void**)&d_A2[dev], domainDivision[dev] * sizeof(float)); hipMalloc((void**)&d_A3[dev], domainDivision[dev] * sizeof(float)); hipMalloc((void**)&d_A4[dev], domainDivision[dev] * sizeof(float)); //Using pinned memory as part of performance upgrade- Phase 2 of development //cudamalloc the Input Vector and Result vector hipMalloc((void**)&d_Vec_In[dev], domainDivision[dev] * sizeof(float)); hipMalloc((void**)&d_Vec_Out[dev], domainDivision[dev] * sizeof(float)); hipMalloc((void**)&d_Rhs[dev], domainDivision[dev] * sizeof(float)); //hipMalloc Halos: North and South--1D. 
TODO: East and West for 2D hipMalloc((void**)&d_nhalos[dev], chunk_X * sizeof(float)); hipMalloc((void**)&d_shalos[dev], chunk_X * sizeof(float)); hipMalloc((void**)&d_ehalos[dev], chunk_Y * sizeof(float)); hipMalloc((void**)&d_whalos[dev], chunk_Y * sizeof(float)); //Buffer memory used for p2p exchange hipMalloc((void**)&x_buffer[dev], chunk_X * sizeof(float)); hipMalloc((void**)&y_buffer[dev], chunk_Y * sizeof(float)); } /* The transfer of Data from Host to Device : Domain Decomposition in 2D*/ if (decom_Dim == 2) { //Create Partial Diagonal Vectors //Size per GPU will be int chunkSize = chunk_X * chunk_Y; std::vector<float> partial_a0(chunkSize); std::vector<float> partial_a1(chunkSize); std::vector<float> partial_a2(chunkSize); std::vector<float> partial_a3(chunkSize); std::vector<float> partial_a4(chunkSize); std::vector<float> partial_vec_in(chunkSize); std::vector<float> partial_vec_out(chunkSize); std::vector<float> partial_rhs(chunkSize); std::vector<float> partial_result(chunkSize); for (int dev = 0; dev < numDevices; dev++) { //Test the properties of the device assigned //cout << endl << "New Logical Device created " << deviceArray[dev].deviceID; //cout << endl << "New Logical Device (X,Y) coord (" << deviceArray[dev].devicePosition_X << "," << deviceArray[dev].devicePosition_Y << ")"; //==========Important: Logic for creation of Chunks to be allocated to GPUs========================================== //Important : Mention about the correlation between the topology and data position in the thesis int devicePosX = deviceArray[dev].devicePosition_X; int devicePosY = deviceArray[dev].devicePosition_Y; //Calculating data position based on device coords //numberOfDevicesAlong_X * Chunk_X * Chunk_Y : finds out the total data per row of GPUs allocated int dataStartPos_X = (devicePosX * numberOfDevicesAlong_X * chunk_X * chunk_Y) + (devicePosY * chunk_X); int dataEndPos_X = dataStartPos_X + chunk_X; //One complete row across all GPU is dim in order to get the next element above an element we add (currentPosition + dim ) int rowStartPos = dataStartPos_X; int rowEndPos = dataEndPos_X; int indexCounter = 0; //Initialize Halos initHalos2D(deviceArray[dev], chunk_X, chunk_Y, &vec_in[0], numberOfDevicesAlong_X, numberOfDevicesAlong_Y, rowStartPos, rowEndPos - 1, dim); for (int rowNum = 0; rowNum < chunk_Y; rowNum++) { //Get one complete row for the GPU for (int pos = rowStartPos; pos < rowEndPos; pos++) { partial_a0[indexCounter] = a0[pos]; partial_a1[indexCounter] = a1[pos]; partial_a2[indexCounter] = a2[pos]; partial_a3[indexCounter] = a3[pos]; partial_a4[indexCounter] = a4[pos]; partial_vec_in[indexCounter] = vec_in[pos]; partial_vec_out[indexCounter] = vec_out[pos]; partial_rhs[indexCounter] = rhs[pos]; partial_result[indexCounter] = result[pos]; indexCounter++; } rowStartPos += dim; rowEndPos += dim; } //==========Important: Logic for creation of Chunks to be allocated to GPUs Ends ========================================== //Setting Cuda device hipSetDevice(dev); //Copy the diagonals from host to device : calling all at once instead of putting inside the for loop hipMemcpy(d_A0[dev], &partial_a0[0], domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_A1[dev], &partial_a1[0], domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_A2[dev], &partial_a2[0], domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_A3[dev], &partial_a3[0], domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_A4[dev], &partial_a4[0], 
domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice); //Copy in and out vectors and RHS hipMemcpy(d_Vec_In[dev], &partial_vec_in[0], domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_Vec_Out[dev], &partial_vec_out[0], domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_Rhs[dev], &partial_rhs[0], domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice); } if (auto err = hipGetLastError()) { cout << "Data copy failed 1: " << hipGetErrorString(err) << endl; return err; } //Copy intial Halos in 2D //Initial Exchange Halos: Then do intial cudaMemcopies exchangehalos_onHost(numDevices, deviceArray, numberOfDevicesAlong_X); for (int dev = 0; dev < numDevices; dev++) { hipSetDevice(dev); //Copying Halos to the device if (deviceArray[dev].nHalo_flag == 1) { hipMemcpy(d_nhalos[dev], &deviceArray[dev].nHalo[0], chunk_X * sizeof(float), hipMemcpyHostToDevice); } if (deviceArray[dev].sHalo_flag == 1) { hipMemcpy(d_shalos[dev], &deviceArray[dev].sHalo[0], chunk_X * sizeof(float), hipMemcpyHostToDevice); } if (deviceArray[dev].eHalo_flag == 1) { hipMemcpy(d_ehalos[dev], &deviceArray[dev].eHalo[0], chunk_Y * sizeof(float), hipMemcpyHostToDevice); } if (deviceArray[dev].wHalo_flag == 1) { hipMemcpy(d_whalos[dev], &deviceArray[dev].wHalo[0], chunk_Y * sizeof(float), hipMemcpyHostToDevice); } } //Development phase 2 changes : For p2p operation communication initialize buffers for (int dev = 0; dev < numDevices; dev++) { hipSetDevice(dev); //Copying Halos to the device if (deviceArray[dev].nHalo_flag == 1) { hipMemcpy(x_buffer[dev], &deviceArray[dev].nHalo[0], chunk_X * sizeof(float), hipMemcpyHostToDevice); } if (deviceArray[dev].wHalo_flag == 1) { hipMemcpy(y_buffer[dev], &deviceArray[dev].eHalo[0], chunk_Y * sizeof(float), hipMemcpyHostToDevice); } } } //=================================Domain Decomposition Logic Ends ================================================================= int blocksize = chunk_X; int threads = chunk_Y; //cout << endl<<"blocksize" << blocksize; //cout << endl<<"thread" << threads; //Call to kernal int iterations = 0; if (numJacobiIt != 0) { iterations = numJacobiIt; } else { cout << endl << " No. of iterations is zero exiting... "; //return; } //===========================================CUDA Stream implementation for performance. Phase 2 of Development ==================================================== //===========Algorithm Improvement: Identify the neighbours so that they could be launched together and the exchange can take place. Without having to wait for computation across all devices============================ hipStream_t streams[4];//Possible to declare it dynamically ? Yes. Using Vectors. 
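	//Sketch of the dynamic alternative mentioned in the comment above (illustrative only, not enabled here):
	//	std::vector<hipStream_t> streamVec(numDevices);
	//	for (int d = 0; d < numDevices; d++) { hipSetDevice(d); hipStreamCreate(&streamVec[d]); }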
hipStream_t streamsforHaloExcahnge[4]; //Note: Default stream for a device is always syncronizing so creating seperate streams for each device for (int i = 0; i < numDevices; i++) { hipSetDevice(i); hipStreamCreate(&streams[i]); if (p2penabled) { hipStreamCreate(&streamsforHaloExcahnge[i]); } } /*Using a pagable memory first*/ //std::vector<float> partial_resultOnHost(chunk_X * chunk_Y); /*Using a pinned(page locked) memory for performance*/ vector<float*>partial_resultOnHost(numDevices); for (int dev = 0;dev < numDevices;dev++) { hipSetDevice(dev); hipHostMalloc((void**)&partial_resultOnHost[dev], (chunk_X * chunk_Y) * sizeof(float)); } //Check performance high_resolution_clock::time_point t1 = high_resolution_clock::now(); hipError_t status = hipGetLastError(); for (int i = 0; i < iterations; i++) { //cout << endl << endl << "Iteration : " << i + 1 << endl << endl << endl; //TODO: optimization using kernel instead of For Loop //Performance changes by launching kernal seperately for (int dev = 0; dev < numDevices;dev++) { status = hipSetDevice(dev); if (status != hipSuccess) { cout << "SetDevice unsuccessful exiting"; return status; } jacobi_Simple << <blocksize, threads, 0, streams[dev] >> >(d_A0[dev], d_A1[dev], d_A2[dev], d_A3[dev], d_A4[dev], d_Vec_In[dev], d_Vec_Out[dev], d_Rhs[dev], deviceArray[dev].eHalo_flag, deviceArray[dev].wHalo_flag, deviceArray[dev].nHalo_flag, deviceArray[dev].sHalo_flag, d_ehalos[dev], d_whalos[dev], d_nhalos[dev], d_shalos[dev], deviceArray[dev].deviceID, numDevices, decom_Dim); } if (auto err = hipGetLastError()) { cout << "Kernal Execution failed: " << hipGetErrorString(err) << " Iteration :" << i << endl; return err; } for (int dev = 0; dev < numDevices;dev++) { hipSetDevice(dev); //partial_resultOnHost.clear(); //cout << endl << endl << "Computation for Device " << dev; //TODO: Performance Upgrade: Currently serial has to be done hipMemcpyAsync using CUDA Streams //Copy the intermediate result from Device to Host memory //hipMemcpyAsync(&partial_resultOnHost[dev][0], d_Vec_Out[dev], domainDivision[dev] * sizeof(float), hipMemcpyDeviceToHost, streams[dev]); //Note: Performance gain on swap vectors on Host rather than using streams and hipMemcpyAsync if (i == (iterations - 1))//Copy the results just for the final iteration { hipMemcpyAsync(&partial_resultOnHost[dev][0], d_Vec_Out[dev], domainDivision[dev] * sizeof(float), hipMemcpyDeviceToHost, streams[dev]); continue; } //Check Intermeduate Results and store it //sendToPrint(&partial_resultOnHost[0], deviceArray[dev].devicePosition_X, deviceArray[dev].devicePosition_Y, numberOfDevicesAlong_X, chunk_X, chunk_Y, dim, size, result, numDevices, i, iterations); //Copy the intermediate result from the Host memory to the Device memory //hipMemcpyAsync(d_Vec_In[dev], &partial_resultOnHost[dev][0], domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice, streams[dev]); //Store Halo positions after iteration for exchanging if (!p2penabled) { if (numDevices > 1) { if (deviceArray[dev].nHalo_flag == 1) { hipMemcpyAsync(nHalo_pinned[dev], d_nhalos[dev], chunk_X * sizeof(float), hipMemcpyDeviceToHost, streams[dev]); if (auto err = hipGetLastError()) { cout << "d_nhalos copy failed D2H: " << hipGetErrorString(err) << endl; return err; } } if (deviceArray[dev].sHalo_flag == 1) { hipMemcpyAsync(sHalo_pinned[dev], d_shalos[dev], chunk_X * sizeof(float), hipMemcpyDeviceToHost, streams[dev]); if (auto err = hipGetLastError()) { cout << "d_shalos copy failed D2H: " << hipGetErrorString(err) << endl; return err; } } if 
(deviceArray[dev].eHalo_flag == 1) { hipMemcpyAsync(eHalo_pinned[dev], d_ehalos[dev], chunk_Y * sizeof(float), hipMemcpyDeviceToHost, streams[dev]); if (auto err = hipGetLastError()) { cout << "d_ehalos copy failed D2H: " << hipGetErrorString(err) << endl; return err; } } if (deviceArray[dev].wHalo_flag == 1) { hipMemcpyAsync(wHalo_pinned[dev], d_whalos[dev], chunk_Y * sizeof(float), hipMemcpyDeviceToHost, streams[dev]); if (auto err = hipGetLastError()) { cout << "d_whalos copy failed D2H " << hipGetErrorString(err) << endl; return err; } } } } } if (auto err = hipGetLastError()) { cout << "Data copy failed 2: " << hipGetErrorString(err) << endl; return err; } //Exchange Halos after each iteration except the last iteration if ((i < (iterations-1))) { //Synchronize streams from each device for (int dev = 0; dev < numDevices; dev++) { hipSetDevice(dev); hipStreamSynchronize(streams[dev]); if (auto err = hipGetLastError()) { cout << "Stream " << dev << " synchronize error for iteration : " << i << ". ERROR IS: " << hipGetErrorString(err) << endl; return err; } } if ((!p2penabled)) { bool exchangeComplete = false; //Note: Using Pinned memory on Host for Halos -> Performance Approach 1 //exchangehalos_onHost(numDevices, deviceArray, numberOfDevicesAlong_X); exchangeComplete = exchangehalos_onHostPinned(numDevices, deviceArray, numberOfDevicesAlong_X, nHalo_pinned, sHalo_pinned, eHalo_pinned, wHalo_pinned); if (exchangeComplete) { for (int dev = 0; dev < numDevices; dev++) { //Swap input output vectors for all devices swap(d_Vec_In[dev], d_Vec_Out[dev]); hipSetDevice(dev); //Copying Halos to the device if (deviceArray[dev].nHalo_flag == 1) { hipMemcpyAsync(d_nhalos[dev], nHalo_pinned[dev], chunk_X * sizeof(float), hipMemcpyHostToDevice, streams[dev]); } if (auto err = hipGetLastError()) { cout << "d_nhalos copy failed H2D: " << hipGetErrorString(err) << endl; return err; } if (deviceArray[dev].sHalo_flag == 1) { hipMemcpyAsync(d_shalos[dev], sHalo_pinned[dev], chunk_X * sizeof(float), hipMemcpyHostToDevice, streams[dev]); } if (auto err = hipGetLastError()) { cout << "d_shalos copy failed H2D: " << hipGetErrorString(err) << endl; return err; } if (deviceArray[dev].eHalo_flag == 1) { hipMemcpyAsync(d_ehalos[dev], eHalo_pinned[dev], chunk_Y * sizeof(float), hipMemcpyHostToDevice, streams[dev]); } if (auto err = hipGetLastError()) { cout << "d_ehalos copy failed H2D: " << hipGetErrorString(err) << endl; return err; } if (deviceArray[dev].wHalo_flag == 1) { hipMemcpyAsync(d_whalos[dev], wHalo_pinned[dev], chunk_Y * sizeof(float), hipMemcpyHostToDevice, streams[dev]); } if (auto err = hipGetLastError()) { cout << "d_whalos copy failed H2D: " << hipGetErrorString(err) << endl; return err; } } } } else { for (int dev = 0;dev < numDevices;dev++) { //Swap input output vectors for all devices swap(d_Vec_In[dev], d_Vec_Out[dev]); hipSetDevice(dev); int getDevCoord_X = deviceArray[dev].devicePosition_X; int getDevCoord_Y = deviceArray[dev].devicePosition_Y; //Check if device is having a north Halo buffer if (deviceArray[dev].nHalo_flag == 1) { int devIDtoNorth = getDeviceIDfromCoord(getDevCoord_X + 1, getDevCoord_Y, numberOfDevicesAlong_X); //Exchange Halos //Send to the device hipMemcpyPeerAsync(x_buffer[dev], dev, d_shalos[devIDtoNorth], devIDtoNorth, chunk_X * sizeof(float), streamsforHaloExcahnge[dev]); //Recieve from the device hipMemcpyPeerAsync(d_shalos[devIDtoNorth], devIDtoNorth, d_nhalos[dev], dev, chunk_X * sizeof(float), streamsforHaloExcahnge[dev]); hipMemcpyAsync(d_nhalos[dev], x_buffer[dev], 
chunk_X * sizeof(float), hipMemcpyDeviceToDevice, streamsforHaloExcahnge[dev]); } //Check if device is having a east Halo buffer if (deviceArray[dev].eHalo_flag == 1) { int devIDtoEast = getDeviceIDfromCoord(getDevCoord_X, getDevCoord_Y + 1, numberOfDevicesAlong_Y); //Exchange Halos //Send to the device hipMemcpyPeerAsync(y_buffer[dev], dev, d_whalos[devIDtoEast], devIDtoEast, chunk_Y * sizeof(float), streamsforHaloExcahnge[dev]); //Recieve from the device hipMemcpyPeerAsync(d_whalos[devIDtoEast], devIDtoEast, d_ehalos[dev], dev, chunk_Y * sizeof(float), streamsforHaloExcahnge[dev]); hipMemcpyAsync(d_ehalos[dev], y_buffer[dev], chunk_Y * sizeof(float), hipMemcpyDeviceToDevice, streamsforHaloExcahnge[dev]); } } } } } if (auto err = hipGetLastError()) { cout << "Data copy failed 3: " << hipGetErrorString(err) << endl; return err; } high_resolution_clock::time_point t2 = high_resolution_clock::now(); auto duration = duration_cast<microseconds>(t2 - t1).count(); cout << endl << "Iterations successful. Time taken in microseconds :" << duration << endl; //Sync and Destroy streams for (int i = 0; i < numDevices; ++i) { //Synchro the streams hipSetDevice(i); hipStreamSynchronize(streams[i]); hipStreamDestroy(streams[i]); if (p2penabled) { hipStreamSynchronize(streamsforHaloExcahnge[i]); hipStreamDestroy(streamsforHaloExcahnge[i]); } } //Results copied to disk for (int dev = 0; dev < numDevices;dev++) { sendToPrint(&partial_resultOnHost[dev][0], deviceArray[dev].devicePosition_X, deviceArray[dev].devicePosition_Y, numberOfDevicesAlong_X, chunk_X, chunk_Y, dim, size, result, numDevices, iterations - 1, iterations); } //==========================================Performance using CUDA stream ends=========================================================================== //Done in phase 2 of development: Disble P2P across devices if (p2penabled) { disableP2P(numDevices); } //Free memory on devices for (int dev = 0; dev < numDevices; dev++) { hipSetDevice(dev); hipFree(d_A0[dev]); hipFree(d_A1[dev]); hipFree(d_A2[dev]); hipFree(d_A3[dev]); hipFree(d_A4[dev]); hipFree(d_Vec_In[dev]); hipFree(d_Vec_Out[dev]); hipFree(d_nhalos[dev]); hipFree(d_shalos[dev]); hipFree(d_ehalos[dev]); hipFree(d_whalos[dev]); hipFree(d_Rhs[dev]); hipHostFree(partial_resultOnHost[dev]); hipHostFree(nHalo_pinned[dev]); hipHostFree(sHalo_pinned[dev]); hipHostFree(wHalo_pinned[dev]); hipHostFree(eHalo_pinned[dev]); hipDeviceReset(); } cout << endl << "Device Memory free successful." << endl; //Take care of dynamic mem location //delete[] domainDivision; return hipSuccess; } int performJacobi_MultiGPU2D_Decom(unsigned int dim, unsigned int numJacobiIt, float* A0, float* A1, float* A2, float* A3, float* A4, float* rhs, float* x_in) { hipError_t cudaStatus = performMultiGPUJacobi(dim, numJacobiIt, &A0[0], &A1[0], &A2[0], &A3[0], &A4[0], &rhs[0], &x_in[0]); if (cudaStatus != hipSuccess) { cout << "Computation failed: " << endl; return 1; } if (cudaStatus != hipSuccess) { cout << "Cuda Device Reset failed: " << endl; return 1; } return 0; }
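The host-staged exchange used above when peer access is unavailable amounts to three steps: download each device's boundary strip into a pinned host buffer, swap buffer pointers between neighbours on the host, and upload the swapped strips back on each device's stream. A minimal self-contained sketch of that pattern follows; the two-device setup, HALO_N length and buffer names are illustrative assumptions, not values taken from the file.

// Minimal sketch: host-staged halo exchange between two neighbouring devices.
// Assumes two GPUs and a halo strip of HALO_N floats per device (hypothetical values).
#include <cuda_runtime.h>
#include <cstdio>
#include <utility>

#define HALO_N 256   // illustrative halo length

int main() {
    float *d_halo[2];            // device-side halo buffers
    float *h_halo[2];            // pinned host staging buffers
    cudaStream_t stream[2];

    for (int dev = 0; dev < 2; ++dev) {
        cudaSetDevice(dev);
        cudaStreamCreate(&stream[dev]);
        cudaMalloc((void**)&d_halo[dev], HALO_N * sizeof(float));
        cudaMallocHost((void**)&h_halo[dev], HALO_N * sizeof(float));
        cudaMemset(d_halo[dev], 0, HALO_N * sizeof(float));
    }

    // 1. Download each device's boundary strip into its pinned buffer.
    for (int dev = 0; dev < 2; ++dev) {
        cudaSetDevice(dev);
        cudaMemcpyAsync(h_halo[dev], d_halo[dev], HALO_N * sizeof(float),
                        cudaMemcpyDeviceToHost, stream[dev]);
    }
    for (int dev = 0; dev < 2; ++dev) {
        cudaSetDevice(dev);
        cudaStreamSynchronize(stream[dev]);
    }

    // 2. Exchange on the host: a pointer swap of the pinned buffers, no extra copy.
    std::swap(h_halo[0], h_halo[1]);

    // 3. Upload the neighbour's boundary strip back onto each device.
    for (int dev = 0; dev < 2; ++dev) {
        cudaSetDevice(dev);
        cudaMemcpyAsync(d_halo[dev], h_halo[dev], HALO_N * sizeof(float),
                        cudaMemcpyHostToDevice, stream[dev]);
        cudaStreamSynchronize(stream[dev]);
    }

    for (int dev = 0; dev < 2; ++dev) {
        cudaSetDevice(dev);
        cudaFree(d_halo[dev]);
        cudaFreeHost(h_halo[dev]);
        cudaStreamDestroy(stream[dev]);
    }
    printf("host-staged halo exchange sketch done\n");
    return 0;
}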
c258f058e917497c2f0493b9bf70047bac7b649d.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "testMultiGPU_Jacobi2D_Decom.cuh" #include <iostream> #include <chrono> #include <memory> #include <vector> #include <fstream> using namespace std; using namespace std::chrono; #define IMUL(a,b) __mul24(a,b) //cudaError_t performMultiGPUJacobi(); //Support for below c++14 on *nix template<typename T, typename ...Args> std::unique_ptr<T> make_unique(Args&& ...args) { return std::unique_ptr<T>(new T(std::forward<Args>(args)...)); } struct create_Device { int deviceID; //In a GPU topology set the GPU position int devicePosition_X; int devicePosition_Y; int devicePosition_Z; vector<float> eHalo; vector<float> wHalo; vector<float> nHalo; vector<float> sHalo; //Flags check the halos needed by the device int eHalo_flag = 0; int wHalo_flag = 0; int nHalo_flag = 0; int sHalo_flag = 0; }; //Simple Jacobi iteration __global__ void jacobi_Simple(const float *A0, const float *A1, const float *A2, const float *A3, const float *A4, float *x_in, float *x_out, const float *rhs, const int ehalo_flag, const int whalo_flag, const int nhalo_flag, const int shalo_flag, float *ehalo, float *whalo, float *nhalo, float *shalo, const int deviceID, const int numDevices, const int domain_Decom) { int index = threadIdx.x + blockDim.x * blockIdx.x; float result = rhs[index]; int dim_x = blockDim.x;// dim across x int dim_y = gridDim.x; int x_pos = blockIdx.x; int y_pos = threadIdx.x; //result = nhalo[y_pos]; //x_out[index] = result; //Get the boundaries int leftBoundaryElem = x_pos * (dim_x); int rightBoundaryElem = (x_pos * dim_x) + (dim_x - 1); int topBoundaryElem = y_pos + ((dim_y - 1) * (dim_x)); int bottomBoundaryElem = y_pos; //Halo computation for 1D Decompostion: For the First and Last GPU Halo computation on both the sides(nhalo and shalo wont be needed) if (domain_Decom == 1) { if (numDevices > 1) { //First GPU if (deviceID == 0) { //We need to use nhalos //Carry out computations for boundary elements if (index != leftBoundaryElem) //Left result -= A1[index] * x_in[index - 1]; if (index != rightBoundaryElem) //Right result -= A3[index] * x_in[index + 1]; if (index != bottomBoundaryElem) //Bottom result -= A0[index] * x_in[index - dim_x]; if (index != topBoundaryElem) //Top result -= A4[index] * x_in[index + dim_x]; //The top boundary needs element from nhalo if (index == topBoundaryElem) //nHalos result -= A4[index] * nhalo[y_pos]; result /= A2[index]; x_out[index] = result; //Update Halo at the end of computation if (index == topBoundaryElem) //nHalos updated nhalo[y_pos] = result; return; } //Last GPU else if (deviceID == (numDevices - 1)) { //We need to use shalos //Carry out computations for boundary elements if (index != leftBoundaryElem) //Left result -= A1[index] * x_in[index - 1]; if (index != rightBoundaryElem) //Right result -= A3[index] * x_in[index + 1]; if (index != bottomBoundaryElem) //Bottom result -= A0[index] * x_in[index - dim_x]; //The Bottom boundary needs elements from shalo if (index == bottomBoundaryElem) //nHalos result -= A0[index] * shalo[y_pos]; if (index != topBoundaryElem) //Top result -= A4[index] * x_in[index + dim_x]; result /= A2[index]; x_out[index] = result; //Update Halo at the end of computation if (index == bottomBoundaryElem) //sHalos updated shalo[y_pos] = result; return; } //For all the middle GPUs else { //We need to use both shalos and nhalos //Carry out computations for boundary elements if (index != leftBoundaryElem) //Left result -= A1[index] * x_in[index - 1]; if (index != 
rightBoundaryElem) //Right result -= A3[index] * x_in[index + 1]; if (index != bottomBoundaryElem) //Bottom result -= A0[index] * x_in[index - dim_x]; //The Bottom boundary needs elements from shalo if (index == bottomBoundaryElem) //nHalos result -= A0[index] * shalo[y_pos]; if (index != topBoundaryElem) //Top result -= A4[index] * x_in[index + dim_x]; //The top boundary needs element from nhalo if (index == topBoundaryElem) //nHalos result -= A4[index] * nhalo[y_pos]; result /= A2[index]; x_out[index] = result; //Update Halo at the end of computation if (index == bottomBoundaryElem) //sHalos updated shalo[y_pos] = result; //Update Halo at the end of computation if (index == topBoundaryElem) //nHalos updated nhalo[y_pos] = result; return; } } } else if (domain_Decom == 2) { //======Left Bounday Elem if (index != leftBoundaryElem) //Left result -= A1[index] * x_in[index - 1]; //Computation using the Halos if (index == leftBoundaryElem) { if (whalo_flag == 1) { result -= A1[index] * whalo[x_pos]; } } //======Right Bounday Elem if (index != rightBoundaryElem) //Right result -= A3[index] * x_in[index + 1]; if (index == rightBoundaryElem) { if (ehalo_flag == 1) { result -= A3[index] * ehalo[x_pos]; } } //======Bottom Bounday Elem if (index != bottomBoundaryElem) //Bottom result -= A0[index] * x_in[index - dim_x]; if (index == bottomBoundaryElem) { if (shalo_flag == 1) { result -= A0[index] * shalo[y_pos]; } } //======Top Bounday Elem if (index != topBoundaryElem) //Top result -= A4[index] * x_in[index + dim_x]; if (index == topBoundaryElem) { if (nhalo_flag == 1) { result -= A4[index] * nhalo[y_pos]; } } result /= A2[index]; x_out[index] = result; //Updating Halos at the End of the computation if (index == topBoundaryElem) { if (nhalo_flag == 1) { nhalo[y_pos] = result; } } if (index == bottomBoundaryElem) { if (shalo_flag == 1) { shalo[y_pos] = result; } } if (index == leftBoundaryElem) { if (whalo_flag == 1) { whalo[x_pos] = result; } } if (index == rightBoundaryElem) { if (ehalo_flag == 1) { ehalo[x_pos] = result; } } return; } //For computations on a Machine with a single GPU else { {//For some reason order of computation (left,right,top and bottom) gives a different result //Carry out computations for boundary elements if (index != leftBoundaryElem) //Left result -= A1[index] * x_in[index - 1]; if (index != rightBoundaryElem) //Right result -= A3[index] * x_in[index + 1]; if (index != bottomBoundaryElem) //Bottom result -= A0[index] * x_in[index - dim_x]; if (index != topBoundaryElem) //Top result -= A4[index] * x_in[index + dim_x]; result /= A2[index]; x_out[index] = result; return; } } } //========================MultiGPU utility functions============================================================================ void checkP2Paccess(int numGPUs) { for (int i = 0; i<numGPUs; i++) { cudaSetDevice(i); for (int j = 0; j<numGPUs; j++) { int access; if (i != j) { cudaDeviceCanAccessPeer(&access, i, j); if (auto err = cudaGetLastError()) { cout << "P2P Operations failed : " << cudaGetErrorString(err) << endl; return; } } } } cout << "\n***NOTE: In case a device doesn't have P2P access to other one, it falls back to normal memcopy procedure.\nSo you can see lesser Bandwidth (GB/s) in those cases.\n\n"; } bool enableP2P(int numGPUs) { for (int i = 0; i<numGPUs; i++) { cudaSetDevice(i); for (int j = 0; j<numGPUs; j++) { int access; cudaDeviceCanAccessPeer(&access, i, j); if (auto err = cudaGetLastError()) { cout << "P2P Operations failed while enabling: " << cudaGetErrorString(err) << endl; 
return false; } if (access) { cudaDeviceEnablePeerAccess(j, 0); if (auto err = cudaGetLastError()) { cout << "P2P Operations failed while enabling: " << cudaGetErrorString(err) << endl; return false; } } } } return true; } void disableP2P(int numGPUs) { for (int i = 0; i<numGPUs; i++) { cudaSetDevice(i); for (int j = 0; j<numGPUs; j++) { int access; cudaDeviceCanAccessPeer(&access, i, j); if (auto err = cudaGetLastError()) { cout << "P2P Operations failed while disabling : " << cudaGetErrorString(err) << endl; return; } if (access) { cudaDeviceDisablePeerAccess(j); if (auto err = cudaGetLastError()) { cout << "P2P Operations failed while disabling: " << cudaGetErrorString(err) << endl; return; } } } } } //=============================================================================================================================== //====================================Creating Topology with the number of Devices available==================================== void generateGPUGRID(int numDevices, int &numberOfDevicesAlong_X, int &numberOfDevicesAlong_Y) { //Finding GPU topology along x and y //Assumuing total number of devices is a perfect square(To be changed later) numberOfDevicesAlong_X = (int)sqrt(numDevices); numberOfDevicesAlong_Y = (int)numberOfDevicesAlong_X; } /* Creates a topology for a number of devices in a system for ex. The devices are aware of left, right, top and bottom neigbours in 2D 1. It also decides the chunk per devices by determining x-dimension and y-dimensions for per chunk of data per device. 2. It also initializes halos for each devices which can be exchanged with the neighbours */ void createTopology(int numDevices, vector<create_Device> &deviceArray, int numberOfDevicesAlong_X, int numberOfDevicesAlong_Y) { deviceArray.resize(numDevices); unsigned int deviceCount = 0; for (int gridCount_X = 0; gridCount_X < numberOfDevicesAlong_X; gridCount_X++) { for (int gridCount_Y = 0; gridCount_Y < numberOfDevicesAlong_Y; gridCount_Y++) { deviceArray[deviceCount].deviceID = deviceCount; deviceArray[deviceCount].devicePosition_X = gridCount_X; deviceArray[deviceCount].devicePosition_Y = gridCount_Y; //devicePosition_Z to be changed later deviceArray[deviceCount].devicePosition_Z = 1; deviceCount++; } } } //============================================================================================================================== //Init Halos: In 1D decomposition only North and South Halos are used. In 2D decomposition North, South, East and West Halo need to be initialized and computed //TODO:Create a Halo Exchange Mechanism for 2D Multi GPU topology void initHalos2D(create_Device &device, int chunk_X, int chunk_Y, float *vec_in, int maxdevicesAlong_X, int maxDevicesAlong_Y, int rowStartPos, int rowEndPos, int dim) { /*cout << endl << "Inside Halo Computation 2D. printing Details"; cout << endl << "Device ID " << device.deviceID; cout << endl << "Device position X " << device.devicePosition_X; cout << endl << "Device position Y " << device.devicePosition_Y; cout << endl << "Row Start " << rowStartPos; cout << endl << "Row End " << rowEndPos;*/ //Assigning counter for each individual Halos. 
To prevent update of the same counter //int rowStartPosEast = rowStartPos; int rowStartPosWest = rowStartPos; int rowStartPosNorth = rowStartPos; int rowStartPosSouth = rowStartPos; int rowEndPosEast = rowEndPos; //int rowEndPosWest = rowEndPos; //int rowEndPosNorth = rowEndPos; //int rowEndPosSouth = rowEndPos; //Checks provided for Boundary devices in GPU topology if ((device.devicePosition_Y - 1) >= 0) { //cout << "West Halo needed "; device.wHalo_flag = 1; device.wHalo.resize(chunk_Y); for (int rowNum = 0; rowNum < chunk_Y; rowNum++) { device.wHalo[rowNum] = vec_in[rowStartPosWest]; //cout << rowStartPosWest << " "; rowStartPosWest += dim; } } if ((device.devicePosition_Y + 1) < maxdevicesAlong_X) { //cout << "East Halo needed "; device.eHalo_flag = 1; device.eHalo.resize(chunk_Y); for (int rowNum = 0; rowNum < chunk_Y; rowNum++) { device.eHalo[rowNum] = vec_in[rowEndPosEast]; //cout << rowEndPosEast << " "; rowEndPosEast += dim; } } if ((device.devicePosition_X - 1) >= 0) { //cout << "South Halo needed "; device.sHalo_flag = 1; device.sHalo.resize(chunk_X); for (int rowNum = 0; rowNum < chunk_X; rowNum++) { device.sHalo[rowNum] = vec_in[rowStartPosSouth]; //cout << rowStartPosSouth << " "; rowStartPosSouth++; } } if ((device.devicePosition_X + 1) < maxDevicesAlong_Y) { //cout << "North Halo needed "; device.nHalo_flag = 1; device.nHalo.resize(chunk_X); rowStartPosNorth = rowStartPosNorth + (dim * (chunk_Y - 1)); for (int rowNum = 0; rowNum < chunk_X; rowNum++) { device.nHalo[rowNum] = vec_in[rowStartPosNorth]; //cout << rowStartPosNorth << " "; rowStartPosNorth++; } } } //======================================Exchange Halos: on Host============================================== int getDeviceIDfromCoord(int devCoord_x, int devCoord_y, int numberofDevicesAlong_X) { int devID = (devCoord_x * numberofDevicesAlong_X) + devCoord_y; return devID; } void exchangehalos_onHost(int numDevices, vector<create_Device> &deviceArray, int numberofDevicesAlong_X) { //Halos exist in pairs so: //Important: A device exchanges North-to-South Pairs and East-to-West Pairs only. Not South-to-North pairs and West-to-East pairs //That way the number of exchanges are kept to minimum for (int dev = 0;dev < numDevices;dev++) { int getDevCoord_X = deviceArray[dev].devicePosition_X; int getDevCoord_Y = deviceArray[dev].devicePosition_Y; //Check if device is having a north Halo buffer if (deviceArray[dev].nHalo_flag == 1) { int devIDtoNorth = getDeviceIDfromCoord(getDevCoord_X + 1, getDevCoord_Y, numberofDevicesAlong_X); //Exchange Halos (deviceArray[dev].nHalo).swap(deviceArray[devIDtoNorth].sHalo); } //Check if device is having a east Halo buffer if (deviceArray[dev].eHalo_flag == 1) { int devIDtoEast = getDeviceIDfromCoord(getDevCoord_X, getDevCoord_Y + 1, numberofDevicesAlong_X); //Exchange Halos (deviceArray[dev].eHalo).swap(deviceArray[devIDtoEast].wHalo); } } } bool exchangehalos_onHostPinned(int numDevices, vector<create_Device> &deviceArray, int numberofDevicesAlong_X, vector<float*> &nHalosPinned, vector<float*> &sHalosPinned, vector<float*> &eHalosPinned, vector<float*> &wHalosPinned) { //Halos exist in pairs so: //Important: A device exchanges North-to-South Pairs and East-to-West Pairs only. 
Not South-to-North pairs and West-to-East pairs //That way the number of exchanges are kept to minimum for (int dev = 0;dev < numDevices;dev++) { int getDevCoord_X = deviceArray[dev].devicePosition_X; int getDevCoord_Y = deviceArray[dev].devicePosition_Y; //Check if device is having a north Halo buffer if (deviceArray[dev].nHalo_flag == 1) { int devIDtoNorth = getDeviceIDfromCoord(getDevCoord_X + 1, getDevCoord_Y, numberofDevicesAlong_X); //Exchange Halos //(deviceArray[dev].nHalo).swap(deviceArray[devIDtoNorth].sHalo); swap(nHalosPinned[dev], sHalosPinned[devIDtoNorth]); } //Check if device is having a east Halo buffer if (deviceArray[dev].eHalo_flag == 1) { int devIDtoEast = getDeviceIDfromCoord(getDevCoord_X, getDevCoord_Y + 1, numberofDevicesAlong_X); //Exchange Halos //(deviceArray[dev].eHalo).swap(deviceArray[devIDtoEast].wHalo); swap(eHalosPinned[dev], wHalosPinned[devIDtoEast]); } } return true; } //===========================Exchange Halos: on Host Ends===================================================== //Init matrix Diagonals A0, A1, A2, A3, A4 void copyValues(float *A0, float *A1, float *A2, float *A3, float *A4, float *rhs, float *vec_in, float *vec_out, int dim, float *val_A0, float *val_A1, float *val_A2, float *val_A3, float *val_A4, float *val_rhs, float *val_x_in) { unsigned int size = dim * dim; for (unsigned int i = 0; i < size; i++) { A0[i] = val_A0[i]; A1[i] = val_A1[i]; A2[i] = val_A2[i]; A3[i] = val_A3[i]; A4[i] = val_A4[i]; rhs[i] = val_rhs[i]; vec_in[i] = val_x_in[i]; vec_out[i] = 0.0f; } } void getAllDeviceProperties() { int nDevices; cudaGetDeviceCount(&nDevices); for (int i = 0; i < nDevices; i++) { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, i); cout << " Device Number: " << i << endl; cout << " Device name: " << prop.name << endl; cout << " Memory Clock Rate (KHz): " << prop.memoryClockRate << endl; cout << " Memory Bus Width (bits): " << prop.memoryBusWidth << endl;; cout << " Peak Memory Bandwidth (GB/s): " << 2.0*prop.memoryClockRate*(prop.memoryBusWidth / 8) / 1.0e6 << endl << endl << endl; } } /* Prints an output file for checking results */ void sendToPrint(float *partial_result, int devicePosition_X, int devicePosition_Y, int numberOfDevicesAlong_X, int chunk_X, int chunk_Y, int dim, int totalSize, vector<float> &result, int numDevices, int currentIteration, int numberOfTotalIterations) { int devicePosX = devicePosition_X; int devicePosY = devicePosition_Y; //Calculating data position based on device coords //numberOfDevicesAlong_X * Chunk_X * Chunk_Y : finds out the total data per row of GPUs allocated int dataStartPos_X = (devicePosX * numberOfDevicesAlong_X * chunk_X * chunk_Y) + (devicePosY * chunk_X); int dataEndPos_X = dataStartPos_X + chunk_X; //One complete row across all GPU is dim in order to get the next element above an element we add (currentPosition + dim ) int rowStartPos = dataStartPos_X; int rowEndPos = dataEndPos_X; int indexCounter = 0; //cout << endl; for (int rowNum = 0; rowNum < chunk_Y; rowNum++) { //Get one complete row for the GPU for (int pos = rowStartPos; pos < rowEndPos; pos++) { result[pos] = partial_result[indexCounter]; indexCounter++; } //cout << endl; rowStartPos += dim; rowEndPos += dim; } //Printing when the last device computation is done: Remove the check to check computation for each device int deviceID = getDeviceIDfromCoord(devicePosition_X, devicePosition_Y, numberOfDevicesAlong_X); if ((deviceID == (numDevices - 1)) && (currentIteration == (numberOfTotalIterations - 1))) { ofstream myfile; 
myfile.open("data2.txt"); //Printing the values here for (int i = totalSize;i > 0; i--) { if (i%dim == 0) { myfile << endl; } myfile << result[i - 1] << " "; } myfile.close(); } } cudaError_t performMultiGPUJacobi(unsigned int val_dim, unsigned int numJacobiIt, float* val_A0, float* val_A1, float* val_A2, float* val_A3, float* val_A4, float* val_rhs, float* val_x_in) { //Fixed value changed later int dim = 8; if (val_dim != 0) { dim = val_dim; } //TODO: write a 2D domain decomposition method for more than 2 GPUs int size = dim * dim; //auto result = make_unique<float[]>(size); //Create Diagonal Vectors std::vector<float> a0(size); std::vector<float> a1(size); std::vector<float> a2(size); std::vector<float> a3(size); std::vector<float> a4(size); std::vector<float> vec_in(size); std::vector<float> vec_out(size); std::vector<float> rhs(size); std::vector<float> result(size); //Get the total number of devices int numDevices; cudaGetDeviceCount(&numDevices); cout << endl << "Total number of Devices in the System are : " << numDevices << endl; getAllDeviceProperties(); //Enable Peer-to-Peer access across all GPUs : Done on phase 2 of development bool p2penabled = false; p2penabled = enableP2P(numDevices); //Configuring the number of GPU's manually //numDevices=2; copyValues(&a0[0], &a1[0], &a2[0], &a3[0], &a4[0], &rhs[0], &vec_in[0], &vec_out[0], dim, &val_A0[0], &val_A1[0], &val_A2[0], &val_A3[0], &val_A4[0], &val_rhs[0], &val_x_in[0]); vector<create_Device> deviceArray; /* Distributed Compuation using Halos: Algorithm 1. Init Halos. 1.a) In 1D decomposition nhalo and shalo intialized from vector x_in 1.b) In 2D decompsition nhalo,shalo, ehalo and whalo initialozed from vector x_in 2. Pass the halos to Jacobi_kernal. 3. Store the result computed at the boundary into the halo boundary positions. 4. Swap nhalo and shalo pairs in 1D decompostion. Swap (nhalo,shalo) and (ehalo,whalo) in 2D. */ //=================================Domain Decomposition Logic Starts================================================================= /*Generating a GPU Grid with multiple GPUs and creating a Topology*/ int numberOfDevicesAlong_X = 1; int numberOfDevicesAlong_Y = 1; generateGPUGRID(numDevices, numberOfDevicesAlong_X, numberOfDevicesAlong_Y); cout << "GPU grid structure is : " << numberOfDevicesAlong_X << " X " << numberOfDevicesAlong_Y << endl; //Set Decomposition dimension 1D or 2D: when decomposition is 0. 
Computation happens on a single GPU int decom_Dim = 2; //Total elements along each dim in 2D int chunk_X = dim / numberOfDevicesAlong_X; int chunk_Y = dim / numberOfDevicesAlong_Y; /* Creating a GPU topology with multiple devices*/ createTopology(numDevices, deviceArray, numberOfDevicesAlong_X, numberOfDevicesAlong_Y); //Let the total number of GPU be 2 : has to be changed later //Computation divided into (size/2) on first and size-(size/2) on second std::vector<int> domainDivision(numDevices); //Logic for total chunk per device (Domain distribution) for (int i = 0; i < numDevices; i++) { //Chunk per GPU will be same irrepective of 1D or 2D decomposition domainDivision[i] = size / numDevices; } //For use on Device std::vector<float*>d_A0(numDevices); std::vector<float*>d_A1(numDevices); std::vector<float*>d_A2(numDevices); std::vector<float*>d_A3(numDevices); std::vector<float*>d_A4(numDevices); std::vector<float*>d_Vec_In(numDevices); std::vector<float*>d_Vec_Out(numDevices); std::vector<float*>d_nhalos(numDevices); std::vector<float*>d_shalos(numDevices); std::vector<float*>d_ehalos(numDevices); std::vector<float*>d_whalos(numDevices); std::vector<float*>d_Rhs(numDevices); std::vector<float*>x_buffer(numDevices); std::vector<float*>y_buffer(numDevices); //Note: Using Pinned memory on Host for Halos -> Performance Approach 1 vector<float*>nHalo_pinned(numDevices); vector<float*>sHalo_pinned(numDevices); vector<float*>wHalo_pinned(numDevices); vector<float*>eHalo_pinned(numDevices); for (int dev = 0;dev < numDevices;dev++) { cudaSetDevice(dev); cudaMallocHost((void**)&nHalo_pinned[dev], (chunk_X) * sizeof(float)); cudaMallocHost((void**)&sHalo_pinned[dev], (chunk_X) * sizeof(float)); cudaMallocHost((void**)&wHalo_pinned[dev], (chunk_Y) * sizeof(float)); cudaMallocHost((void**)&eHalo_pinned[dev], (chunk_Y) * sizeof(float)); } for (int dev = 0; dev < numDevices; dev++) { //Setting the device before allocation cudaSetDevice(dev); //cudamalloc the Diagonals cudaMalloc((void**)&d_A0[dev], domainDivision[dev] * sizeof(float)); cudaMalloc((void**)&d_A1[dev], domainDivision[dev] * sizeof(float)); cudaMalloc((void**)&d_A2[dev], domainDivision[dev] * sizeof(float)); cudaMalloc((void**)&d_A3[dev], domainDivision[dev] * sizeof(float)); cudaMalloc((void**)&d_A4[dev], domainDivision[dev] * sizeof(float)); //Using pinned memory as part of performance upgrade- Phase 2 of development //cudamalloc the Input Vector and Result vector cudaMalloc((void**)&d_Vec_In[dev], domainDivision[dev] * sizeof(float)); cudaMalloc((void**)&d_Vec_Out[dev], domainDivision[dev] * sizeof(float)); cudaMalloc((void**)&d_Rhs[dev], domainDivision[dev] * sizeof(float)); //cudaMalloc Halos: North and South--1D. 
TODO: East and West for 2D cudaMalloc((void**)&d_nhalos[dev], chunk_X * sizeof(float)); cudaMalloc((void**)&d_shalos[dev], chunk_X * sizeof(float)); cudaMalloc((void**)&d_ehalos[dev], chunk_Y * sizeof(float)); cudaMalloc((void**)&d_whalos[dev], chunk_Y * sizeof(float)); //Buffer memory used for p2p exchange cudaMalloc((void**)&x_buffer[dev], chunk_X * sizeof(float)); cudaMalloc((void**)&y_buffer[dev], chunk_Y * sizeof(float)); } /* The transfer of Data from Host to Device : Domain Decomposition in 2D*/ if (decom_Dim == 2) { //Create Partial Diagonal Vectors //Size per GPU will be int chunkSize = chunk_X * chunk_Y; std::vector<float> partial_a0(chunkSize); std::vector<float> partial_a1(chunkSize); std::vector<float> partial_a2(chunkSize); std::vector<float> partial_a3(chunkSize); std::vector<float> partial_a4(chunkSize); std::vector<float> partial_vec_in(chunkSize); std::vector<float> partial_vec_out(chunkSize); std::vector<float> partial_rhs(chunkSize); std::vector<float> partial_result(chunkSize); for (int dev = 0; dev < numDevices; dev++) { //Test the properties of the device assigned //cout << endl << "New Logical Device created " << deviceArray[dev].deviceID; //cout << endl << "New Logical Device (X,Y) coord (" << deviceArray[dev].devicePosition_X << "," << deviceArray[dev].devicePosition_Y << ")"; //==========Important: Logic for creation of Chunks to be allocated to GPUs========================================== //Important : Mention about the correlation between the topology and data position in the thesis int devicePosX = deviceArray[dev].devicePosition_X; int devicePosY = deviceArray[dev].devicePosition_Y; //Calculating data position based on device coords //numberOfDevicesAlong_X * Chunk_X * Chunk_Y : finds out the total data per row of GPUs allocated int dataStartPos_X = (devicePosX * numberOfDevicesAlong_X * chunk_X * chunk_Y) + (devicePosY * chunk_X); int dataEndPos_X = dataStartPos_X + chunk_X; //One complete row across all GPU is dim in order to get the next element above an element we add (currentPosition + dim ) int rowStartPos = dataStartPos_X; int rowEndPos = dataEndPos_X; int indexCounter = 0; //Initialize Halos initHalos2D(deviceArray[dev], chunk_X, chunk_Y, &vec_in[0], numberOfDevicesAlong_X, numberOfDevicesAlong_Y, rowStartPos, rowEndPos - 1, dim); for (int rowNum = 0; rowNum < chunk_Y; rowNum++) { //Get one complete row for the GPU for (int pos = rowStartPos; pos < rowEndPos; pos++) { partial_a0[indexCounter] = a0[pos]; partial_a1[indexCounter] = a1[pos]; partial_a2[indexCounter] = a2[pos]; partial_a3[indexCounter] = a3[pos]; partial_a4[indexCounter] = a4[pos]; partial_vec_in[indexCounter] = vec_in[pos]; partial_vec_out[indexCounter] = vec_out[pos]; partial_rhs[indexCounter] = rhs[pos]; partial_result[indexCounter] = result[pos]; indexCounter++; } rowStartPos += dim; rowEndPos += dim; } //==========Important: Logic for creation of Chunks to be allocated to GPUs Ends ========================================== //Setting Cuda device cudaSetDevice(dev); //Copy the diagonals from host to device : calling all at once instead of putting inside the for loop cudaMemcpy(d_A0[dev], &partial_a0[0], domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_A1[dev], &partial_a1[0], domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_A2[dev], &partial_a2[0], domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_A3[dev], &partial_a3[0], domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_A4[dev], 
&partial_a4[0], domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice); //Copy in and out vectors and RHS cudaMemcpy(d_Vec_In[dev], &partial_vec_in[0], domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_Vec_Out[dev], &partial_vec_out[0], domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_Rhs[dev], &partial_rhs[0], domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice); } if (auto err = cudaGetLastError()) { cout << "Data copy failed 1: " << cudaGetErrorString(err) << endl; return err; } //Copy intial Halos in 2D //Initial Exchange Halos: Then do intial cudaMemcopies exchangehalos_onHost(numDevices, deviceArray, numberOfDevicesAlong_X); for (int dev = 0; dev < numDevices; dev++) { cudaSetDevice(dev); //Copying Halos to the device if (deviceArray[dev].nHalo_flag == 1) { cudaMemcpy(d_nhalos[dev], &deviceArray[dev].nHalo[0], chunk_X * sizeof(float), cudaMemcpyHostToDevice); } if (deviceArray[dev].sHalo_flag == 1) { cudaMemcpy(d_shalos[dev], &deviceArray[dev].sHalo[0], chunk_X * sizeof(float), cudaMemcpyHostToDevice); } if (deviceArray[dev].eHalo_flag == 1) { cudaMemcpy(d_ehalos[dev], &deviceArray[dev].eHalo[0], chunk_Y * sizeof(float), cudaMemcpyHostToDevice); } if (deviceArray[dev].wHalo_flag == 1) { cudaMemcpy(d_whalos[dev], &deviceArray[dev].wHalo[0], chunk_Y * sizeof(float), cudaMemcpyHostToDevice); } } //Development phase 2 changes : For p2p operation communication initialize buffers for (int dev = 0; dev < numDevices; dev++) { cudaSetDevice(dev); //Copying Halos to the device if (deviceArray[dev].nHalo_flag == 1) { cudaMemcpy(x_buffer[dev], &deviceArray[dev].nHalo[0], chunk_X * sizeof(float), cudaMemcpyHostToDevice); } if (deviceArray[dev].wHalo_flag == 1) { cudaMemcpy(y_buffer[dev], &deviceArray[dev].eHalo[0], chunk_Y * sizeof(float), cudaMemcpyHostToDevice); } } } //=================================Domain Decomposition Logic Ends ================================================================= int blocksize = chunk_X; int threads = chunk_Y; //cout << endl<<"blocksize" << blocksize; //cout << endl<<"thread" << threads; //Call to kernal int iterations = 0; if (numJacobiIt != 0) { iterations = numJacobiIt; } else { cout << endl << " No. of iterations is zero exiting... "; //return; } //===========================================CUDA Stream implementation for performance. Phase 2 of Development ==================================================== //===========Algorithm Improvement: Identify the neighbours so that they could be launched together and the exchange can take place. Without having to wait for computation across all devices============================ cudaStream_t streams[4];//Possible to declare it dynamically ? Yes. Using Vectors. 
cudaStream_t streamsforHaloExcahnge[4]; //Note: Default stream for a device is always syncronizing so creating seperate streams for each device for (int i = 0; i < numDevices; i++) { cudaSetDevice(i); cudaStreamCreate(&streams[i]); if (p2penabled) { cudaStreamCreate(&streamsforHaloExcahnge[i]); } } /*Using a pagable memory first*/ //std::vector<float> partial_resultOnHost(chunk_X * chunk_Y); /*Using a pinned(page locked) memory for performance*/ vector<float*>partial_resultOnHost(numDevices); for (int dev = 0;dev < numDevices;dev++) { cudaSetDevice(dev); cudaMallocHost((void**)&partial_resultOnHost[dev], (chunk_X * chunk_Y) * sizeof(float)); } //Check performance high_resolution_clock::time_point t1 = high_resolution_clock::now(); cudaError_t status = cudaGetLastError(); for (int i = 0; i < iterations; i++) { //cout << endl << endl << "Iteration : " << i + 1 << endl << endl << endl; //TODO: optimization using kernel instead of For Loop //Performance changes by launching kernal seperately for (int dev = 0; dev < numDevices;dev++) { status = cudaSetDevice(dev); if (status != cudaSuccess) { cout << "SetDevice unsuccessful exiting"; return status; } jacobi_Simple << <blocksize, threads, 0, streams[dev] >> >(d_A0[dev], d_A1[dev], d_A2[dev], d_A3[dev], d_A4[dev], d_Vec_In[dev], d_Vec_Out[dev], d_Rhs[dev], deviceArray[dev].eHalo_flag, deviceArray[dev].wHalo_flag, deviceArray[dev].nHalo_flag, deviceArray[dev].sHalo_flag, d_ehalos[dev], d_whalos[dev], d_nhalos[dev], d_shalos[dev], deviceArray[dev].deviceID, numDevices, decom_Dim); } if (auto err = cudaGetLastError()) { cout << "Kernal Execution failed: " << cudaGetErrorString(err) << " Iteration :" << i << endl; return err; } for (int dev = 0; dev < numDevices;dev++) { cudaSetDevice(dev); //partial_resultOnHost.clear(); //cout << endl << endl << "Computation for Device " << dev; //TODO: Performance Upgrade: Currently serial has to be done cudaMemcpyAsync using CUDA Streams //Copy the intermediate result from Device to Host memory //cudaMemcpyAsync(&partial_resultOnHost[dev][0], d_Vec_Out[dev], domainDivision[dev] * sizeof(float), cudaMemcpyDeviceToHost, streams[dev]); //Note: Performance gain on swap vectors on Host rather than using streams and cudaMemcpyAsync if (i == (iterations - 1))//Copy the results just for the final iteration { cudaMemcpyAsync(&partial_resultOnHost[dev][0], d_Vec_Out[dev], domainDivision[dev] * sizeof(float), cudaMemcpyDeviceToHost, streams[dev]); continue; } //Check Intermeduate Results and store it //sendToPrint(&partial_resultOnHost[0], deviceArray[dev].devicePosition_X, deviceArray[dev].devicePosition_Y, numberOfDevicesAlong_X, chunk_X, chunk_Y, dim, size, result, numDevices, i, iterations); //Copy the intermediate result from the Host memory to the Device memory //cudaMemcpyAsync(d_Vec_In[dev], &partial_resultOnHost[dev][0], domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice, streams[dev]); //Store Halo positions after iteration for exchanging if (!p2penabled) { if (numDevices > 1) { if (deviceArray[dev].nHalo_flag == 1) { cudaMemcpyAsync(nHalo_pinned[dev], d_nhalos[dev], chunk_X * sizeof(float), cudaMemcpyDeviceToHost, streams[dev]); if (auto err = cudaGetLastError()) { cout << "d_nhalos copy failed D2H: " << cudaGetErrorString(err) << endl; return err; } } if (deviceArray[dev].sHalo_flag == 1) { cudaMemcpyAsync(sHalo_pinned[dev], d_shalos[dev], chunk_X * sizeof(float), cudaMemcpyDeviceToHost, streams[dev]); if (auto err = cudaGetLastError()) { cout << "d_shalos copy failed D2H: " << cudaGetErrorString(err) 
<< endl; return err; } } if (deviceArray[dev].eHalo_flag == 1) { cudaMemcpyAsync(eHalo_pinned[dev], d_ehalos[dev], chunk_Y * sizeof(float), cudaMemcpyDeviceToHost, streams[dev]); if (auto err = cudaGetLastError()) { cout << "d_ehalos copy failed D2H: " << cudaGetErrorString(err) << endl; return err; } } if (deviceArray[dev].wHalo_flag == 1) { cudaMemcpyAsync(wHalo_pinned[dev], d_whalos[dev], chunk_Y * sizeof(float), cudaMemcpyDeviceToHost, streams[dev]); if (auto err = cudaGetLastError()) { cout << "d_whalos copy failed D2H " << cudaGetErrorString(err) << endl; return err; } } } } } if (auto err = cudaGetLastError()) { cout << "Data copy failed 2: " << cudaGetErrorString(err) << endl; return err; } //Exchange Halos after each iteration except the last iteration if ((i < (iterations-1))) { //Synchronize streams from each device for (int dev = 0; dev < numDevices; dev++) { cudaSetDevice(dev); cudaStreamSynchronize(streams[dev]); if (auto err = cudaGetLastError()) { cout << "Stream " << dev << " synchronize error for iteration : " << i << ". ERROR IS: " << cudaGetErrorString(err) << endl; return err; } } if ((!p2penabled)) { bool exchangeComplete = false; //Note: Using Pinned memory on Host for Halos -> Performance Approach 1 //exchangehalos_onHost(numDevices, deviceArray, numberOfDevicesAlong_X); exchangeComplete = exchangehalos_onHostPinned(numDevices, deviceArray, numberOfDevicesAlong_X, nHalo_pinned, sHalo_pinned, eHalo_pinned, wHalo_pinned); if (exchangeComplete) { for (int dev = 0; dev < numDevices; dev++) { //Swap input output vectors for all devices swap(d_Vec_In[dev], d_Vec_Out[dev]); cudaSetDevice(dev); //Copying Halos to the device if (deviceArray[dev].nHalo_flag == 1) { cudaMemcpyAsync(d_nhalos[dev], nHalo_pinned[dev], chunk_X * sizeof(float), cudaMemcpyHostToDevice, streams[dev]); } if (auto err = cudaGetLastError()) { cout << "d_nhalos copy failed H2D: " << cudaGetErrorString(err) << endl; return err; } if (deviceArray[dev].sHalo_flag == 1) { cudaMemcpyAsync(d_shalos[dev], sHalo_pinned[dev], chunk_X * sizeof(float), cudaMemcpyHostToDevice, streams[dev]); } if (auto err = cudaGetLastError()) { cout << "d_shalos copy failed H2D: " << cudaGetErrorString(err) << endl; return err; } if (deviceArray[dev].eHalo_flag == 1) { cudaMemcpyAsync(d_ehalos[dev], eHalo_pinned[dev], chunk_Y * sizeof(float), cudaMemcpyHostToDevice, streams[dev]); } if (auto err = cudaGetLastError()) { cout << "d_ehalos copy failed H2D: " << cudaGetErrorString(err) << endl; return err; } if (deviceArray[dev].wHalo_flag == 1) { cudaMemcpyAsync(d_whalos[dev], wHalo_pinned[dev], chunk_Y * sizeof(float), cudaMemcpyHostToDevice, streams[dev]); } if (auto err = cudaGetLastError()) { cout << "d_whalos copy failed H2D: " << cudaGetErrorString(err) << endl; return err; } } } } else { for (int dev = 0;dev < numDevices;dev++) { //Swap input output vectors for all devices swap(d_Vec_In[dev], d_Vec_Out[dev]); cudaSetDevice(dev); int getDevCoord_X = deviceArray[dev].devicePosition_X; int getDevCoord_Y = deviceArray[dev].devicePosition_Y; //Check if device is having a north Halo buffer if (deviceArray[dev].nHalo_flag == 1) { int devIDtoNorth = getDeviceIDfromCoord(getDevCoord_X + 1, getDevCoord_Y, numberOfDevicesAlong_X); //Exchange Halos //Send to the device cudaMemcpyPeerAsync(x_buffer[dev], dev, d_shalos[devIDtoNorth], devIDtoNorth, chunk_X * sizeof(float), streamsforHaloExcahnge[dev]); //Recieve from the device cudaMemcpyPeerAsync(d_shalos[devIDtoNorth], devIDtoNorth, d_nhalos[dev], dev, chunk_X * sizeof(float), 
streamsforHaloExcahnge[dev]); cudaMemcpyAsync(d_nhalos[dev], x_buffer[dev], chunk_X * sizeof(float), cudaMemcpyDeviceToDevice, streamsforHaloExcahnge[dev]); } //Check if device is having a east Halo buffer if (deviceArray[dev].eHalo_flag == 1) { int devIDtoEast = getDeviceIDfromCoord(getDevCoord_X, getDevCoord_Y + 1, numberOfDevicesAlong_Y); //Exchange Halos //Send to the device cudaMemcpyPeerAsync(y_buffer[dev], dev, d_whalos[devIDtoEast], devIDtoEast, chunk_Y * sizeof(float), streamsforHaloExcahnge[dev]); //Recieve from the device cudaMemcpyPeerAsync(d_whalos[devIDtoEast], devIDtoEast, d_ehalos[dev], dev, chunk_Y * sizeof(float), streamsforHaloExcahnge[dev]); cudaMemcpyAsync(d_ehalos[dev], y_buffer[dev], chunk_Y * sizeof(float), cudaMemcpyDeviceToDevice, streamsforHaloExcahnge[dev]); } } } } } if (auto err = cudaGetLastError()) { cout << "Data copy failed 3: " << cudaGetErrorString(err) << endl; return err; } high_resolution_clock::time_point t2 = high_resolution_clock::now(); auto duration = duration_cast<microseconds>(t2 - t1).count(); cout << endl << "Iterations successful. Time taken in microseconds :" << duration << endl; //Sync and Destroy streams for (int i = 0; i < numDevices; ++i) { //Synchro the streams cudaSetDevice(i); cudaStreamSynchronize(streams[i]); cudaStreamDestroy(streams[i]); if (p2penabled) { cudaStreamSynchronize(streamsforHaloExcahnge[i]); cudaStreamDestroy(streamsforHaloExcahnge[i]); } } //Results copied to disk for (int dev = 0; dev < numDevices;dev++) { sendToPrint(&partial_resultOnHost[dev][0], deviceArray[dev].devicePosition_X, deviceArray[dev].devicePosition_Y, numberOfDevicesAlong_X, chunk_X, chunk_Y, dim, size, result, numDevices, iterations - 1, iterations); } //==========================================Performance using CUDA stream ends=========================================================================== //Done in phase 2 of development: Disble P2P across devices if (p2penabled) { disableP2P(numDevices); } //Free memory on devices for (int dev = 0; dev < numDevices; dev++) { cudaSetDevice(dev); cudaFree(d_A0[dev]); cudaFree(d_A1[dev]); cudaFree(d_A2[dev]); cudaFree(d_A3[dev]); cudaFree(d_A4[dev]); cudaFree(d_Vec_In[dev]); cudaFree(d_Vec_Out[dev]); cudaFree(d_nhalos[dev]); cudaFree(d_shalos[dev]); cudaFree(d_ehalos[dev]); cudaFree(d_whalos[dev]); cudaFree(d_Rhs[dev]); cudaFreeHost(partial_resultOnHost[dev]); cudaFreeHost(nHalo_pinned[dev]); cudaFreeHost(sHalo_pinned[dev]); cudaFreeHost(wHalo_pinned[dev]); cudaFreeHost(eHalo_pinned[dev]); cudaDeviceReset(); } cout << endl << "Device Memory free successful." << endl; //Take care of dynamic mem location //delete[] domainDivision; return cudaSuccess; } int performJacobi_MultiGPU2D_Decom(unsigned int dim, unsigned int numJacobiIt, float* A0, float* A1, float* A2, float* A3, float* A4, float* rhs, float* x_in) { cudaError_t cudaStatus = performMultiGPUJacobi(dim, numJacobiIt, &A0[0], &A1[0], &A2[0], &A3[0], &A4[0], &rhs[0], &x_in[0]); if (cudaStatus != cudaSuccess) { cout << "Computation failed: " << endl; return 1; } if (cudaStatus != cudaSuccess) { cout << "Cuda Device Reset failed: " << endl; return 1; } return 0; }
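When peer access is enabled, the p2penabled branch above performs the same swap entirely on the devices through a staging buffer: pull the neighbour's halo into the local buffer, push the local halo to the neighbour, then copy the staged data into place. The sketch below shows that three-step swap for a single north/south pair; the device numbering, HALO_N and the buffer names are assumptions for illustration only.

// Minimal sketch: device-to-device halo swap between GPU 0 and GPU 1 via a staging buffer.
// Assumes the two GPUs can access each other's memory (peer access); HALO_N is illustrative.
#include <cuda_runtime.h>
#include <cstdio>

#define HALO_N 256

int main() {
    float *d_north0, *d_buffer0;   // GPU 0: its north halo and a staging buffer
    float *d_south1;               // GPU 1: its south halo
    cudaStream_t s0;

    cudaSetDevice(0);
    cudaStreamCreate(&s0);
    cudaMalloc((void**)&d_north0, HALO_N * sizeof(float));
    cudaMalloc((void**)&d_buffer0, HALO_N * sizeof(float));
    cudaDeviceEnablePeerAccess(1, 0);      // returns an error if P2P is unsupported

    cudaSetDevice(1);
    cudaMalloc((void**)&d_south1, HALO_N * sizeof(float));
    cudaDeviceEnablePeerAccess(0, 0);

    cudaSetDevice(0);
    // 1. Pull the neighbour's south halo into the local staging buffer.
    cudaMemcpyPeerAsync(d_buffer0, 0, d_south1, 1, HALO_N * sizeof(float), s0);
    // 2. Push the local north halo to the neighbour (ordering on s0 keeps step 1's read safe).
    cudaMemcpyPeerAsync(d_south1, 1, d_north0, 0, HALO_N * sizeof(float), s0);
    // 3. Move the staged copy into the local north halo.
    cudaMemcpyAsync(d_north0, d_buffer0, HALO_N * sizeof(float),
                    cudaMemcpyDeviceToDevice, s0);
    cudaStreamSynchronize(s0);

    cudaSetDevice(1);
    cudaFree(d_south1);
    cudaSetDevice(0);
    cudaFree(d_north0);
    cudaFree(d_buffer0);
    cudaStreamDestroy(s0);
    printf("p2p halo swap sketch done\n");
    return 0;
}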
ff72425b759500648faa4ac6daa37e28e322cc45.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <string> #include <iostream> #include <thread> #include <atomic> #include <vector> #include <mutex> #include <chrono> using namespace std; atomic<int> epoch; atomic<int> count; int num_threads; int duration; static int TOTAL_ITERATIONS_THREAD = 100; static int TOTAL_ITERATIONS_PROGRAM = 100; mutex thread_mutex; bool serial; double total_time_sync; double total_time_launch; #define gpuErrChk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } __device__ unsigned long long int gclock64() { unsigned long long int rv; asm volatile ( "mov.u64 %0, %%globaltimer;" : "=l"(rv) ); return rv; } __global__ void spin(int *a, int runtime) { unsigned long long int start_clock = gclock64(); unsigned long long int clock_offset = 0; while (clock_offset < runtime) { clock_offset = gclock64() - start_clock; } a[0] = clock_offset; } void barrier(int& local_epoch) { int count_snapshot = count.fetch_add(1); if(count_snapshot == num_threads - 1) { count = 0; epoch ++; } while(local_epoch != epoch) {} local_epoch++; } void task(int tid) { int local_epoch = 1; int iteration = 0; double elapsed_time_sync = 0; double elapsed_time_launch = 0; hipEvent_t event; hipStream_t st; hipStreamCreate(&st); int* t; gpuErrChk(hipMalloc(&t, sizeof(int))); gpuErrChk(hipMemset(t, 0, sizeof(int))); gpuErrChk(hipEventCreateWithFlags(&event, hipEventDisableTiming)); while(iteration < TOTAL_ITERATIONS_THREAD) { barrier(local_epoch); chrono::duration<double, micro> usec; if(serial) thread_mutex.lock(); auto start_time_launch = chrono::high_resolution_clock::now(); hipLaunchKernelGGL(( spin), dim3(1),dim3(1),0, st, t, duration); auto end_time_launch = chrono::high_resolution_clock::now(); //gpuErrChk(hipEventRecord(event, st)); auto start_time_sync = chrono::high_resolution_clock::now(); //gpuErrChk(hipEventSynchronize(event)); gpuErrChk(hipStreamSynchronize(st)); auto end_time_sync = chrono::high_resolution_clock::now(); if(serial) thread_mutex.unlock(); usec = end_time_sync - start_time_sync; elapsed_time_sync = elapsed_time_sync + usec.count(); usec = end_time_launch - start_time_launch; elapsed_time_launch = elapsed_time_launch + usec.count(); iteration++; } // Report latency thread_mutex.lock(); //cout << "Thread " << tid << ", launch: " << (float)elapsed_time_launch/(float)iteration // << ", sync: " << (float)elapsed_time_sync/(float)iteration << endl; total_time_launch = total_time_launch + elapsed_time_launch/(double)iteration; total_time_sync = total_time_sync + elapsed_time_sync/(double)iteration; thread_mutex.unlock(); hipStreamDestroy(st); } int main(int argc, char* argv[]) { num_threads = 2; duration = 1000; serial = false; if (argc > 1) { num_threads = atoi(argv[1]); } if (argc > 2) { duration = atoi(argv[2]); } if(argc > 3) { serial = (atoi(argv[3]) == 1); } std::cout << "num_threads= " << num_threads << std::endl; std::cout << "duration= " << duration << std::endl; std::cout << "serial= " << serial << std::endl; for(int n = 0; n < TOTAL_ITERATIONS_PROGRAM; n++) { count = 0; epoch = 0; vector<thread*> t; t.resize(num_threads); for(int i = 0; i < num_threads; i++) { t[i] = new thread(task, i); } for(int i = 0; i < num_threads; i++) { t[i]->join(); delete t[i]; } } cout << num_threads << " " << serial << " Launch: " << 
total_time_launch/(double)(TOTAL_ITERATIONS_PROGRAM*num_threads) << " Sync: " << total_time_sync/(double)(TOTAL_ITERATIONS_PROGRAM*num_threads) << endl; }
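The benchmark above separates the cost of issuing a kernel launch from the cost of waiting for it by timestamping around the launch call and around the stream synchronisation. A stripped-down, single-threaded version of that measurement is sketched below; unlike the original, the spin kernel here busy-waits on clock64() cycles rather than the global timer, so the on-GPU duration is only approximate.

// Minimal sketch: timing kernel launch vs. stream synchronisation separately.
#include <cuda_runtime.h>
#include <chrono>
#include <cstdio>

__global__ void spin_cycles(long long cycles) {
    long long start = clock64();                 // per-SM cycle counter
    while (clock64() - start < cycles) { }       // busy-wait for the requested cycle count
}

int main() {
    cudaStream_t st;
    cudaStreamCreate(&st);

    auto t0 = std::chrono::high_resolution_clock::now();
    spin_cycles<<<1, 1, 0, st>>>(1000000);       // roughly a millisecond of busy work
    auto t1 = std::chrono::high_resolution_clock::now();
    cudaStreamSynchronize(st);                   // wait for the kernel to finish
    auto t2 = std::chrono::high_resolution_clock::now();

    std::chrono::duration<double, std::micro> launch = t1 - t0;
    std::chrono::duration<double, std::micro> sync   = t2 - t1;
    printf("launch: %.2f us, sync: %.2f us\n", launch.count(), sync.count());

    cudaStreamDestroy(st);
    return 0;
}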
ff72425b759500648faa4ac6daa37e28e322cc45.cu
#include <string> #include <iostream> #include <thread> #include <atomic> #include <vector> #include <mutex> #include <chrono> using namespace std; atomic<int> epoch; atomic<int> count; int num_threads; int duration; static int TOTAL_ITERATIONS_THREAD = 100; static int TOTAL_ITERATIONS_PROGRAM = 100; mutex thread_mutex; bool serial; double total_time_sync; double total_time_launch; #define gpuErrChk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } __device__ unsigned long long int gclock64() { unsigned long long int rv; asm volatile ( "mov.u64 %0, %%globaltimer;" : "=l"(rv) ); return rv; } __global__ void spin(int *a, int runtime) { unsigned long long int start_clock = gclock64(); unsigned long long int clock_offset = 0; while (clock_offset < runtime) { clock_offset = gclock64() - start_clock; } a[0] = clock_offset; } void barrier(int& local_epoch) { int count_snapshot = count.fetch_add(1); if(count_snapshot == num_threads - 1) { count = 0; epoch ++; } while(local_epoch != epoch) {} local_epoch++; } void task(int tid) { int local_epoch = 1; int iteration = 0; double elapsed_time_sync = 0; double elapsed_time_launch = 0; cudaEvent_t event; cudaStream_t st; cudaStreamCreate(&st); int* t; gpuErrChk(cudaMalloc(&t, sizeof(int))); gpuErrChk(cudaMemset(t, 0, sizeof(int))); gpuErrChk(cudaEventCreateWithFlags(&event, cudaEventDisableTiming)); while(iteration < TOTAL_ITERATIONS_THREAD) { barrier(local_epoch); chrono::duration<double, micro> usec; if(serial) thread_mutex.lock(); auto start_time_launch = chrono::high_resolution_clock::now(); spin<<<1,1,0, st>>>(t, duration); auto end_time_launch = chrono::high_resolution_clock::now(); //gpuErrChk(cudaEventRecord(event, st)); auto start_time_sync = chrono::high_resolution_clock::now(); //gpuErrChk(cudaEventSynchronize(event)); gpuErrChk(cudaStreamSynchronize(st)); auto end_time_sync = chrono::high_resolution_clock::now(); if(serial) thread_mutex.unlock(); usec = end_time_sync - start_time_sync; elapsed_time_sync = elapsed_time_sync + usec.count(); usec = end_time_launch - start_time_launch; elapsed_time_launch = elapsed_time_launch + usec.count(); iteration++; } // Report latency thread_mutex.lock(); //cout << "Thread " << tid << ", launch: " << (float)elapsed_time_launch/(float)iteration // << ", sync: " << (float)elapsed_time_sync/(float)iteration << endl; total_time_launch = total_time_launch + elapsed_time_launch/(double)iteration; total_time_sync = total_time_sync + elapsed_time_sync/(double)iteration; thread_mutex.unlock(); cudaStreamDestroy(st); } int main(int argc, char* argv[]) { num_threads = 2; duration = 1000; serial = false; if (argc > 1) { num_threads = atoi(argv[1]); } if (argc > 2) { duration = atoi(argv[2]); } if(argc > 3) { serial = (atoi(argv[3]) == 1); } std::cout << "num_threads= " << num_threads << std::endl; std::cout << "duration= " << duration << std::endl; std::cout << "serial= " << serial << std::endl; for(int n = 0; n < TOTAL_ITERATIONS_PROGRAM; n++) { count = 0; epoch = 0; vector<thread*> t; t.resize(num_threads); for(int i = 0; i < num_threads; i++) { t[i] = new thread(task, i); } for(int i = 0; i < num_threads; i++) { t[i]->join(); delete t[i]; } } cout << num_threads << " " << serial << " Launch: " << total_time_launch/(double)(TOTAL_ITERATIONS_PROGRAM*num_threads) << " Sync: " << 
total_time_sync/(double)(TOTAL_ITERATIONS_PROGRAM*num_threads) << endl; }
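The lockstep between host threads in the benchmark is an epoch-style spin barrier built from two atomics: the last arriving thread resets the counter and bumps the epoch, and every other thread spins until the epoch changes. A standalone sketch of just that barrier follows; the thread count and iteration count are arbitrary placeholders.

// Minimal sketch: epoch-based spin barrier keeping N host threads in lockstep (no CUDA needed).
#include <atomic>
#include <thread>
#include <vector>
#include <cstdio>

static const int kThreads = 4;                  // placeholder thread count
static std::atomic<int> g_count{0};
static std::atomic<int> g_epoch{0};

void barrier_wait(int &local_epoch) {
    // The last thread to arrive resets the counter and advances the epoch.
    if (g_count.fetch_add(1) == kThreads - 1) {
        g_count = 0;
        g_epoch++;
    }
    while (local_epoch != g_epoch.load()) { }   // everyone else spins until the epoch changes
    local_epoch++;
}

int main() {
    std::vector<std::thread> workers;
    for (int tid = 0; tid < kThreads; ++tid) {
        workers.emplace_back([tid] {
            int local_epoch = 1;
            for (int it = 0; it < 3; ++it) {
                barrier_wait(local_epoch);      // all threads meet here once per iteration
            }
            printf("thread %d done\n", tid);
        });
    }
    for (auto &w : workers) w.join();
    return 0;
}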
4ad786472792faaf13a3019778c8044a5297485e.hip
// !!! This is a file automatically generated by hipify!!! #include "THHUNN.h" #include "common.h" #include "im2col.h" void THNN_CudaSpatialDilatedConvolution_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output, THCudaTensor *weight, THCudaTensor *bias, THCudaTensor *columns, THCudaTensor *ones, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH) { THCUNN_assertSameGPU(state, 5, input, output, weight, columns, ones); if (bias) { THCUNN_assertSameGPU(state, 2, weight, bias); } THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected"); THArgCheck(weight->nDimension == 4, 4, "weight tensor must be 4D (nOutputPlane,nInputPlane,kH,kW)"); THArgCheck(!bias || weight->size[0] == bias->size[0], 4, "nOutputPlane mismatch in weight and bias"); THArgCheck(kW > 0 && kH > 0, 8, "kernel size should be greater than zero"); THArgCheck(dW > 0 && dH > 0, 10, "stride should be greater than zero"); // Params: int nInputPlane = weight->size[1]; int nOutputPlane = weight->size[0]; int batch = 1; if (input->nDimension == 3) { THArgCheck(input->size[0] == nInputPlane, 2, "input channels and nInputPlane dont match"); // Force batch batch = 0; THCudaTensor_resize4d(state, input, 1, input->size[0], input->size[1], input->size[2]); } else { THArgCheck(input->size[1] == nInputPlane, 2, "input channels and nInputPlane dont match"); } long inputWidth = input->size[3]; long inputHeight = input->size[2]; long outputWidth = (inputWidth + 2*padW - (dilationW * (kW - 1) + 1)) / dW + 1; long outputHeight = (inputHeight + 2*padH - (dilationH * (kH - 1) + 1)) / dH + 1; if (outputWidth < 1 || outputHeight < 1) THError("Given input size: (%dx%dx%d). Calculated output size: (%dx%dx%d). Output size is too small", nInputPlane,inputHeight,inputWidth,nOutputPlane,outputHeight,outputWidth); // Batch size + input planes long batchSize = input->size[0]; // Resize output THCudaTensor_resize4d(state, output, batchSize, nOutputPlane, outputHeight, outputWidth); // Resize temporary columns THCudaTensor_resize2d(state, columns, nInputPlane*kW*kH, outputHeight*outputWidth); // Define a buffer of ones, for bias accumulation // Note: this buffer can be shared with other modules, it only ever gets increased, // and always contains ones. if (ones->nDimension != 2 || ones->size[0]*ones->size[1] < outputHeight*outputWidth) { // Resize plane and fill with ones... 
THCudaTensor_resize2d(state, ones, outputHeight, outputWidth); THCudaTensor_fill(state, ones, 1); } // Helpers THCudaTensor *input_n = THCudaTensor_new(state); THCudaTensor *output_n = THCudaTensor_new(state); // For each elt in batch, do: for (int elt = 0; elt < batchSize; elt ++) { // Matrix mulitply per output: THCudaTensor_select(state, input_n, input, 0, elt); THCudaTensor_select(state, output_n, output, 0, elt); // Do Bias first: // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m_ = nOutputPlane; long n_ = outputHeight * outputWidth; long k_ = 1; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) if (bias) { THCudaBlas_gemm( state, 't', 'n', n_, m_, k_, 1, THCudaTensor_data(state, ones), k_, THCudaTensor_data(state, bias), k_, 0, THCudaTensor_data(state, output_n), n_ ); } else { THCudaTensor_zero(state, output_n); } // Extract columns: im2col( THCState_getCurrentStream(state), THCudaTensor_data(state, input_n), nInputPlane, inputHeight, inputWidth, kH, kW, padH, padW, dH, dW, dilationH, dilationW, THCudaTensor_data(state, columns) ); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m = nOutputPlane; long n = columns->size[1]; long k = nInputPlane*kH*kW; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) THCudaBlas_gemm( state, 'n', 'n', n, m, k, 1, THCudaTensor_data(state, columns), n, THCudaTensor_data(state, weight), k, 1, THCudaTensor_data(state, output_n), n ); } // Free THCudaTensor_free(state, input_n); THCudaTensor_free(state, output_n); // Resize output if (batch == 0) { THCudaTensor_resize3d(state, output, nOutputPlane, outputHeight, outputWidth); THCudaTensor_resize3d(state, input, nInputPlane, inputHeight, inputWidth); } } void THNN_CudaSpatialDilatedConvolution_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput, THCudaTensor *weight, THCudaTensor *gradColumns, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH ) { THCUNN_assertSameGPU(state, 5, input, gradOutput, weight, gradColumns, gradInput); THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected"); THArgCheck(weight->nDimension == 4, 4, "weight tensor must be 4D (nOutputPlane,nInputPlane,kH,kW)"); THArgCheck(kW > 0 && kH > 0, 9, "kernel size should be greater than zero"); THArgCheck(dW > 0 && dH > 0, 11, "stride should be greater than zero"); // Params int nInputPlane = weight->size[1]; int nOutputPlane = weight->size[0]; int batch = 1; if (input->nDimension == 3) { // Force batch batch = 0; THCudaTensor_resize4d(state, input, 1, input->size[0], input->size[1], input->size[2]); THCudaTensor_resize4d(state, gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2]); } long inputWidth = input->size[3]; long inputHeight = input->size[2]; long outputWidth = (inputWidth + 2*padW - (dilationW * (kW - 1) + 1)) / dW + 1; long outputHeight = (inputHeight + 2*padH - (dilationH * (kH - 1) + 1)) / dH + 1; // Batch size + input planes long batchSize = input->size[0]; // Resize output THCudaTensor_resize4d(state, gradInput, batchSize, nInputPlane, inputHeight, inputWidth); // Resize temporary columns THCudaTensor_resize2d(state, gradColumns, nInputPlane*kW*kH, outputHeight*outputWidth); // Helpers THCudaTensor *gradInput_n = THCudaTensor_new(state); THCudaTensor *gradOutput_n = 
THCudaTensor_new(state); // For each elt in batch, do: for (int elt = 0; elt < batchSize; elt ++) { // Matrix mulitply per sample: THCudaTensor_select(state, gradInput_n, gradInput, 0, elt); THCudaTensor_select(state, gradOutput_n, gradOutput, 0, elt); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m = nInputPlane*kW*kH; long n = gradColumns->size[1]; long k = nOutputPlane; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) THCudaBlas_gemm( state, 'n', 't', n, m, k, 1, THCudaTensor_data(state, gradOutput_n), n, THCudaTensor_data(state, weight), m, 0, THCudaTensor_data(state, gradColumns), n ); // Unpack columns back into input: col2im( THCState_getCurrentStream(state), THCudaTensor_data(state, gradColumns), nInputPlane, inputHeight, inputWidth, kH, kW, padH, padW, dH, dW, dilationH, dilationW, THCudaTensor_data(state, gradInput_n) ); } // Free THCudaTensor_free(state, gradInput_n); THCudaTensor_free(state, gradOutput_n); // Resize output if (batch == 0) { THCudaTensor_resize3d(state, gradOutput, nOutputPlane, outputHeight, outputWidth); THCudaTensor_resize3d(state, input, nInputPlane, inputHeight, inputWidth); THCudaTensor_resize3d(state, gradInput, nInputPlane, inputHeight, inputWidth); } } void THNN_CudaSpatialDilatedConvolution_accGradParameters(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradWeight, THCudaTensor *gradBias, THCudaTensor *columns, THCudaTensor *ones, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH, float scale) { THCUNN_assertSameGPU(state, 5, input, gradOutput, gradWeight, columns, ones); if (gradBias) { THCUNN_assertSameGPU(state, 2, gradWeight, gradBias); } THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected"); THArgCheck(gradWeight->nDimension == 4, 4, "gradWeight tensor must be 4D (nOutputPlane,nInputPlane,kH,kW)"); THArgCheck(!gradBias || gradWeight->size[0] == gradBias->size[0], 4, "nOutputPlane mismatch in gradWeight and gradBias"); THArgCheck(kW > 0 && kH > 0, 8, "kernel size should be greater than zero"); THArgCheck(dW > 0 && dH > 0, 10, "stride should be greater than zero"); // Params int nInputPlane = gradWeight->size[1]; int nOutputPlane = gradWeight->size[0]; int batch = 1; if (input->nDimension == 3) { // Force batch batch = 0; THCudaTensor_resize4d(state, input, 1, input->size[0], input->size[1], input->size[2]); THCudaTensor_resize4d(state, gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2]); } long inputWidth = input->size[3]; long inputHeight = input->size[2]; long outputWidth = (inputWidth + 2*padW - (dilationW * (kW - 1) + 1)) / dW + 1; long outputHeight = (inputHeight + 2*padH - (dilationH * (kH - 1) + 1)) / dH + 1; // Batch size + input planes long batchSize = input->size[0]; // Define a buffer of ones, for bias accumulation if (ones->nDimension != 2 || ones->size[0]*ones->size[1] < outputHeight*outputWidth) { // Resize plane and fill with ones... 
THCudaTensor_resize2d(state, ones, outputHeight, outputWidth); THCudaTensor_fill(state, ones, 1); } // Resize temporary columns THCudaTensor_resize2d(state, columns, nInputPlane*kW*kH, outputHeight*outputWidth); // Helpers THCudaTensor *input_n = THCudaTensor_new(state); THCudaTensor *gradOutput_n = THCudaTensor_new(state); // For each elt in batch, do: for (int elt = 0; elt < batchSize; elt ++) { // Matrix mulitply per output: THCudaTensor_select(state, input_n, input, 0, elt); THCudaTensor_select(state, gradOutput_n, gradOutput, 0, elt); // Extract columns: im2col( THCState_getCurrentStream(state), THCudaTensor_data(state, input_n), nInputPlane, inputHeight, inputWidth, kH, kW, padH, padW, dH, dW, dilationH, dilationW, THCudaTensor_data(state, columns) ); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m = nOutputPlane; long n = nInputPlane*kW*kH; long k = columns->size[1]; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) THCudaBlas_gemm( state, 't', 'n', n, m, k, scale, THCudaTensor_data(state, columns), k, THCudaTensor_data(state, gradOutput_n), k, 1, THCudaTensor_data(state, gradWeight), n ); // Do Bias: // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m_ = nOutputPlane; long k_ = outputHeight * outputWidth; // Do GEMV (note: this is a bit confusing because gemv assumes column-major matrices) if (gradBias) { THCudaBlas_gemv( state, 't', k_, m_, scale, THCudaTensor_data(state, gradOutput_n), k_, THCudaTensor_data(state, ones), 1, 1, THCudaTensor_data(state, gradBias), 1 ); } } // Free THCudaTensor_free(state, input_n); THCudaTensor_free(state, gradOutput_n); // Resize if (batch == 0) { THCudaTensor_resize3d(state, gradOutput, nOutputPlane, outputHeight, outputWidth); THCudaTensor_resize3d(state, input, nInputPlane, inputHeight, inputWidth); } }
4ad786472792faaf13a3019778c8044a5297485e.cu
#include "THCUNN.h" #include "common.h" #include "im2col.h" void THNN_CudaSpatialDilatedConvolution_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output, THCudaTensor *weight, THCudaTensor *bias, THCudaTensor *columns, THCudaTensor *ones, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH) { THCUNN_assertSameGPU(state, 5, input, output, weight, columns, ones); if (bias) { THCUNN_assertSameGPU(state, 2, weight, bias); } THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected"); THArgCheck(weight->nDimension == 4, 4, "weight tensor must be 4D (nOutputPlane,nInputPlane,kH,kW)"); THArgCheck(!bias || weight->size[0] == bias->size[0], 4, "nOutputPlane mismatch in weight and bias"); THArgCheck(kW > 0 && kH > 0, 8, "kernel size should be greater than zero"); THArgCheck(dW > 0 && dH > 0, 10, "stride should be greater than zero"); // Params: int nInputPlane = weight->size[1]; int nOutputPlane = weight->size[0]; int batch = 1; if (input->nDimension == 3) { THArgCheck(input->size[0] == nInputPlane, 2, "input channels and nInputPlane dont match"); // Force batch batch = 0; THCudaTensor_resize4d(state, input, 1, input->size[0], input->size[1], input->size[2]); } else { THArgCheck(input->size[1] == nInputPlane, 2, "input channels and nInputPlane dont match"); } long inputWidth = input->size[3]; long inputHeight = input->size[2]; long outputWidth = (inputWidth + 2*padW - (dilationW * (kW - 1) + 1)) / dW + 1; long outputHeight = (inputHeight + 2*padH - (dilationH * (kH - 1) + 1)) / dH + 1; if (outputWidth < 1 || outputHeight < 1) THError("Given input size: (%dx%dx%d). Calculated output size: (%dx%dx%d). Output size is too small", nInputPlane,inputHeight,inputWidth,nOutputPlane,outputHeight,outputWidth); // Batch size + input planes long batchSize = input->size[0]; // Resize output THCudaTensor_resize4d(state, output, batchSize, nOutputPlane, outputHeight, outputWidth); // Resize temporary columns THCudaTensor_resize2d(state, columns, nInputPlane*kW*kH, outputHeight*outputWidth); // Define a buffer of ones, for bias accumulation // Note: this buffer can be shared with other modules, it only ever gets increased, // and always contains ones. if (ones->nDimension != 2 || ones->size[0]*ones->size[1] < outputHeight*outputWidth) { // Resize plane and fill with ones... 
THCudaTensor_resize2d(state, ones, outputHeight, outputWidth); THCudaTensor_fill(state, ones, 1); } // Helpers THCudaTensor *input_n = THCudaTensor_new(state); THCudaTensor *output_n = THCudaTensor_new(state); // For each elt in batch, do: for (int elt = 0; elt < batchSize; elt ++) { // Matrix mulitply per output: THCudaTensor_select(state, input_n, input, 0, elt); THCudaTensor_select(state, output_n, output, 0, elt); // Do Bias first: // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m_ = nOutputPlane; long n_ = outputHeight * outputWidth; long k_ = 1; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) if (bias) { THCudaBlas_gemm( state, 't', 'n', n_, m_, k_, 1, THCudaTensor_data(state, ones), k_, THCudaTensor_data(state, bias), k_, 0, THCudaTensor_data(state, output_n), n_ ); } else { THCudaTensor_zero(state, output_n); } // Extract columns: im2col( THCState_getCurrentStream(state), THCudaTensor_data(state, input_n), nInputPlane, inputHeight, inputWidth, kH, kW, padH, padW, dH, dW, dilationH, dilationW, THCudaTensor_data(state, columns) ); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m = nOutputPlane; long n = columns->size[1]; long k = nInputPlane*kH*kW; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) THCudaBlas_gemm( state, 'n', 'n', n, m, k, 1, THCudaTensor_data(state, columns), n, THCudaTensor_data(state, weight), k, 1, THCudaTensor_data(state, output_n), n ); } // Free THCudaTensor_free(state, input_n); THCudaTensor_free(state, output_n); // Resize output if (batch == 0) { THCudaTensor_resize3d(state, output, nOutputPlane, outputHeight, outputWidth); THCudaTensor_resize3d(state, input, nInputPlane, inputHeight, inputWidth); } } void THNN_CudaSpatialDilatedConvolution_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput, THCudaTensor *weight, THCudaTensor *gradColumns, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH ) { THCUNN_assertSameGPU(state, 5, input, gradOutput, weight, gradColumns, gradInput); THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected"); THArgCheck(weight->nDimension == 4, 4, "weight tensor must be 4D (nOutputPlane,nInputPlane,kH,kW)"); THArgCheck(kW > 0 && kH > 0, 9, "kernel size should be greater than zero"); THArgCheck(dW > 0 && dH > 0, 11, "stride should be greater than zero"); // Params int nInputPlane = weight->size[1]; int nOutputPlane = weight->size[0]; int batch = 1; if (input->nDimension == 3) { // Force batch batch = 0; THCudaTensor_resize4d(state, input, 1, input->size[0], input->size[1], input->size[2]); THCudaTensor_resize4d(state, gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2]); } long inputWidth = input->size[3]; long inputHeight = input->size[2]; long outputWidth = (inputWidth + 2*padW - (dilationW * (kW - 1) + 1)) / dW + 1; long outputHeight = (inputHeight + 2*padH - (dilationH * (kH - 1) + 1)) / dH + 1; // Batch size + input planes long batchSize = input->size[0]; // Resize output THCudaTensor_resize4d(state, gradInput, batchSize, nInputPlane, inputHeight, inputWidth); // Resize temporary columns THCudaTensor_resize2d(state, gradColumns, nInputPlane*kW*kH, outputHeight*outputWidth); // Helpers THCudaTensor *gradInput_n = THCudaTensor_new(state); THCudaTensor *gradOutput_n = 
THCudaTensor_new(state); // For each elt in batch, do: for (int elt = 0; elt < batchSize; elt ++) { // Matrix mulitply per sample: THCudaTensor_select(state, gradInput_n, gradInput, 0, elt); THCudaTensor_select(state, gradOutput_n, gradOutput, 0, elt); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m = nInputPlane*kW*kH; long n = gradColumns->size[1]; long k = nOutputPlane; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) THCudaBlas_gemm( state, 'n', 't', n, m, k, 1, THCudaTensor_data(state, gradOutput_n), n, THCudaTensor_data(state, weight), m, 0, THCudaTensor_data(state, gradColumns), n ); // Unpack columns back into input: col2im( THCState_getCurrentStream(state), THCudaTensor_data(state, gradColumns), nInputPlane, inputHeight, inputWidth, kH, kW, padH, padW, dH, dW, dilationH, dilationW, THCudaTensor_data(state, gradInput_n) ); } // Free THCudaTensor_free(state, gradInput_n); THCudaTensor_free(state, gradOutput_n); // Resize output if (batch == 0) { THCudaTensor_resize3d(state, gradOutput, nOutputPlane, outputHeight, outputWidth); THCudaTensor_resize3d(state, input, nInputPlane, inputHeight, inputWidth); THCudaTensor_resize3d(state, gradInput, nInputPlane, inputHeight, inputWidth); } } void THNN_CudaSpatialDilatedConvolution_accGradParameters(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradWeight, THCudaTensor *gradBias, THCudaTensor *columns, THCudaTensor *ones, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH, float scale) { THCUNN_assertSameGPU(state, 5, input, gradOutput, gradWeight, columns, ones); if (gradBias) { THCUNN_assertSameGPU(state, 2, gradWeight, gradBias); } THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected"); THArgCheck(gradWeight->nDimension == 4, 4, "gradWeight tensor must be 4D (nOutputPlane,nInputPlane,kH,kW)"); THArgCheck(!gradBias || gradWeight->size[0] == gradBias->size[0], 4, "nOutputPlane mismatch in gradWeight and gradBias"); THArgCheck(kW > 0 && kH > 0, 8, "kernel size should be greater than zero"); THArgCheck(dW > 0 && dH > 0, 10, "stride should be greater than zero"); // Params int nInputPlane = gradWeight->size[1]; int nOutputPlane = gradWeight->size[0]; int batch = 1; if (input->nDimension == 3) { // Force batch batch = 0; THCudaTensor_resize4d(state, input, 1, input->size[0], input->size[1], input->size[2]); THCudaTensor_resize4d(state, gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2]); } long inputWidth = input->size[3]; long inputHeight = input->size[2]; long outputWidth = (inputWidth + 2*padW - (dilationW * (kW - 1) + 1)) / dW + 1; long outputHeight = (inputHeight + 2*padH - (dilationH * (kH - 1) + 1)) / dH + 1; // Batch size + input planes long batchSize = input->size[0]; // Define a buffer of ones, for bias accumulation if (ones->nDimension != 2 || ones->size[0]*ones->size[1] < outputHeight*outputWidth) { // Resize plane and fill with ones... 
THCudaTensor_resize2d(state, ones, outputHeight, outputWidth); THCudaTensor_fill(state, ones, 1); } // Resize temporary columns THCudaTensor_resize2d(state, columns, nInputPlane*kW*kH, outputHeight*outputWidth); // Helpers THCudaTensor *input_n = THCudaTensor_new(state); THCudaTensor *gradOutput_n = THCudaTensor_new(state); // For each elt in batch, do: for (int elt = 0; elt < batchSize; elt ++) { // Matrix mulitply per output: THCudaTensor_select(state, input_n, input, 0, elt); THCudaTensor_select(state, gradOutput_n, gradOutput, 0, elt); // Extract columns: im2col( THCState_getCurrentStream(state), THCudaTensor_data(state, input_n), nInputPlane, inputHeight, inputWidth, kH, kW, padH, padW, dH, dW, dilationH, dilationW, THCudaTensor_data(state, columns) ); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m = nOutputPlane; long n = nInputPlane*kW*kH; long k = columns->size[1]; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) THCudaBlas_gemm( state, 't', 'n', n, m, k, scale, THCudaTensor_data(state, columns), k, THCudaTensor_data(state, gradOutput_n), k, 1, THCudaTensor_data(state, gradWeight), n ); // Do Bias: // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m_ = nOutputPlane; long k_ = outputHeight * outputWidth; // Do GEMV (note: this is a bit confusing because gemv assumes column-major matrices) if (gradBias) { THCudaBlas_gemv( state, 't', k_, m_, scale, THCudaTensor_data(state, gradOutput_n), k_, THCudaTensor_data(state, ones), 1, 1, THCudaTensor_data(state, gradBias), 1 ); } } // Free THCudaTensor_free(state, input_n); THCudaTensor_free(state, gradOutput_n); // Resize if (batch == 0) { THCudaTensor_resize3d(state, gradOutput, nOutputPlane, outputHeight, outputWidth); THCudaTensor_resize3d(state, input, nInputPlane, inputHeight, inputWidth); } }
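Both the HIP and CUDA versions of this file derive the output spatial size from the same dilation-aware expression before doing any im2col/GEMM work. A minimal standalone sketch of that arithmetic (the helper name and the example sizes are illustrative, not taken from the file):

#include <stdio.h>

/* Mirrors the expression used in updateOutput/updateGradInput/accGradParameters:
   out = (in + 2*pad - (dilation*(kernel - 1) + 1)) / stride + 1 */
static long dilated_out_size(long in, int pad, int kernel, int dilation, int stride)
{
    return (in + 2L * pad - (dilation * (kernel - 1) + 1)) / stride + 1;
}

int main(void)
{
    /* A 32x32 input with a 3x3 kernel, pad 2, dilation 2, stride 1 keeps its size. */
    printf("%ld\n", dilated_out_size(32, 2, 3, 2, 1)); /* prints 32 */
    return 0;
}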
b3992b851fcd23923c874c724382106c4bcb6754.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <cstdlib> __global__ void conv_batchnorm_fusion_kernel(float *output, float *input, float *weight, float *bias, int batch, int in_channel, int out_channel, int height, int width, int kernel_height, int kernel_width, int pad_height, int pad_width, int stride_height, int stride_width, int total_size) { int N = batch; int C = in_channel; int K = out_channel; int H = height; int W = width; int kH = kernel_height; int kW = kernel_width; int pH = pad_height; int pW = pad_width; int sH = stride_height; int sW = stride_width; int P = ((H + 2 * pH - kH) / sH) + 1; int Q = ((W + 2 * pW - kW) / sW) + 1; //tid : thread id int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= total_size) return; //q_idx : output w-index int q_idx = tid % Q; int idx = tid / Q; //p_idx : output h-index int p_idx = idx % P; idx /= P; //k_idx : output channel-index int k_idx = idx % K; //n_idx : output batch-index int n_idx = idx / K; //output(n_idx, k_idx, p_idx, q_idx) float sum = 0.0f; for (int c_idx = 0; c_idx < C; c_idx++) { for (int kh_idx = 0; kh_idx < kH; kh_idx++) { int h_idx = p_idx * sH + kh_idx - pH; if (h_idx >= 0 && h_idx < H) { for (int kw_idx = 0; kw_idx < kW; kw_idx++) { int w_idx = q_idx * sW + kw_idx - pW; if (w_idx >= 0 && w_idx < W) { int input_index = n_idx * C * H * W + c_idx * H * W + h_idx * W + w_idx; int weight_index = k_idx * C * kH * kW + c_idx * kH * kW + kh_idx * kW + kw_idx; sum += input[input_index] * weight[weight_index]; } } } } } sum += bias[k_idx]; output[tid] = sum; //if (tid < 5) // printf("%dth thread : %f\n", tid, output[tid]); } void conv_batchnorm_fusion(float *output, float *input, float *weight, float *bias, int batch, int in_channel, int out_channel, int height, int width, int kernel_height, int kernel_width, int pad_height, int pad_width, int stride_height, int stride_width) { int N = batch; int C = in_channel; int K = out_channel; int H = height; int W = width; int kH = kernel_height; int kW = kernel_width; int pH = pad_height; int pW = pad_width; int sH = stride_height; int sW = stride_width; int P = ((H + 2 * pH - kH) / sH) + 1; int Q = ((W + 2 * pW - kW) / sW) + 1; int THREADS_PER_BLOCK = 256; int TOTAL_SIZE = N * K * P * Q; int NUMBER_OF_BLOCKS = (TOTAL_SIZE + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; hipLaunchKernelGGL(( conv_batchnorm_fusion_kernel) , dim3(NUMBER_OF_BLOCKS), dim3(THREADS_PER_BLOCK) , 0, 0, output, input, weight, bias, N, C, K, H, W, kH, kW, pH, pW, sH, sW, TOTAL_SIZE); }
b3992b851fcd23923c874c724382106c4bcb6754.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <cstdlib> __global__ void conv_batchnorm_fusion_kernel(float *output, float *input, float *weight, float *bias, int batch, int in_channel, int out_channel, int height, int width, int kernel_height, int kernel_width, int pad_height, int pad_width, int stride_height, int stride_width, int total_size) { int N = batch; int C = in_channel; int K = out_channel; int H = height; int W = width; int kH = kernel_height; int kW = kernel_width; int pH = pad_height; int pW = pad_width; int sH = stride_height; int sW = stride_width; int P = ((H + 2 * pH - kH) / sH) + 1; int Q = ((W + 2 * pW - kW) / sW) + 1; //tid : thread id int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= total_size) return; //q_idx : output w-index int q_idx = tid % Q; int idx = tid / Q; //p_idx : output h-index int p_idx = idx % P; idx /= P; //k_idx : output channel-index int k_idx = idx % K; //n_idx : output batch-index int n_idx = idx / K; //output(n_idx, k_idx, p_idx, q_idx) float sum = 0.0f; for (int c_idx = 0; c_idx < C; c_idx++) { for (int kh_idx = 0; kh_idx < kH; kh_idx++) { int h_idx = p_idx * sH + kh_idx - pH; if (h_idx >= 0 && h_idx < H) { for (int kw_idx = 0; kw_idx < kW; kw_idx++) { int w_idx = q_idx * sW + kw_idx - pW; if (w_idx >= 0 && w_idx < W) { int input_index = n_idx * C * H * W + c_idx * H * W + h_idx * W + w_idx; int weight_index = k_idx * C * kH * kW + c_idx * kH * kW + kh_idx * kW + kw_idx; sum += input[input_index] * weight[weight_index]; } } } } } sum += bias[k_idx]; output[tid] = sum; //if (tid < 5) // printf("%dth thread : %f\n", tid, output[tid]); } void conv_batchnorm_fusion(float *output, float *input, float *weight, float *bias, int batch, int in_channel, int out_channel, int height, int width, int kernel_height, int kernel_width, int pad_height, int pad_width, int stride_height, int stride_width) { int N = batch; int C = in_channel; int K = out_channel; int H = height; int W = width; int kH = kernel_height; int kW = kernel_width; int pH = pad_height; int pW = pad_width; int sH = stride_height; int sW = stride_width; int P = ((H + 2 * pH - kH) / sH) + 1; int Q = ((W + 2 * pW - kW) / sW) + 1; int THREADS_PER_BLOCK = 256; int TOTAL_SIZE = N * K * P * Q; int NUMBER_OF_BLOCKS = (TOTAL_SIZE + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; conv_batchnorm_fusion_kernel <<<NUMBER_OF_BLOCKS, THREADS_PER_BLOCK >>> (output, input, weight, bias, N, C, K, H, W, kH, kW, pH, pW, sH, sW, TOTAL_SIZE); }
123b01baa5d066e0cd292609e2202e9b29cdf067.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <bits/stdc++.h> #define milliseconds 1e3 using namespace std; typedef struct _Node_info{ u_short parent_index; u_int potential_flow; } Node_info; void readInput(const char* filename, u_int total_nodes, u_short* residual_capacity) { ifstream file; file.open(filename); if (!file) { cout << "Error reading file!"; exit(1); } string line; u_int source, destination; u_short capacity; while (file) { getline(file, line); if (line.empty()) { continue; } std::stringstream linestream(line); linestream >> source >> destination >> capacity; residual_capacity[source * total_nodes + destination] = capacity; } file.close(); } __global__ void find_augmenting_path(u_short* residual_capacity, Node_info* node_info, bool* frontier, bool* visited, u_int total_nodes, u_int sink, u_int* locks){ int node_id = blockIdx.x * blockDim.x + threadIdx.x; if(!frontier[sink] && node_id < total_nodes && frontier[node_id]){ frontier[node_id] = false; visited[node_id] = true; Node_info *neighbour; Node_info current_node_info = node_info[node_id]; u_int capacity; for (u_int i = node_id; i < total_nodes; ++i){ if(frontier[i] || visited[i] || ((capacity = residual_capacity[node_id * total_nodes + i]) <= 0)){ continue; } if(atomicCAS(locks+i, 0 , 1) == 1 || frontier[i]){ continue; } frontier[i] = true; locks[i] = 0; neighbour = node_info + i; neighbour->parent_index = node_id; neighbour->potential_flow = min(current_node_info.potential_flow, capacity); } for (u_int i = 0; i < node_id; ++i){ if(frontier[i] || visited[i] || ((capacity = residual_capacity[node_id * total_nodes + i]) <= 0)){ continue; } if(atomicCAS(locks+i, 0 , 1) == 1 || frontier[i]){ continue; } frontier[i] = true; locks[i] = 0; neighbour = node_info + i; neighbour->parent_index = node_id; neighbour->potential_flow = min(current_node_info.potential_flow, capacity); } } } __global__ void reset(Node_info* node_info, bool* frontier, bool* visited, int source, int total_nodes, u_int* locks){ int id = blockIdx.x * blockDim.x + threadIdx.x; if(id < total_nodes){ frontier[id] = id == source; visited[id] = false; node_info[id].potential_flow = UINT_MAX; locks[id] = 0; } } __global__ void augment_path(Node_info* node_infos, bool* do_change_capacity , u_int total_nodes, u_short* residual_capacity, u_int bottleneck_flow){ int node_id = blockIdx.x * blockDim.x + threadIdx.x; if(node_id < total_nodes && do_change_capacity[node_id]){ Node_info* current_node_info = node_infos + node_id; residual_capacity[current_node_info->parent_index * total_nodes + node_id] -= bottleneck_flow; residual_capacity[node_id * total_nodes + current_node_info->parent_index] += bottleneck_flow; } } void reset_host(bool* frontier, int source, int total_nodes, bool* do_change_capacity){ frontier[source] = true; do_change_capacity[source] = false; for (int i = source+1; i < total_nodes; i++) { frontier[i] = false; do_change_capacity[i] = false; } for (int i = 0; i < source; i++) { frontier[i] = false; do_change_capacity[i] = false; } } bool is_frontier_empty_or_sink_found(bool* frontier, int N, int sink_pos){ for (int i = N-1; i > -1; --i) { if(frontier[i]){ return i == sink_pos; } } return true; } int main(int argc, char** argv){ if(argc < 3){ printf("Specify filename & number of vertices\n"); return 1; } u_int N = atoi(argv[2]); u_short *residual_capacity; size_t matrix_size = N * N * sizeof(u_short); residual_capacity = (u_short *)malloc(matrix_size); memset(residual_capacity, 0, matrix_size); 
readInput(argv[1], N, residual_capacity); u_int source=0, sink=N-1; u_int current_vertex, bottleneck_flow; u_int max_flow = 0; Node_info* current_node_info; u_short* d_residual_capacity; u_int* d_locks; bool* frontier; bool* d_frontier, *d_visited, *d_do_change_capacity, *do_change_capacity; Node_info* node_info; Node_info* d_node_info; clock_t start_time = clock(); size_t node_infos_size = N * sizeof(Node_info); node_info = (Node_info*)malloc(node_infos_size); size_t vertices_size = N * sizeof(bool); frontier = (bool *)malloc(vertices_size); do_change_capacity = (bool *)malloc(vertices_size); size_t locks_size = N * sizeof(u_int); hipMalloc((void **)&d_residual_capacity, matrix_size); hipMalloc((void **)&d_locks, locks_size); hipMalloc((void **)&d_node_info,node_infos_size); hipMalloc((void **)&d_frontier, vertices_size); hipMalloc((void **)&d_visited, vertices_size); hipMalloc((void **)&d_do_change_capacity, vertices_size); hipMemcpy(d_residual_capacity, residual_capacity, matrix_size, hipMemcpyHostToDevice); bool found_augmenting_path; int threads = 256; int blocks = ceil(N * 1.0 /threads); do{ // reset visited, frontier, node_info, locks hipLaunchKernelGGL(( reset), dim3(blocks), dim3(threads) , 0, 0, d_node_info, d_frontier, d_visited, source, N, d_locks); reset_host(frontier, source, N, do_change_capacity); while(!is_frontier_empty_or_sink_found(frontier, N, sink)){ // Invoke kernel hipLaunchKernelGGL(( find_augmenting_path), dim3(blocks), dim3(threads) , 0, 0, d_residual_capacity, d_node_info, d_frontier, d_visited, N, sink, d_locks); // Copy back frontier from device hipMemcpy(frontier, d_frontier, vertices_size, hipMemcpyDeviceToHost); } found_augmenting_path = frontier[sink]; if(!found_augmenting_path){ break; } // copy node_info from device to host hipMemcpy(node_info, d_node_info, node_infos_size, hipMemcpyDeviceToHost); bottleneck_flow = node_info[sink].potential_flow; max_flow += bottleneck_flow; for(current_vertex = sink; current_vertex != source; current_vertex = current_node_info->parent_index){ current_node_info = node_info + current_vertex; do_change_capacity[current_vertex] = true; } hipMemcpy(d_do_change_capacity, do_change_capacity, vertices_size, hipMemcpyHostToDevice); hipLaunchKernelGGL(( augment_path), dim3(blocks), dim3(threads) , 0, 0, d_node_info, d_do_change_capacity, N, d_residual_capacity, bottleneck_flow); }while(found_augmenting_path); cout << "\nmaxflow " << max_flow << endl; double time_taken = ((double)clock() - start_time)/CLOCKS_PER_SEC * milliseconds; // in milliseconds cout << time_taken << " ms for thread size- " << threads << endl; free(residual_capacity); free(frontier); free(node_info); hipFree(d_residual_capacity); hipFree(d_node_info); hipFree(d_frontier); hipFree(d_visited); return 0; }
123b01baa5d066e0cd292609e2202e9b29cdf067.cu
#include <bits/stdc++.h> #define milliseconds 1e3 using namespace std; typedef struct _Node_info{ u_short parent_index; u_int potential_flow; } Node_info; void readInput(const char* filename, u_int total_nodes, u_short* residual_capacity) { ifstream file; file.open(filename); if (!file) { cout << "Error reading file!"; exit(1); } string line; u_int source, destination; u_short capacity; while (file) { getline(file, line); if (line.empty()) { continue; } std::stringstream linestream(line); linestream >> source >> destination >> capacity; residual_capacity[source * total_nodes + destination] = capacity; } file.close(); } __global__ void find_augmenting_path(u_short* residual_capacity, Node_info* node_info, bool* frontier, bool* visited, u_int total_nodes, u_int sink, u_int* locks){ int node_id = blockIdx.x * blockDim.x + threadIdx.x; if(!frontier[sink] && node_id < total_nodes && frontier[node_id]){ frontier[node_id] = false; visited[node_id] = true; Node_info *neighbour; Node_info current_node_info = node_info[node_id]; u_int capacity; for (u_int i = node_id; i < total_nodes; ++i){ if(frontier[i] || visited[i] || ((capacity = residual_capacity[node_id * total_nodes + i]) <= 0)){ continue; } if(atomicCAS(locks+i, 0 , 1) == 1 || frontier[i]){ continue; } frontier[i] = true; locks[i] = 0; neighbour = node_info + i; neighbour->parent_index = node_id; neighbour->potential_flow = min(current_node_info.potential_flow, capacity); } for (u_int i = 0; i < node_id; ++i){ if(frontier[i] || visited[i] || ((capacity = residual_capacity[node_id * total_nodes + i]) <= 0)){ continue; } if(atomicCAS(locks+i, 0 , 1) == 1 || frontier[i]){ continue; } frontier[i] = true; locks[i] = 0; neighbour = node_info + i; neighbour->parent_index = node_id; neighbour->potential_flow = min(current_node_info.potential_flow, capacity); } } } __global__ void reset(Node_info* node_info, bool* frontier, bool* visited, int source, int total_nodes, u_int* locks){ int id = blockIdx.x * blockDim.x + threadIdx.x; if(id < total_nodes){ frontier[id] = id == source; visited[id] = false; node_info[id].potential_flow = UINT_MAX; locks[id] = 0; } } __global__ void augment_path(Node_info* node_infos, bool* do_change_capacity , u_int total_nodes, u_short* residual_capacity, u_int bottleneck_flow){ int node_id = blockIdx.x * blockDim.x + threadIdx.x; if(node_id < total_nodes && do_change_capacity[node_id]){ Node_info* current_node_info = node_infos + node_id; residual_capacity[current_node_info->parent_index * total_nodes + node_id] -= bottleneck_flow; residual_capacity[node_id * total_nodes + current_node_info->parent_index] += bottleneck_flow; } } void reset_host(bool* frontier, int source, int total_nodes, bool* do_change_capacity){ frontier[source] = true; do_change_capacity[source] = false; for (int i = source+1; i < total_nodes; i++) { frontier[i] = false; do_change_capacity[i] = false; } for (int i = 0; i < source; i++) { frontier[i] = false; do_change_capacity[i] = false; } } bool is_frontier_empty_or_sink_found(bool* frontier, int N, int sink_pos){ for (int i = N-1; i > -1; --i) { if(frontier[i]){ return i == sink_pos; } } return true; } int main(int argc, char** argv){ if(argc < 3){ printf("Specify filename & number of vertices\n"); return 1; } u_int N = atoi(argv[2]); u_short *residual_capacity; size_t matrix_size = N * N * sizeof(u_short); residual_capacity = (u_short *)malloc(matrix_size); memset(residual_capacity, 0, matrix_size); readInput(argv[1], N, residual_capacity); u_int source=0, sink=N-1; u_int current_vertex, 
bottleneck_flow; u_int max_flow = 0; Node_info* current_node_info; u_short* d_residual_capacity; u_int* d_locks; bool* frontier; bool* d_frontier, *d_visited, *d_do_change_capacity, *do_change_capacity; Node_info* node_info; Node_info* d_node_info; clock_t start_time = clock(); size_t node_infos_size = N * sizeof(Node_info); node_info = (Node_info*)malloc(node_infos_size); size_t vertices_size = N * sizeof(bool); frontier = (bool *)malloc(vertices_size); do_change_capacity = (bool *)malloc(vertices_size); size_t locks_size = N * sizeof(u_int); cudaMalloc((void **)&d_residual_capacity, matrix_size); cudaMalloc((void **)&d_locks, locks_size); cudaMalloc((void **)&d_node_info,node_infos_size); cudaMalloc((void **)&d_frontier, vertices_size); cudaMalloc((void **)&d_visited, vertices_size); cudaMalloc((void **)&d_do_change_capacity, vertices_size); cudaMemcpy(d_residual_capacity, residual_capacity, matrix_size, cudaMemcpyHostToDevice); bool found_augmenting_path; int threads = 256; int blocks = ceil(N * 1.0 /threads); do{ // reset visited, frontier, node_info, locks reset<<<blocks, threads >>>(d_node_info, d_frontier, d_visited, source, N, d_locks); reset_host(frontier, source, N, do_change_capacity); while(!is_frontier_empty_or_sink_found(frontier, N, sink)){ // Invoke kernel find_augmenting_path<<< blocks, threads >>>(d_residual_capacity, d_node_info, d_frontier, d_visited, N, sink, d_locks); // Copy back frontier from device cudaMemcpy(frontier, d_frontier, vertices_size, cudaMemcpyDeviceToHost); } found_augmenting_path = frontier[sink]; if(!found_augmenting_path){ break; } // copy node_info from device to host cudaMemcpy(node_info, d_node_info, node_infos_size, cudaMemcpyDeviceToHost); bottleneck_flow = node_info[sink].potential_flow; max_flow += bottleneck_flow; for(current_vertex = sink; current_vertex != source; current_vertex = current_node_info->parent_index){ current_node_info = node_info + current_vertex; do_change_capacity[current_vertex] = true; } cudaMemcpy(d_do_change_capacity, do_change_capacity, vertices_size, cudaMemcpyHostToDevice); augment_path<<< blocks, threads >>>(d_node_info, d_do_change_capacity, N, d_residual_capacity, bottleneck_flow); }while(found_augmenting_path); cout << "\nmaxflow " << max_flow << endl; double time_taken = ((double)clock() - start_time)/CLOCKS_PER_SEC * milliseconds; // in milliseconds cout << time_taken << " ms for thread size- " << threads << endl; free(residual_capacity); free(frontier); free(node_info); cudaFree(d_residual_capacity); cudaFree(d_node_info); cudaFree(d_frontier); cudaFree(d_visited); return 0; }
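readInput parses one edge per line as "source destination capacity"; the source is fixed at vertex 0 and the sink at N-1. A hypothetical helper that writes a 4-vertex example graph in that format (the file name, edge list, and expected result are illustrative assumptions, not part of the program above):

#include <fstream>

int main()
{
    // Edge list in the "source destination capacity" format parsed by readInput().
    std::ofstream f("graph.txt");
    f << "0 1 3\n"   // vertex 0 is the source
      << "0 2 2\n"
      << "1 3 2\n"   // vertex 3 (= N-1 when N = 4) is the sink
      << "2 3 3\n";
    // The solver above would then be invoked as:  ./maxflow graph.txt 4
    // and should report "maxflow 4" for this graph (2 units via 0-1-3, 2 units via 0-2-3).
    return 0;
}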
277d3b0a84598fc3b791a76644f0822778682fe6.hip
// !!! This is a file automatically generated by hipify!!!
#include "common/book.h"
#include <hip/hip_runtime.h>
#include <cuda_device_runtime_api.h>
#include <hip/hip_runtime.h>
#include <stdio.h>

#define N 10

__global__ void add(int *a, int *b, int *c) {
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  while (tid < N) {
    c[tid] = a[tid] + b[tid];
    tid += blockDim.x * gridDim.x;
  }
}

int main(void) {
  int a[N], b[N], c[N];
  int *dev_a, *dev_b, *dev_c;

  HANDLE_ERROR(hipMalloc((void **)&dev_a, N * sizeof(int)));
  HANDLE_ERROR(hipMalloc((void **)&dev_b, N * sizeof(int)));
  HANDLE_ERROR(hipMalloc((void **)&dev_c, N * sizeof(int)));

  for (int i = 0; i < N; ++i) {
    a[i] = -i;
    b[i] = i * i;
  }

  HANDLE_ERROR(hipMemcpy(dev_a, a, N * sizeof(int), hipMemcpyHostToDevice));
  HANDLE_ERROR(hipMemcpy(dev_b, b, N * sizeof(int), hipMemcpyHostToDevice));

  hipLaunchKernelGGL(( add), dim3(128), dim3(128), 0, 0, dev_a, dev_b, dev_c);

  HANDLE_ERROR(hipMemcpy(c, dev_c, N * sizeof(int), hipMemcpyDeviceToHost));

  for (int i = 0; i < N; ++i) {
    printf("%d + %d = %d\n", a[i], b[i], c[i]);
  }

  hipFree(dev_a);
  hipFree(dev_b);
  hipFree(dev_c);

  return 0;
}
277d3b0a84598fc3b791a76644f0822778682fe6.cu
#include "common/book.h" #include <cuda.h> #include <cuda_device_runtime_api.h> #include <cuda_runtime.h> #include <stdio.h> #define N 10 __global__ void add(int *a, int *b, int *c) { int tid = threadIdx.x + blockIdx.x * blockDim.x; while (tid < N) { c[tid] = a[tid] + b[tid]; tid += blockDim.x * gridDim.x; } } int main(void) { int a[N], b[N], c[N]; int *dev_a, *dev_b, *dev_c; HANDLE_ERROR(cudaMalloc((void **)&dev_a, N * sizeof(int))); HANDLE_ERROR(cudaMalloc((void **)&dev_b, N * sizeof(int))); HANDLE_ERROR(cudaMalloc((void **)&dev_c, N * sizeof(int))); for (int i = 0; i < N; ++i) { a[i] = -i; b[i] = i * i; } HANDLE_ERROR(cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice)); add<<<128, 128>>>(dev_a, dev_b, dev_c); HANDLE_ERROR(cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost)); for (int i = 0; i < N; ++i) { printf("%d + %d = %d\n", a[i], b[i], c[i]); } cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c); return 0; }
b709bde563e0ccdac0b07d99c7c82843dcb0235d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 @generated s Tue Aug 13 16:45:08 2013 */ #include "common_magma.h" /* //////////////////////////////////////////////////////////////////////////// -- This is an auxiliary routine called from sgehrd. The routine is called in 16 blocks, 32 thread per block and initializes to zero the 1st 32x32 block of A. */ __global__ void sset_to_zero(float *A, int lda) { int ind = blockIdx.x*lda + threadIdx.x; A += ind; A[0] = MAGMA_S_ZERO; // A[16*lda] = 0.; } __global__ void sset_nbxnb_to_zero(int nb, float *A, int lda) { int ind = blockIdx.x*lda + threadIdx.x, i, j; A += ind; for(i=0; i<nb; i+=32) { for(j=0; j<nb; j+=32) A[j] = MAGMA_S_ZERO; A += 32*lda; } } extern "C" void szero_32x32_block(float *A, magma_int_t lda) { // sset_to_zero<<< 16, 32, 0, magma_stream >>>(A, lda); hipLaunchKernelGGL(( sset_to_zero), dim3(32), dim3(32), 0, magma_stream , A, lda); } extern "C" void szero_nbxnb_block(magma_int_t nb, float *A, magma_int_t lda) { hipLaunchKernelGGL(( sset_nbxnb_to_zero), dim3(32), dim3(32), 0, magma_stream , nb, A, lda); } /* //////////////////////////////////////////////////////////////////////////// -- GPU kernel for initializing a matrix by 0 */ #define slaset_threads 64 __global__ void slaset(int m, int n, float *A, int lda) { int ibx = blockIdx.x * slaset_threads; int iby = blockIdx.y * 32; int ind = ibx + threadIdx.x; A += ind + __mul24(iby, lda); #pragma unroll for(int i=0; i<32; i++) if (iby+i < n && ind < m) A[i*lda] = MAGMA_S_ZERO; } __global__ void slaset_identity(int m, int n, float *A, int lda) { int ibx = blockIdx.x * slaset_threads; int iby = blockIdx.y * 32; int ind = ibx + threadIdx.x; A += ind + __mul24(iby, lda); #pragma unroll for(int i=0; i<32; i++) if (iby+i < n && ind < m) { if (ind != i+iby) A[i*lda] = MAGMA_S_ZERO; else A[i*lda] = MAGMA_S_ONE; } } __global__ void slaset_identityonly(int m, int n, float *A, int lda) { int ibx = blockIdx.x * slaset_threads; int iby = blockIdx.y * 32; int ind = ibx + threadIdx.x; A += ind + __mul24(iby, lda); #pragma unroll for(int i=0; i<32; i++) if (iby+i < n && ind < m) { if (ind == i+iby) A[i*lda] = MAGMA_S_ONE; } } __global__ void slasetlower(int m, int n, float *A, int lda) { int ibx = blockIdx.x * slaset_threads; int iby = blockIdx.y * 32; int ind = ibx + threadIdx.x; A += ind + __mul24(iby, lda); #pragma unroll for(int i=0; i<32; i++) if (iby+i < n && ind < m && ind > i+iby) A[i*lda] = MAGMA_S_ZERO; } __global__ void slasetupper(int m, int n, float *A, int lda) { int ibx = blockIdx.x * slaset_threads; int iby = blockIdx.y * 32; int ind = ibx + threadIdx.x; A += ind + __mul24(iby, lda); #pragma unroll for(int i=0; i<32; i++) if (iby+i < n && ind < m && ind < i+iby) A[i*lda] = MAGMA_S_ZERO; } /* //////////////////////////////////////////////////////////////////////////// -- Set the m x n matrix pointed by A to 0 on the GPU. 
*/ extern "C" void magmablas_slaset(char uplo, magma_int_t m, magma_int_t n, float *A, magma_int_t lda) { dim3 threads(slaset_threads, 1, 1); dim3 grid(m/slaset_threads+(m % slaset_threads != 0), n/32+(n%32!=0)); if (m!=0 && n !=0) if (uplo == MagmaLower) hipLaunchKernelGGL(( slasetlower), dim3(grid), dim3(threads), 0, magma_stream , m, n, A, lda); else if (uplo == MagmaUpper) hipLaunchKernelGGL(( slasetupper), dim3(grid), dim3(threads), 0, magma_stream , m, n, A, lda); else hipLaunchKernelGGL(( slaset), dim3(grid), dim3(threads), 0, magma_stream , m, n, A, lda); } /* //////////////////////////////////////////////////////////////////////////// -- Set the m x n matrix pointed by A to I on the GPU. */ extern "C" void magmablas_slaset_identity(magma_int_t m, magma_int_t n, float *A, magma_int_t lda) { dim3 threads(slaset_threads, 1, 1); dim3 grid(m/slaset_threads+(m % slaset_threads != 0), n/32+(n%32!=0)); if (m!=0 && n !=0) hipLaunchKernelGGL(( slaset_identity), dim3(grid), dim3(threads), 0, magma_stream , m, n, A, lda); } /* //////////////////////////////////////////////////////////////////////////// -- Set the m x n matrix pointed by A to I on the diag without touching the offdiag GPU. */ extern "C" void magmablas_slaset_identityonly(magma_int_t m, magma_int_t n, float *A, magma_int_t lda) { dim3 threads(slaset_threads, 1, 1); dim3 grid(m/slaset_threads+(m % slaset_threads != 0), n/32+(n%32!=0)); if (m!=0 && n !=0) hipLaunchKernelGGL(( slaset_identityonly), dim3(grid), dim3(threads), 0, magma_stream , m, n, A, lda); } /* //////////////////////////////////////////////////////////////////////////// -- Given two matrices, 'a' on the CPU and 'da' on the GPU, this function returns the Frobenious norm of the difference of the two matrices. The function is used for debugging. */ extern "C" float cpu_gpu_sdiff( magma_int_t M, magma_int_t N, const float *a, magma_int_t lda, const float *da, magma_int_t ldda ) { magma_int_t d_one = 1; magma_int_t j; float c_neg_one = MAGMA_S_NEG_ONE; float work[1]; float *ha = (float*)malloc( M * N * sizeof(float)); float res; hipblasGetMatrix(M, N, sizeof(float), da, ldda, ha, M); for(j=0; j<N; j++) blasf77_saxpy(&M, &c_neg_one, a+j*lda, &d_one, ha+j*M, &d_one); res = lapackf77_slange("f", &M, &N, ha, &M, work); free(ha); return res; } /* //////////////////////////////////////////////////////////////////////////// -- GPU kernel for setting 0 in the nb-1 upper subdiagonals and 1 in the diagonal @author Raffaele Solca */ __global__ void ssetdiag1subdiag0_L(int k, float *A, int lda) { int nb = blockDim.x; int ibx = blockIdx.x * nb; int ind = ibx + threadIdx.x + 1; A += ind - nb + __mul24((ibx), lda); float tmp = MAGMA_S_ZERO; if(threadIdx.x == nb-1) tmp = MAGMA_S_ONE; #pragma unroll for(int i=0; i<nb; i++) if (ibx+i < k && ind + i >= nb) { A[i*(lda+1)] = tmp; } } /* //////////////////////////////////////////////////////////////////////////// -- GPU kernel for setting 0 in the nb-1 lower subdiagonals and 1 in the diagonal @author Raffaele Solca */ __global__ void ssetdiag1subdiag0_U(int k, float *A, int lda) { int nb = blockDim.x; int ibx = blockIdx.x * nb; int ind = ibx + threadIdx.x; A += ind + __mul24((ibx), lda); float tmp = MAGMA_S_ZERO; if(threadIdx.x == 0) tmp = MAGMA_S_ONE; #pragma unroll for(int i=0; i<nb; i++) if (ibx+i < k && ind + i < k) { A[i*(lda+1)] = tmp; } } /* //////////////////////////////////////////////////////////////////////////// -- Set 1s in the diagonal and 0s in the nb-1 lower (UPLO='U') or upper (UPLO='L') subdiagonals. 
stream and no stream interfaces @author Raffaele Solca */ extern "C" void magmablas_ssetdiag1subdiag0_stream(char uplo, magma_int_t k, magma_int_t nb, float *A, magma_int_t lda, magma_queue_t stream) { dim3 threads(nb, 1, 1); dim3 grid((k-1)/nb+1); if(k>lda) fprintf(stderr,"wrong second argument of ssetdiag1subdiag0"); if(uplo == MagmaLower) hipLaunchKernelGGL(( ssetdiag1subdiag0_L), dim3(grid), dim3(threads), 0, stream , k, A, lda); else if(uplo == MagmaUpper) { hipLaunchKernelGGL(( ssetdiag1subdiag0_U), dim3(grid), dim3(threads), 0, stream , k, A, lda); } else fprintf(stderr,"wrong first argument of ssetdiag1subdiag0"); return; } extern "C" void magmablas_ssetdiag1subdiag0(char uplo, magma_int_t k, magma_int_t nb, float *A, magma_int_t lda) { magmablas_ssetdiag1subdiag0_stream(uplo, k, nb, A, lda, magma_stream); }
b709bde563e0ccdac0b07d99c7c82843dcb0235d.cu
/* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 @generated s Tue Aug 13 16:45:08 2013 */ #include "common_magma.h" /* //////////////////////////////////////////////////////////////////////////// -- This is an auxiliary routine called from sgehrd. The routine is called in 16 blocks, 32 thread per block and initializes to zero the 1st 32x32 block of A. */ __global__ void sset_to_zero(float *A, int lda) { int ind = blockIdx.x*lda + threadIdx.x; A += ind; A[0] = MAGMA_S_ZERO; // A[16*lda] = 0.; } __global__ void sset_nbxnb_to_zero(int nb, float *A, int lda) { int ind = blockIdx.x*lda + threadIdx.x, i, j; A += ind; for(i=0; i<nb; i+=32) { for(j=0; j<nb; j+=32) A[j] = MAGMA_S_ZERO; A += 32*lda; } } extern "C" void szero_32x32_block(float *A, magma_int_t lda) { // sset_to_zero<<< 16, 32, 0, magma_stream >>>(A, lda); sset_to_zero<<< 32, 32, 0, magma_stream >>>(A, lda); } extern "C" void szero_nbxnb_block(magma_int_t nb, float *A, magma_int_t lda) { sset_nbxnb_to_zero<<< 32, 32, 0, magma_stream >>>(nb, A, lda); } /* //////////////////////////////////////////////////////////////////////////// -- GPU kernel for initializing a matrix by 0 */ #define slaset_threads 64 __global__ void slaset(int m, int n, float *A, int lda) { int ibx = blockIdx.x * slaset_threads; int iby = blockIdx.y * 32; int ind = ibx + threadIdx.x; A += ind + __mul24(iby, lda); #pragma unroll for(int i=0; i<32; i++) if (iby+i < n && ind < m) A[i*lda] = MAGMA_S_ZERO; } __global__ void slaset_identity(int m, int n, float *A, int lda) { int ibx = blockIdx.x * slaset_threads; int iby = blockIdx.y * 32; int ind = ibx + threadIdx.x; A += ind + __mul24(iby, lda); #pragma unroll for(int i=0; i<32; i++) if (iby+i < n && ind < m) { if (ind != i+iby) A[i*lda] = MAGMA_S_ZERO; else A[i*lda] = MAGMA_S_ONE; } } __global__ void slaset_identityonly(int m, int n, float *A, int lda) { int ibx = blockIdx.x * slaset_threads; int iby = blockIdx.y * 32; int ind = ibx + threadIdx.x; A += ind + __mul24(iby, lda); #pragma unroll for(int i=0; i<32; i++) if (iby+i < n && ind < m) { if (ind == i+iby) A[i*lda] = MAGMA_S_ONE; } } __global__ void slasetlower(int m, int n, float *A, int lda) { int ibx = blockIdx.x * slaset_threads; int iby = blockIdx.y * 32; int ind = ibx + threadIdx.x; A += ind + __mul24(iby, lda); #pragma unroll for(int i=0; i<32; i++) if (iby+i < n && ind < m && ind > i+iby) A[i*lda] = MAGMA_S_ZERO; } __global__ void slasetupper(int m, int n, float *A, int lda) { int ibx = blockIdx.x * slaset_threads; int iby = blockIdx.y * 32; int ind = ibx + threadIdx.x; A += ind + __mul24(iby, lda); #pragma unroll for(int i=0; i<32; i++) if (iby+i < n && ind < m && ind < i+iby) A[i*lda] = MAGMA_S_ZERO; } /* //////////////////////////////////////////////////////////////////////////// -- Set the m x n matrix pointed by A to 0 on the GPU. */ extern "C" void magmablas_slaset(char uplo, magma_int_t m, magma_int_t n, float *A, magma_int_t lda) { dim3 threads(slaset_threads, 1, 1); dim3 grid(m/slaset_threads+(m % slaset_threads != 0), n/32+(n%32!=0)); if (m!=0 && n !=0) if (uplo == MagmaLower) slasetlower<<< grid, threads, 0, magma_stream >>> (m, n, A, lda); else if (uplo == MagmaUpper) slasetupper<<< grid, threads, 0, magma_stream >>> (m, n, A, lda); else slaset<<< grid, threads, 0, magma_stream >>> (m, n, A, lda); } /* //////////////////////////////////////////////////////////////////////////// -- Set the m x n matrix pointed by A to I on the GPU. 
*/ extern "C" void magmablas_slaset_identity(magma_int_t m, magma_int_t n, float *A, magma_int_t lda) { dim3 threads(slaset_threads, 1, 1); dim3 grid(m/slaset_threads+(m % slaset_threads != 0), n/32+(n%32!=0)); if (m!=0 && n !=0) slaset_identity<<< grid, threads, 0, magma_stream >>> (m, n, A, lda); } /* //////////////////////////////////////////////////////////////////////////// -- Set the m x n matrix pointed by A to I on the diag without touching the offdiag GPU. */ extern "C" void magmablas_slaset_identityonly(magma_int_t m, magma_int_t n, float *A, magma_int_t lda) { dim3 threads(slaset_threads, 1, 1); dim3 grid(m/slaset_threads+(m % slaset_threads != 0), n/32+(n%32!=0)); if (m!=0 && n !=0) slaset_identityonly<<< grid, threads, 0, magma_stream >>> (m, n, A, lda); } /* //////////////////////////////////////////////////////////////////////////// -- Given two matrices, 'a' on the CPU and 'da' on the GPU, this function returns the Frobenious norm of the difference of the two matrices. The function is used for debugging. */ extern "C" float cpu_gpu_sdiff( magma_int_t M, magma_int_t N, const float *a, magma_int_t lda, const float *da, magma_int_t ldda ) { magma_int_t d_one = 1; magma_int_t j; float c_neg_one = MAGMA_S_NEG_ONE; float work[1]; float *ha = (float*)malloc( M * N * sizeof(float)); float res; cublasGetMatrix(M, N, sizeof(float), da, ldda, ha, M); for(j=0; j<N; j++) blasf77_saxpy(&M, &c_neg_one, a+j*lda, &d_one, ha+j*M, &d_one); res = lapackf77_slange("f", &M, &N, ha, &M, work); free(ha); return res; } /* //////////////////////////////////////////////////////////////////////////// -- GPU kernel for setting 0 in the nb-1 upper subdiagonals and 1 in the diagonal @author Raffaele Solca */ __global__ void ssetdiag1subdiag0_L(int k, float *A, int lda) { int nb = blockDim.x; int ibx = blockIdx.x * nb; int ind = ibx + threadIdx.x + 1; A += ind - nb + __mul24((ibx), lda); float tmp = MAGMA_S_ZERO; if(threadIdx.x == nb-1) tmp = MAGMA_S_ONE; #pragma unroll for(int i=0; i<nb; i++) if (ibx+i < k && ind + i >= nb) { A[i*(lda+1)] = tmp; } } /* //////////////////////////////////////////////////////////////////////////// -- GPU kernel for setting 0 in the nb-1 lower subdiagonals and 1 in the diagonal @author Raffaele Solca */ __global__ void ssetdiag1subdiag0_U(int k, float *A, int lda) { int nb = blockDim.x; int ibx = blockIdx.x * nb; int ind = ibx + threadIdx.x; A += ind + __mul24((ibx), lda); float tmp = MAGMA_S_ZERO; if(threadIdx.x == 0) tmp = MAGMA_S_ONE; #pragma unroll for(int i=0; i<nb; i++) if (ibx+i < k && ind + i < k) { A[i*(lda+1)] = tmp; } } /* //////////////////////////////////////////////////////////////////////////// -- Set 1s in the diagonal and 0s in the nb-1 lower (UPLO='U') or upper (UPLO='L') subdiagonals. stream and no stream interfaces @author Raffaele Solca */ extern "C" void magmablas_ssetdiag1subdiag0_stream(char uplo, magma_int_t k, magma_int_t nb, float *A, magma_int_t lda, magma_queue_t stream) { dim3 threads(nb, 1, 1); dim3 grid((k-1)/nb+1); if(k>lda) fprintf(stderr,"wrong second argument of ssetdiag1subdiag0"); if(uplo == MagmaLower) ssetdiag1subdiag0_L<<< grid, threads, 0, stream >>> (k, A, lda); else if(uplo == MagmaUpper) { ssetdiag1subdiag0_U<<< grid, threads, 0, stream >>> (k, A, lda); } else fprintf(stderr,"wrong first argument of ssetdiag1subdiag0"); return; } extern "C" void magmablas_ssetdiag1subdiag0(char uplo, magma_int_t k, magma_int_t nb, float *A, magma_int_t lda) { magmablas_ssetdiag1subdiag0_stream(uplo, k, nb, A, lda, magma_stream); }
e0f956ab8ced332bd15fcc2f3d5f7af90975b2e5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <assert.h> #include <hipcub/hipcub.hpp> #include "caffe2/core/context_gpu.h" #include "caffe2/operators/operator_fallback_gpu.h" #include "cross_entropy_wsl_op.h" namespace caffe2 { namespace { __global__ void LabelBalanceWSLKernel(const int outer_size, const int inner_size, const int* targets_ptr, const float ignore_value, float* count_ptr) { int i = blockIdx.x; int last_idx = (i + 1) * inner_size; float pos = 0; float neg = 0; for (int in_idx = i * inner_size + threadIdx.x; in_idx < last_idx; in_idx += blockDim.x) { if (targets_ptr[in_idx] == ignore_value) { continue; } if (targets_ptr[in_idx] > 0) { pos += 1; } else { neg += 1; } } typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; float pos_sum = BlockReduce(temp_storage).Sum(pos); typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce2; __shared__ typename BlockReduce2::TempStorage temp_storage2; float neg_sum = BlockReduce2(temp_storage2).Sum(neg); if (threadIdx.x == 0) { count_ptr[i * 2] = pos_sum; count_ptr[i * 2 + 1] = neg_sum; } } __global__ void LabelCrossEntropyWSLKernel_BATCHWISE( const int outer_size, const int C, const int inner_size, const float* Xdata, const int* labeldata, const float* countdata, const float log_threshold, const float ignore_value, float* Ydata) { // outer_size = B // C = C // inner_size = H * W // Xdata = B * C * H * W // labeldata = B * 1 * H * W int i = blockIdx.x; float pos = countdata[i * 2]; float neg = countdata[i * 2 + 1]; float value = 0; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { int labelidx = i * inner_size + j; int Xidx = (i * C + labeldata[labelidx]) * inner_size + j; CUDA_KERNEL_ASSERT(labeldata[labelidx] >= 0 && labeldata[labelidx] < C); if (labeldata[labelidx] == ignore_value) { continue; } if (labeldata[labelidx] > 0) { value += -logf(max(Xdata[Xidx], log_threshold)) / pos; } else { value += -logf(max(Xdata[Xidx], log_threshold)) / neg; } } typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; float sum = BlockReduce(temp_storage).Sum(value); if (threadIdx.x == 0) { // Ydata[i] = sum / inner_size; Ydata[i] = sum; } } __global__ void LabelCrossEntropyWSLKernel_CLASSWISE( const int outer_size, const int C, const int inner_size, const float* Xdata, const int* labeldata, const float* countdata, const int batch_size, const int num_classes, const float log_threshold, const float ignore_value, float* Ydata) { // outer_size = B * C // inner_size = H * W // Xdata = B * C * H * W // labeldata = B * 1 * H * W int i = blockIdx.x; int b = i / num_classes; int c = i % num_classes; float pos = countdata[b * 2]; float neg = countdata[b * 2 + 1]; float value = 0; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { int Xidx = i * inner_size + j; int labelidx = b * inner_size + j; CUDA_KERNEL_ASSERT(labeldata[labelidx] >= 0 && labeldata[labelidx] < C); if (labeldata[labelidx] != c) { continue; } if (labeldata[labelidx] == ignore_value) { continue; } if (labeldata[labelidx] > 0) { value += -logf(max(Xdata[Xidx], log_threshold)) / pos; } else { value += -logf(max(Xdata[Xidx], log_threshold)) / neg; } } typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; float sum = BlockReduce(temp_storage).Sum(value); if (threadIdx.x == 0) { // Ydata[i] = 
sum / inner_size; Ydata[i] = sum; } } __global__ void LabelCrossEntropyWSLGradientKernel_BATCHWISE( const int outer_size, const int C, const int inner_size, const float* Xdata, const int* labeldata, const float* dYdata, const float* countdata, const float log_threshold, const float ignore_value, float* dXdata) { int i = blockIdx.x; float pos = countdata[i * 2]; float neg = countdata[i * 2 + 1]; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { int labelidx = i * inner_size + j; int Xidx = (i * C + labeldata[labelidx]) * inner_size + j; CUDA_KERNEL_ASSERT(labeldata[labelidx] >= 0 && labeldata[labelidx] < C); if (labeldata[labelidx] == ignore_value) { continue; } if (labeldata[labelidx] > 0) { dXdata[Xidx] = -dYdata[i] / max(Xdata[Xidx], log_threshold) / pos; } else { dXdata[Xidx] = -dYdata[i] / max(Xdata[Xidx], log_threshold) / neg; } } } __global__ void LabelCrossEntropyWSLGradientKernel_CLASSWISE( const int outer_size, const int C, const int inner_size, const float* Xdata, const int* labeldata, const float* dYdata, const float* countdata, const int batch_size, const int num_classes, const float log_threshold, const float ignore_value, float* dXdata) { int i = blockIdx.x; int b = i / num_classes; int c = i % num_classes; float pos = countdata[b * 2]; float neg = countdata[b * 2 + 1]; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { int Xidx = i * inner_size + j; int labelidx = b * inner_size + j; CUDA_KERNEL_ASSERT(labeldata[labelidx] >= 0 && labeldata[labelidx] < C); if (labeldata[labelidx] != c) { continue; } if (labeldata[labelidx] == ignore_value) { continue; } if (labeldata[labelidx] > 0) { dXdata[Xidx] = -dYdata[i] / max(Xdata[Xidx], log_threshold) / pos; } else { dXdata[Xidx] = -dYdata[i] / max(Xdata[Xidx], log_threshold) / neg; } } } } // namespace template <> bool LabelCrossEntropyWSLOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& label = Input(1); CAFFE_ENFORCE_EQ(X.dim(), 4); CAFFE_ENFORCE_EQ(label.dim(), 4); CAFFE_ENFORCE_EQ(X.dim32(0), label.dim32(0)); CAFFE_ENFORCE_EQ(1, label.dim32(1)); CAFFE_ENFORCE_EQ(X.dim32(2), label.dim32(2)); CAFFE_ENFORCE_EQ(X.dim32(3), label.dim32(3)); const int batch_size = X.dim32(0); const int num_classes = X.dim32(1); const auto inner_size = X.dim32(2) * X.dim32(3); // const auto outer_size = X.dim32(0); const auto outer_size = X.numel() / inner_size; auto* Y = Output(0); auto* count = Output(1); if (X.dim() == 0) { Y->Resize(std::vector<int64_t>{}); count->Resize(std::vector<int64_t>{}); } else { std::vector<int64_t> dims(X.sizes().begin(), X.sizes().end() - 2); Y->Resize(dims); dims.push_back(2); count->Resize(dims); } // Y->Resize(vector<int64_t>(outer_size)); // count->Resize(vector<int64_t>(outer_size, 2)); hipLaunchKernelGGL(( LabelBalanceWSLKernel), dim3(batch_size), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), batch_size, inner_size, label.data<int>(), ignore_value_, count->mutable_data<float>()); hipLaunchKernelGGL(( LabelCrossEntropyWSLKernel_CLASSWISE), dim3(outer_size), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), outer_size, num_classes, inner_size, X.data<float>(), label.data<int>(), count->data<float>(), batch_size, num_classes, kLOG_THRESHOLD(), ignore_value_, Y->mutable_data<float>()); return true; } template <> bool LabelCrossEntropyWSLGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& label = Input(1); auto& dY = Input(2); auto& count = Input(3); const int batch_size = X.dim32(0); const int num_classes = X.dim32(1); const auto inner_size = 
X.dim32(2) * X.dim32(3); // const auto outer_size = X.dim32(0); const auto outer_size = X.numel() / inner_size; auto* dX = Output(0); dX->ResizeLike(X); math::Set<float, CUDAContext>(dX->numel(), 0.f, dX->mutable_data<float>(), &context_); hipLaunchKernelGGL(( LabelCrossEntropyWSLGradientKernel_CLASSWISE), dim3(outer_size), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), outer_size, num_classes, inner_size, X.data<float>(), label.data<int>(), dY.data<float>(), count.data<float>(), batch_size, num_classes, kLOG_THRESHOLD(), ignore_value_, dX->mutable_data<float>()); return true; } REGISTER_CUDA_OPERATOR(LabelCrossEntropyWSL, LabelCrossEntropyWSLOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(LabelCrossEntropyWSLGradient, LabelCrossEntropyWSLGradientOp<float, CUDAContext>); namespace { template <typename T> inline __device__ T gpu_atomic_add(const T val, T* address); template <> inline __device__ float gpu_atomic_add(const float val, float* address) { return atomicAdd(address, val); } __global__ void CrossEntropyWithLogitsKernel(const int nthreads, const float* Xdata, const float* Ldata, const float log_threshold, float* Ydata) { CUDA_1D_KERNEL_LOOP(i, nthreads) { float prob = max(Xdata[i], log_threshold); float one_prob = max(1 - Xdata[i], log_threshold); float* address = Ydata + 0; float val = -1.0 * (Ldata[i] * log(prob) + (1 - Ldata[i]) * log(one_prob)); gpu_atomic_add(val, address); } } __global__ void CrossEntropyWithLogitsGradientKernel( const int nthreads, const float* Xdata, const float* Ldata, const float* dYdata, const float log_threshold, const float diff_threshold, float* dXdata) { CUDA_1D_KERNEL_LOOP(i, nthreads) { float grad = dYdata[0]; float prob = max(Xdata[i], log_threshold); float one_prob = max(1 - Xdata[i], log_threshold); dXdata[i] = min(grad * (-1 * Ldata[i] / prob - (-1) * (1 - Ldata[i]) / one_prob), diff_threshold); } } } // namespace template <> bool CrossEntropyWithLogitsOp<float, CUDAContext>::RunOnDevice() { const auto& X = Input(0); const auto& L = Input(1); // if (InputSize() > 2) { // printf("Found unused input in CrossEntropyWithLogits %d\n", // InputSize() - 2); //} CAFFE_ENFORCE_EQ(X.dim(), 2); CAFFE_ENFORCE_EQ(X.sizes(), L.sizes()); int N = X.dim32(0); auto* Y = Output(0); Y->Resize(vector<int64_t>{}); math::Set<float, CUDAContext>(Y->numel(), 0.f, Y->mutable_data<float>(), &context_); const float* Xdata = X.data<float>(); const float* Ldata = L.data<float>(); auto* Ydata = Y->mutable_data<float>(); hipLaunchKernelGGL(( CrossEntropyWithLogitsKernel), dim3(CAFFE_GET_BLOCKS(X.numel())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.numel(), Xdata, Ldata, kLOG_THRESHOLD(), Ydata); math::Scale<float, float, CUDAContext>(Y->numel(), float(1.0 / N), Ydata, Ydata, &context_); return true; } template <> bool CrossEntropyWithLogitsGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& L = Input(1); auto& dY = Input(2); CAFFE_ENFORCE_EQ(X.numel(), L.numel()); CAFFE_ENFORCE_EQ(X.dim32(0), L.dim32(0)); CAFFE_ENFORCE_EQ(X.dim32(1), L.dim32(1)); CAFFE_ENFORCE_EQ(dY.numel(), 1); int N = X.dim32(0); auto* dX = Output(0); dX->ResizeLike(X); math::Set<float, CUDAContext>(dX->numel(), 0.f, dX->mutable_data<float>(), &context_); const float* Xdata = X.data<float>(); const float* Ldata = L.data<float>(); const float* dYdata = dY.data<float>(); float* dXdata = dX->mutable_data<float>(); hipLaunchKernelGGL(( CrossEntropyWithLogitsGradientKernel), dim3(CAFFE_GET_BLOCKS(X.numel())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), 
X.numel(), Xdata, Ldata, dYdata, kLOG_THRESHOLD(), kDIFF_THRESHOLD(), dXdata); math::Scale<float, float, CUDAContext>(dX->numel(), float(1.0 / N), dXdata, dXdata, &context_); return true; } // REGISTER_CUDA_OPERATOR(CrossEntropyWithLogits, // CrossEntropyWithLogitsOp<float, CUDAContext>); // REGISTER_CUDA_OPERATOR(CrossEntropyWithLogitsGradient, // CrossEntropyWithLogitsGradientOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(CrossEntropyWithLogits, GPUFallbackOp); REGISTER_CUDA_OPERATOR(CrossEntropyWithLogitsGradient, GPUFallbackOp); REGISTER_CUDA_OPERATOR(WeightedCrossEntropyWithLogits, GPUFallbackOp); REGISTER_CUDA_OPERATOR(WeightedCrossEntropyWithLogitsGradient, GPUFallbackOp); namespace { __device__ float sigmoid_xent_forward(float lgt, float tgt) { return lgt * (tgt - (lgt >= 0)) - log(1 + exp(lgt - 2 * lgt * (lgt >= 0))); } __device__ float sigmoid_xent_backward(float lgt, float tgt) { return tgt - 1. / (1. + exp(-lgt)); } __global__ void SigmoidBalanceWSLKernel(const int outer_size, const int inner_size, const float* targets_ptr, const float ignore_value, float* count_ptr) { int i = blockIdx.x; int last_idx = (i + 1) * inner_size; float pos = 0; float neg = 0; for (int in_idx = i * inner_size + threadIdx.x; in_idx < last_idx; in_idx += blockDim.x) { if (targets_ptr[in_idx] == ignore_value) { continue; } if (targets_ptr[in_idx] > 0.5) { pos += 1; } else { neg += 1; } } typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; float pos_sum = BlockReduce(temp_storage).Sum(pos); typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce2; __shared__ typename BlockReduce2::TempStorage temp_storage2; float neg_sum = BlockReduce2(temp_storage2).Sum(neg); if (threadIdx.x == 0) { count_ptr[i * 2] = pos_sum; count_ptr[i * 2 + 1] = neg_sum; } } __global__ void SigmoidCrossEntropyWithLogitsWSLKernel( const int outer_size, const int inner_size, const float* logits_ptr, const float* targets_ptr, const float* count_ptr, const float ignore_value, float* out_ptr) { int i = blockIdx.x; int last_idx = (i + 1) * inner_size; float value = 0; float pos = count_ptr[i * 2]; float neg = count_ptr[i * 2 + 1]; for (int in_idx = i * inner_size + threadIdx.x; in_idx < last_idx; in_idx += blockDim.x) { if (targets_ptr[in_idx] == ignore_value) { continue; } if (targets_ptr[in_idx] > 0.5) { value += sigmoid_xent_forward(logits_ptr[in_idx], targets_ptr[in_idx]) / pos; } else { value += sigmoid_xent_forward(logits_ptr[in_idx], targets_ptr[in_idx]) / neg; } } typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; float sum = BlockReduce(temp_storage).Sum(value); if (threadIdx.x == 0) { out_ptr[i] = -sum; } } __global__ void SigmoidCrossEntropyWithLogitsWSLGradientKernel( const int outer_size, const int inner_size, const float* g_ptr, const float* logits_ptr, const float* targets_ptr, const float* count_ptr, const float ignore_value, float* out_ptr) { CUDA_1D_KERNEL_LOOP(in_idx, outer_size * inner_size) { int i = in_idx / inner_size; if (targets_ptr[in_idx] == ignore_value) { out_ptr[in_idx] = 0.0; continue; } // auto g_factor = -g_ptr[i] / inner_size; float g_factor; float count; if (targets_ptr[in_idx] > 0.5) { count = count_ptr[i * 2]; } else { count = count_ptr[i * 2 + 1]; } if (count > 0) { g_factor = -g_ptr[i] / count; } else { g_factor = 0; } out_ptr[in_idx] = g_factor * sigmoid_xent_backward(logits_ptr[in_idx], targets_ptr[in_idx]); } } } // namespace 
template <> bool SigmoidCrossEntropyWithLogitsWSLOp<float, CUDAContext>::RunOnDevice() { auto& logits = Input(0); auto& targets = Input(1); CAFFE_ENFORCE(logits.sizes() == targets.sizes()); // const auto inner_size = logits.dim() > 0 ? logits.sizes().back() : 1; const auto inner_size = logits.dim32(2) * logits.dim32(3); const auto outer_size = logits.numel() / inner_size; auto* out = Output(0); auto* count = Output(1); if (logits.dim() == 0) { out->Resize(std::vector<int64_t>{}); count->Resize(std::vector<int64_t>{}); } else { std::vector<int64_t> dims(logits.sizes().begin(), logits.sizes().end() - 2); out->Resize(dims); dims.push_back(2); count->Resize(dims); } auto* out_ptr = out->mutable_data<float>(); auto* count_ptr = count->mutable_data<float>(); auto* logits_ptr = logits.data<float>(); auto* targets_ptr = targets.data<float>(); if (logits.numel() <= 0) { // nothing to do, not even launching kernel return true; } hipLaunchKernelGGL(( SigmoidBalanceWSLKernel), dim3(outer_size), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), outer_size, inner_size, targets_ptr, ignore_value_, count_ptr); hipLaunchKernelGGL(( SigmoidCrossEntropyWithLogitsWSLKernel), dim3(outer_size), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), outer_size, inner_size, logits_ptr, targets_ptr, count_ptr, ignore_value_, out_ptr); return true; } template <> bool SigmoidCrossEntropyWithLogitsWSLGradientOp<float, CUDAContext>::RunOnDevice() { auto& g = Input(0); auto& logits = Input(1); auto& targets = Input(2); auto& count = Input(3); CAFFE_ENFORCE(logits.sizes() == targets.sizes()); // const auto inner_size = logits.dim() > 0 ? logits.sizes().back() : 1; const auto inner_size = logits.dim32(2) * logits.dim32(3); const auto outer_size = logits.numel() / inner_size; CAFFE_ENFORCE(g.numel() == outer_size); auto* out = Output(0); out->ResizeLike(logits); auto* out_ptr = out->mutable_data<float>(); auto* logits_ptr = logits.data<float>(); auto* targets_ptr = targets.data<float>(); auto* g_ptr = g.data<float>(); auto* count_ptr = count.data<float>(); hipLaunchKernelGGL(( SigmoidCrossEntropyWithLogitsWSLGradientKernel), dim3(CAFFE_GET_BLOCKS(outer_size * inner_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), outer_size, inner_size, g_ptr, logits_ptr, targets_ptr, count_ptr, ignore_value_, out_ptr); return true; } REGISTER_CUDA_OPERATOR(SigmoidCrossEntropyWithLogitsWSL, SigmoidCrossEntropyWithLogitsWSLOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( SigmoidCrossEntropyWithLogitsWSLGradient, SigmoidCrossEntropyWithLogitsWSLGradientOp<float, CUDAContext>); } // namespace caffe2
e0f956ab8ced332bd15fcc2f3d5f7af90975b2e5.cu
#include <assert.h> #include <cub/block/block_reduce.cuh> #include "caffe2/core/context_gpu.h" #include "caffe2/operators/operator_fallback_gpu.h" #include "cross_entropy_wsl_op.h" namespace caffe2 { namespace { __global__ void LabelBalanceWSLKernel(const int outer_size, const int inner_size, const int* targets_ptr, const float ignore_value, float* count_ptr) { int i = blockIdx.x; int last_idx = (i + 1) * inner_size; float pos = 0; float neg = 0; for (int in_idx = i * inner_size + threadIdx.x; in_idx < last_idx; in_idx += blockDim.x) { if (targets_ptr[in_idx] == ignore_value) { continue; } if (targets_ptr[in_idx] > 0) { pos += 1; } else { neg += 1; } } typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; float pos_sum = BlockReduce(temp_storage).Sum(pos); typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce2; __shared__ typename BlockReduce2::TempStorage temp_storage2; float neg_sum = BlockReduce2(temp_storage2).Sum(neg); if (threadIdx.x == 0) { count_ptr[i * 2] = pos_sum; count_ptr[i * 2 + 1] = neg_sum; } } __global__ void LabelCrossEntropyWSLKernel_BATCHWISE( const int outer_size, const int C, const int inner_size, const float* Xdata, const int* labeldata, const float* countdata, const float log_threshold, const float ignore_value, float* Ydata) { // outer_size = B // C = C // inner_size = H * W // Xdata = B * C * H * W // labeldata = B * 1 * H * W int i = blockIdx.x; float pos = countdata[i * 2]; float neg = countdata[i * 2 + 1]; float value = 0; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { int labelidx = i * inner_size + j; int Xidx = (i * C + labeldata[labelidx]) * inner_size + j; CUDA_KERNEL_ASSERT(labeldata[labelidx] >= 0 && labeldata[labelidx] < C); if (labeldata[labelidx] == ignore_value) { continue; } if (labeldata[labelidx] > 0) { value += -logf(max(Xdata[Xidx], log_threshold)) / pos; } else { value += -logf(max(Xdata[Xidx], log_threshold)) / neg; } } typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; float sum = BlockReduce(temp_storage).Sum(value); if (threadIdx.x == 0) { // Ydata[i] = sum / inner_size; Ydata[i] = sum; } } __global__ void LabelCrossEntropyWSLKernel_CLASSWISE( const int outer_size, const int C, const int inner_size, const float* Xdata, const int* labeldata, const float* countdata, const int batch_size, const int num_classes, const float log_threshold, const float ignore_value, float* Ydata) { // outer_size = B * C // inner_size = H * W // Xdata = B * C * H * W // labeldata = B * 1 * H * W int i = blockIdx.x; int b = i / num_classes; int c = i % num_classes; float pos = countdata[b * 2]; float neg = countdata[b * 2 + 1]; float value = 0; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { int Xidx = i * inner_size + j; int labelidx = b * inner_size + j; CUDA_KERNEL_ASSERT(labeldata[labelidx] >= 0 && labeldata[labelidx] < C); if (labeldata[labelidx] != c) { continue; } if (labeldata[labelidx] == ignore_value) { continue; } if (labeldata[labelidx] > 0) { value += -logf(max(Xdata[Xidx], log_threshold)) / pos; } else { value += -logf(max(Xdata[Xidx], log_threshold)) / neg; } } typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; float sum = BlockReduce(temp_storage).Sum(value); if (threadIdx.x == 0) { // Ydata[i] = sum / inner_size; Ydata[i] = sum; } } __global__ void 
LabelCrossEntropyWSLGradientKernel_BATCHWISE( const int outer_size, const int C, const int inner_size, const float* Xdata, const int* labeldata, const float* dYdata, const float* countdata, const float log_threshold, const float ignore_value, float* dXdata) { int i = blockIdx.x; float pos = countdata[i * 2]; float neg = countdata[i * 2 + 1]; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { int labelidx = i * inner_size + j; int Xidx = (i * C + labeldata[labelidx]) * inner_size + j; CUDA_KERNEL_ASSERT(labeldata[labelidx] >= 0 && labeldata[labelidx] < C); if (labeldata[labelidx] == ignore_value) { continue; } if (labeldata[labelidx] > 0) { dXdata[Xidx] = -dYdata[i] / max(Xdata[Xidx], log_threshold) / pos; } else { dXdata[Xidx] = -dYdata[i] / max(Xdata[Xidx], log_threshold) / neg; } } } __global__ void LabelCrossEntropyWSLGradientKernel_CLASSWISE( const int outer_size, const int C, const int inner_size, const float* Xdata, const int* labeldata, const float* dYdata, const float* countdata, const int batch_size, const int num_classes, const float log_threshold, const float ignore_value, float* dXdata) { int i = blockIdx.x; int b = i / num_classes; int c = i % num_classes; float pos = countdata[b * 2]; float neg = countdata[b * 2 + 1]; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { int Xidx = i * inner_size + j; int labelidx = b * inner_size + j; CUDA_KERNEL_ASSERT(labeldata[labelidx] >= 0 && labeldata[labelidx] < C); if (labeldata[labelidx] != c) { continue; } if (labeldata[labelidx] == ignore_value) { continue; } if (labeldata[labelidx] > 0) { dXdata[Xidx] = -dYdata[i] / max(Xdata[Xidx], log_threshold) / pos; } else { dXdata[Xidx] = -dYdata[i] / max(Xdata[Xidx], log_threshold) / neg; } } } } // namespace template <> bool LabelCrossEntropyWSLOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& label = Input(1); CAFFE_ENFORCE_EQ(X.dim(), 4); CAFFE_ENFORCE_EQ(label.dim(), 4); CAFFE_ENFORCE_EQ(X.dim32(0), label.dim32(0)); CAFFE_ENFORCE_EQ(1, label.dim32(1)); CAFFE_ENFORCE_EQ(X.dim32(2), label.dim32(2)); CAFFE_ENFORCE_EQ(X.dim32(3), label.dim32(3)); const int batch_size = X.dim32(0); const int num_classes = X.dim32(1); const auto inner_size = X.dim32(2) * X.dim32(3); // const auto outer_size = X.dim32(0); const auto outer_size = X.numel() / inner_size; auto* Y = Output(0); auto* count = Output(1); if (X.dim() == 0) { Y->Resize(std::vector<int64_t>{}); count->Resize(std::vector<int64_t>{}); } else { std::vector<int64_t> dims(X.sizes().begin(), X.sizes().end() - 2); Y->Resize(dims); dims.push_back(2); count->Resize(dims); } // Y->Resize(vector<int64_t>(outer_size)); // count->Resize(vector<int64_t>(outer_size, 2)); LabelBalanceWSLKernel<<<batch_size, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( batch_size, inner_size, label.data<int>(), ignore_value_, count->mutable_data<float>()); LabelCrossEntropyWSLKernel_CLASSWISE<<<outer_size, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( outer_size, num_classes, inner_size, X.data<float>(), label.data<int>(), count->data<float>(), batch_size, num_classes, kLOG_THRESHOLD(), ignore_value_, Y->mutable_data<float>()); return true; } template <> bool LabelCrossEntropyWSLGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& label = Input(1); auto& dY = Input(2); auto& count = Input(3); const int batch_size = X.dim32(0); const int num_classes = X.dim32(1); const auto inner_size = X.dim32(2) * X.dim32(3); // const auto outer_size = X.dim32(0); const auto outer_size = X.numel() / inner_size; 
auto* dX = Output(0); dX->ResizeLike(X); math::Set<float, CUDAContext>(dX->numel(), 0.f, dX->mutable_data<float>(), &context_); LabelCrossEntropyWSLGradientKernel_CLASSWISE<<< outer_size, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( outer_size, num_classes, inner_size, X.data<float>(), label.data<int>(), dY.data<float>(), count.data<float>(), batch_size, num_classes, kLOG_THRESHOLD(), ignore_value_, dX->mutable_data<float>()); return true; } REGISTER_CUDA_OPERATOR(LabelCrossEntropyWSL, LabelCrossEntropyWSLOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(LabelCrossEntropyWSLGradient, LabelCrossEntropyWSLGradientOp<float, CUDAContext>); namespace { template <typename T> inline __device__ T gpu_atomic_add(const T val, T* address); template <> inline __device__ float gpu_atomic_add(const float val, float* address) { return atomicAdd(address, val); } __global__ void CrossEntropyWithLogitsKernel(const int nthreads, const float* Xdata, const float* Ldata, const float log_threshold, float* Ydata) { CUDA_1D_KERNEL_LOOP(i, nthreads) { float prob = max(Xdata[i], log_threshold); float one_prob = max(1 - Xdata[i], log_threshold); float* address = Ydata + 0; float val = -1.0 * (Ldata[i] * log(prob) + (1 - Ldata[i]) * log(one_prob)); gpu_atomic_add(val, address); } } __global__ void CrossEntropyWithLogitsGradientKernel( const int nthreads, const float* Xdata, const float* Ldata, const float* dYdata, const float log_threshold, const float diff_threshold, float* dXdata) { CUDA_1D_KERNEL_LOOP(i, nthreads) { float grad = dYdata[0]; float prob = max(Xdata[i], log_threshold); float one_prob = max(1 - Xdata[i], log_threshold); dXdata[i] = min(grad * (-1 * Ldata[i] / prob - (-1) * (1 - Ldata[i]) / one_prob), diff_threshold); } } } // namespace template <> bool CrossEntropyWithLogitsOp<float, CUDAContext>::RunOnDevice() { const auto& X = Input(0); const auto& L = Input(1); // if (InputSize() > 2) { // printf("Found unused input in CrossEntropyWithLogits %d\n", // InputSize() - 2); //} CAFFE_ENFORCE_EQ(X.dim(), 2); CAFFE_ENFORCE_EQ(X.sizes(), L.sizes()); int N = X.dim32(0); auto* Y = Output(0); Y->Resize(vector<int64_t>{}); math::Set<float, CUDAContext>(Y->numel(), 0.f, Y->mutable_data<float>(), &context_); const float* Xdata = X.data<float>(); const float* Ldata = L.data<float>(); auto* Ydata = Y->mutable_data<float>(); CrossEntropyWithLogitsKernel<<<CAFFE_GET_BLOCKS(X.numel()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.numel(), Xdata, Ldata, kLOG_THRESHOLD(), Ydata); math::Scale<float, float, CUDAContext>(Y->numel(), float(1.0 / N), Ydata, Ydata, &context_); return true; } template <> bool CrossEntropyWithLogitsGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& L = Input(1); auto& dY = Input(2); CAFFE_ENFORCE_EQ(X.numel(), L.numel()); CAFFE_ENFORCE_EQ(X.dim32(0), L.dim32(0)); CAFFE_ENFORCE_EQ(X.dim32(1), L.dim32(1)); CAFFE_ENFORCE_EQ(dY.numel(), 1); int N = X.dim32(0); auto* dX = Output(0); dX->ResizeLike(X); math::Set<float, CUDAContext>(dX->numel(), 0.f, dX->mutable_data<float>(), &context_); const float* Xdata = X.data<float>(); const float* Ldata = L.data<float>(); const float* dYdata = dY.data<float>(); float* dXdata = dX->mutable_data<float>(); CrossEntropyWithLogitsGradientKernel<<<CAFFE_GET_BLOCKS(X.numel()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.numel(), Xdata, Ldata, dYdata, kLOG_THRESHOLD(), kDIFF_THRESHOLD(), dXdata); math::Scale<float, float, CUDAContext>(dX->numel(), float(1.0 / N), dXdata, dXdata, &context_); return true; } // 
REGISTER_CUDA_OPERATOR(CrossEntropyWithLogits, // CrossEntropyWithLogitsOp<float, CUDAContext>); // REGISTER_CUDA_OPERATOR(CrossEntropyWithLogitsGradient, // CrossEntropyWithLogitsGradientOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(CrossEntropyWithLogits, GPUFallbackOp); REGISTER_CUDA_OPERATOR(CrossEntropyWithLogitsGradient, GPUFallbackOp); REGISTER_CUDA_OPERATOR(WeightedCrossEntropyWithLogits, GPUFallbackOp); REGISTER_CUDA_OPERATOR(WeightedCrossEntropyWithLogitsGradient, GPUFallbackOp); namespace { __device__ float sigmoid_xent_forward(float lgt, float tgt) { return lgt * (tgt - (lgt >= 0)) - log(1 + exp(lgt - 2 * lgt * (lgt >= 0))); } __device__ float sigmoid_xent_backward(float lgt, float tgt) { return tgt - 1. / (1. + exp(-lgt)); } __global__ void SigmoidBalanceWSLKernel(const int outer_size, const int inner_size, const float* targets_ptr, const float ignore_value, float* count_ptr) { int i = blockIdx.x; int last_idx = (i + 1) * inner_size; float pos = 0; float neg = 0; for (int in_idx = i * inner_size + threadIdx.x; in_idx < last_idx; in_idx += blockDim.x) { if (targets_ptr[in_idx] == ignore_value) { continue; } if (targets_ptr[in_idx] > 0.5) { pos += 1; } else { neg += 1; } } typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; float pos_sum = BlockReduce(temp_storage).Sum(pos); typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce2; __shared__ typename BlockReduce2::TempStorage temp_storage2; float neg_sum = BlockReduce2(temp_storage2).Sum(neg); if (threadIdx.x == 0) { count_ptr[i * 2] = pos_sum; count_ptr[i * 2 + 1] = neg_sum; } } __global__ void SigmoidCrossEntropyWithLogitsWSLKernel( const int outer_size, const int inner_size, const float* logits_ptr, const float* targets_ptr, const float* count_ptr, const float ignore_value, float* out_ptr) { int i = blockIdx.x; int last_idx = (i + 1) * inner_size; float value = 0; float pos = count_ptr[i * 2]; float neg = count_ptr[i * 2 + 1]; for (int in_idx = i * inner_size + threadIdx.x; in_idx < last_idx; in_idx += blockDim.x) { if (targets_ptr[in_idx] == ignore_value) { continue; } if (targets_ptr[in_idx] > 0.5) { value += sigmoid_xent_forward(logits_ptr[in_idx], targets_ptr[in_idx]) / pos; } else { value += sigmoid_xent_forward(logits_ptr[in_idx], targets_ptr[in_idx]) / neg; } } typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; float sum = BlockReduce(temp_storage).Sum(value); if (threadIdx.x == 0) { out_ptr[i] = -sum; } } __global__ void SigmoidCrossEntropyWithLogitsWSLGradientKernel( const int outer_size, const int inner_size, const float* g_ptr, const float* logits_ptr, const float* targets_ptr, const float* count_ptr, const float ignore_value, float* out_ptr) { CUDA_1D_KERNEL_LOOP(in_idx, outer_size * inner_size) { int i = in_idx / inner_size; if (targets_ptr[in_idx] == ignore_value) { out_ptr[in_idx] = 0.0; continue; } // auto g_factor = -g_ptr[i] / inner_size; float g_factor; float count; if (targets_ptr[in_idx] > 0.5) { count = count_ptr[i * 2]; } else { count = count_ptr[i * 2 + 1]; } if (count > 0) { g_factor = -g_ptr[i] / count; } else { g_factor = 0; } out_ptr[in_idx] = g_factor * sigmoid_xent_backward(logits_ptr[in_idx], targets_ptr[in_idx]); } } } // namespace template <> bool SigmoidCrossEntropyWithLogitsWSLOp<float, CUDAContext>::RunOnDevice() { auto& logits = Input(0); auto& targets = Input(1); CAFFE_ENFORCE(logits.sizes() == targets.sizes()); // const 
auto inner_size = logits.dim() > 0 ? logits.sizes().back() : 1; const auto inner_size = logits.dim32(2) * logits.dim32(3); const auto outer_size = logits.numel() / inner_size; auto* out = Output(0); auto* count = Output(1); if (logits.dim() == 0) { out->Resize(std::vector<int64_t>{}); count->Resize(std::vector<int64_t>{}); } else { std::vector<int64_t> dims(logits.sizes().begin(), logits.sizes().end() - 2); out->Resize(dims); dims.push_back(2); count->Resize(dims); } auto* out_ptr = out->mutable_data<float>(); auto* count_ptr = count->mutable_data<float>(); auto* logits_ptr = logits.data<float>(); auto* targets_ptr = targets.data<float>(); if (logits.numel() <= 0) { // nothing to do, not even launching kernel return true; } SigmoidBalanceWSLKernel<<<outer_size, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( outer_size, inner_size, targets_ptr, ignore_value_, count_ptr); SigmoidCrossEntropyWithLogitsWSLKernel<<<outer_size, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( outer_size, inner_size, logits_ptr, targets_ptr, count_ptr, ignore_value_, out_ptr); return true; } template <> bool SigmoidCrossEntropyWithLogitsWSLGradientOp<float, CUDAContext>::RunOnDevice() { auto& g = Input(0); auto& logits = Input(1); auto& targets = Input(2); auto& count = Input(3); CAFFE_ENFORCE(logits.sizes() == targets.sizes()); // const auto inner_size = logits.dim() > 0 ? logits.sizes().back() : 1; const auto inner_size = logits.dim32(2) * logits.dim32(3); const auto outer_size = logits.numel() / inner_size; CAFFE_ENFORCE(g.numel() == outer_size); auto* out = Output(0); out->ResizeLike(logits); auto* out_ptr = out->mutable_data<float>(); auto* logits_ptr = logits.data<float>(); auto* targets_ptr = targets.data<float>(); auto* g_ptr = g.data<float>(); auto* count_ptr = count.data<float>(); SigmoidCrossEntropyWithLogitsWSLGradientKernel<<< CAFFE_GET_BLOCKS(outer_size * inner_size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(outer_size, inner_size, g_ptr, logits_ptr, targets_ptr, count_ptr, ignore_value_, out_ptr); return true; } REGISTER_CUDA_OPERATOR(SigmoidCrossEntropyWithLogitsWSL, SigmoidCrossEntropyWithLogitsWSLOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( SigmoidCrossEntropyWithLogitsWSLGradient, SigmoidCrossEntropyWithLogitsWSLGradientOp<float, CUDAContext>); } // namespace caffe2
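The operator pair above balances the per-pixel loss by first counting, for every image, how many labels are positive and how many are negative, using one block per image and cub::BlockReduce to sum the per-thread tallies. A minimal standalone sketch of just that counting step (not the Caffe2 operator itself), assuming a plain int label buffer, a block size of 256, and toy sizes chosen only for illustration:

// Count positive/negative labels per image, mirroring LabelBalanceWSLKernel.
#include <cstdio>
#include <cuda_runtime.h>
#include <cub/block/block_reduce.cuh>

constexpr int kThreads = 256;

__global__ void CountPosNeg(const int* labels, int inner_size, float* counts) {
  int b = blockIdx.x;                          // one block per image
  const int* row = labels + b * inner_size;
  float pos = 0.f, neg = 0.f;
  for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
    if (row[j] > 0) pos += 1.f; else neg += 1.f;   // label > 0 counts as positive
  }
  typedef cub::BlockReduce<float, kThreads> BlockReduce;
  __shared__ typename BlockReduce::TempStorage tmp_pos, tmp_neg;
  float pos_sum = BlockReduce(tmp_pos).Sum(pos);   // aggregate is valid in thread 0
  float neg_sum = BlockReduce(tmp_neg).Sum(neg);
  if (threadIdx.x == 0) {
    counts[b * 2]     = pos_sum;
    counts[b * 2 + 1] = neg_sum;
  }
}

int main() {
  const int batch = 2, inner = 1024;
  int* d_labels; float* d_counts;
  cudaMalloc(&d_labels, batch * inner * sizeof(int));
  cudaMalloc(&d_counts, batch * 2 * sizeof(float));
  cudaMemset(d_labels, 0, batch * inner * sizeof(int));   // all labels 0 -> all negative
  CountPosNeg<<<batch, kThreads>>>(d_labels, inner, d_counts);
  float h[4];
  cudaMemcpy(h, d_counts, sizeof(h), cudaMemcpyDeviceToHost);
  printf("img0 pos=%.0f neg=%.0f, img1 pos=%.0f neg=%.0f\n", h[0], h[1], h[2], h[3]);
  cudaFree(d_labels); cudaFree(d_counts);
  return 0;
}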
peer2peerMemcpy.hip
// !!! This is a file automatically generated by hipify!!! /* * * peer2peerMemcpy.cu * * Microbenchmark of peer-to-peer memcpy using portable pinned memory. * The CUDA driver implements a similar strategy for GPUs that * cannot perform peer-to-peer directly, e.g. because they are plugged * into different I/O hubs. * * Build with: nvcc -I ../chLib <options> peer2peerMemcpy.cu * Requires: No minimum SM requirement. * * Copyright (c) 2011-2012, Archaea Software, LLC. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include <stdio.h> #include "chError.h" #include "chTimer.h" #define STAGING_BUFFER_SIZE 1048576 void *g_hostBuffers[2]; // Indexed as follows: [device][event] hipEvent_t g_events[2][2]; // these are already defined on some platforms - make our // own definitions that will work. 
#undef min #undef max #define min(a,b) ((a)<(b)?(a):(b)) #define max(a,b) ((b)<(a)?(a):(b)) void chMemcpyPeerToPeer( void *_dst, int dstDevice, const void *_src, int srcDevice, size_t N ) { hipError_t status; char *dst = (char *) _dst; const char *src = (const char *) _src; int stagingIndex = 0; while ( N ) { size_t thisCopySize = min( N, STAGING_BUFFER_SIZE ); cuda(SetDevice( srcDevice ) ); cuda(StreamWaitEvent( NULL, g_events[dstDevice][stagingIndex], 0 ) ); cuda(MemcpyAsync( g_hostBuffers[stagingIndex], src, thisCopySize, hipMemcpyDeviceToHost, NULL ) ); cuda(EventRecord( g_events[srcDevice][stagingIndex] ) ); cuda(SetDevice( dstDevice ) ); cuda(StreamWaitEvent( NULL, g_events[srcDevice][stagingIndex], 0 ) ); cuda(MemcpyAsync( dst, g_hostBuffers[stagingIndex], thisCopySize, hipMemcpyHostToDevice, NULL ) ); cuda(EventRecord( g_events[dstDevice][stagingIndex] ) ); dst += thisCopySize; src += thisCopySize; N -= thisCopySize; stagingIndex = 1 - stagingIndex; } // Wait until both devices are done cuda(SetDevice( srcDevice ) ); cuda(DeviceSynchronize() ); cuda(SetDevice( dstDevice ) ); cuda(DeviceSynchronize() ); Error: return; } bool TestMemcpy( int *dst, int dstDevice, int *src, int srcDevice, int *srcHost, const int *srcOriginal, size_t dstOffset, size_t srcOffset, size_t numInts ) { hipError_t status; memset( srcHost, 0, numInts ); hipSetDevice( srcDevice ); cuda(Memcpy( src+srcOffset, srcOriginal+srcOffset, numInts*sizeof(int), hipMemcpyHostToDevice ) ); memset( srcHost, 0, numInts*sizeof(int) ); chMemcpyPeerToPeer( dst+dstOffset, dstDevice, src+srcOffset, srcDevice, numInts*sizeof(int) ); cuda(Memcpy( srcHost, dst+dstOffset, numInts*sizeof(int), hipMemcpyDeviceToHost ) ); for ( size_t i = 0; i < numInts; i++ ) { if ( srcHost[i] != srcOriginal[srcOffset+i] ) { return false; } } return true; Error: return false; } int main( int argc, char *argv[] ) { int deviceCount; hipError_t status; int *deviceInt[2]; int *hostInt = 0; const size_t numInts = 8*1048576; const int cIterations = 10; int *testVector = 0; printf( "Peer-to-peer memcpy... " ); fflush( stdout ); chTimerTimestamp start, stop; memset( deviceInt, 0, sizeof(deviceInt) ); cuda(GetDeviceCount( &deviceCount ) ); if ( deviceCount < 2 ) { printf( "Peer-to-peer requires at least 2 devices\n" ); exit(1); } for ( int i = 0; i < 2; i++ ) { hipSetDevice( i ); cuda(EventCreate( &g_events[i][0] ) ); cuda(EventRecord( g_events[i][0], 0 ) ); // so it is signaled on first synchronize cuda(EventCreate( &g_events[i][1] ) ); cuda(EventRecord( g_events[i][1], 0 ) ); // so it is signaled on first synchronize cuda(Malloc( &deviceInt[i], numInts*sizeof(int) ) ); } cuda(HostAlloc( &g_hostBuffers[0], STAGING_BUFFER_SIZE, hipHostMallocPortable ) ); cuda(HostAlloc( &g_hostBuffers[1], STAGING_BUFFER_SIZE, hipHostMallocPortable ) ); cuda(HostAlloc( &hostInt, numInts*sizeof(int), 0 ) ); testVector = (int *) malloc( numInts*sizeof(int) ); if ( ! testVector ) { printf( "malloc() failed\n" ); return 1; } for ( size_t i = 0; i < numInts; i++ ) { testVector[i] = rand(); } if ( ! TestMemcpy( deviceInt[0], 0, deviceInt[1], 1, hostInt, testVector, 0, 0, numInts ) ) { goto Error; } for ( int i = 0; i < cIterations; i++ ) { size_t dstOffset = rand() % (numInts-1); size_t srcOffset = rand() % (numInts-1); size_t intsThisIteration = 1 + rand() % (numInts-max(dstOffset,srcOffset)-1); if ( ! 
TestMemcpy( deviceInt[0], 0, deviceInt[1], 1, hostInt, testVector, dstOffset, srcOffset, intsThisIteration ) ) { //TestMemcpy( deviceInt, hostInt, testVector, dstOffset, srcOffset, intsThisIteration ); goto Error; } } chTimerGetTime( &start ); for ( int i = 0; i < cIterations; i++ ) { chMemcpyPeerToPeer( deviceInt[0], 0, deviceInt[1], 1, numInts*sizeof(int) ) ; } cuda(DeviceSynchronize() ); chTimerGetTime( &stop ); { double MBytes = cIterations*numInts*sizeof(int) / 1048576.0; double MBpers = MBytes / chTimerElapsedTime( &start, &stop ); printf( "%.2f MB/s\n", MBpers ); } hipFree( deviceInt ); hipHostFree( hostInt ); return 0; Error: printf( "Error\n" ); return 1; }
peer2peerMemcpy.cu
/* * * peer2peerMemcpy.cu * * Microbenchmark of peer-to-peer memcpy using portable pinned memory. * The CUDA driver implements a similar strategy for GPUs that * cannot perform peer-to-peer directly, e.g. because they are plugged * into different I/O hubs. * * Build with: nvcc -I ../chLib <options> peer2peerMemcpy.cu * Requires: No minimum SM requirement. * * Copyright (c) 2011-2012, Archaea Software, LLC. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include <stdio.h> #include "chError.h" #include "chTimer.h" #define STAGING_BUFFER_SIZE 1048576 void *g_hostBuffers[2]; // Indexed as follows: [device][event] cudaEvent_t g_events[2][2]; // these are already defined on some platforms - make our // own definitions that will work. 
#undef min #undef max #define min(a,b) ((a)<(b)?(a):(b)) #define max(a,b) ((b)<(a)?(a):(b)) void chMemcpyPeerToPeer( void *_dst, int dstDevice, const void *_src, int srcDevice, size_t N ) { cudaError_t status; char *dst = (char *) _dst; const char *src = (const char *) _src; int stagingIndex = 0; while ( N ) { size_t thisCopySize = min( N, STAGING_BUFFER_SIZE ); cuda(SetDevice( srcDevice ) ); cuda(StreamWaitEvent( NULL, g_events[dstDevice][stagingIndex], 0 ) ); cuda(MemcpyAsync( g_hostBuffers[stagingIndex], src, thisCopySize, cudaMemcpyDeviceToHost, NULL ) ); cuda(EventRecord( g_events[srcDevice][stagingIndex] ) ); cuda(SetDevice( dstDevice ) ); cuda(StreamWaitEvent( NULL, g_events[srcDevice][stagingIndex], 0 ) ); cuda(MemcpyAsync( dst, g_hostBuffers[stagingIndex], thisCopySize, cudaMemcpyHostToDevice, NULL ) ); cuda(EventRecord( g_events[dstDevice][stagingIndex] ) ); dst += thisCopySize; src += thisCopySize; N -= thisCopySize; stagingIndex = 1 - stagingIndex; } // Wait until both devices are done cuda(SetDevice( srcDevice ) ); cuda(DeviceSynchronize() ); cuda(SetDevice( dstDevice ) ); cuda(DeviceSynchronize() ); Error: return; } bool TestMemcpy( int *dst, int dstDevice, int *src, int srcDevice, int *srcHost, const int *srcOriginal, size_t dstOffset, size_t srcOffset, size_t numInts ) { cudaError_t status; memset( srcHost, 0, numInts ); cudaSetDevice( srcDevice ); cuda(Memcpy( src+srcOffset, srcOriginal+srcOffset, numInts*sizeof(int), cudaMemcpyHostToDevice ) ); memset( srcHost, 0, numInts*sizeof(int) ); chMemcpyPeerToPeer( dst+dstOffset, dstDevice, src+srcOffset, srcDevice, numInts*sizeof(int) ); cuda(Memcpy( srcHost, dst+dstOffset, numInts*sizeof(int), cudaMemcpyDeviceToHost ) ); for ( size_t i = 0; i < numInts; i++ ) { if ( srcHost[i] != srcOriginal[srcOffset+i] ) { return false; } } return true; Error: return false; } int main( int argc, char *argv[] ) { int deviceCount; cudaError_t status; int *deviceInt[2]; int *hostInt = 0; const size_t numInts = 8*1048576; const int cIterations = 10; int *testVector = 0; printf( "Peer-to-peer memcpy... " ); fflush( stdout ); chTimerTimestamp start, stop; memset( deviceInt, 0, sizeof(deviceInt) ); cuda(GetDeviceCount( &deviceCount ) ); if ( deviceCount < 2 ) { printf( "Peer-to-peer requires at least 2 devices\n" ); exit(1); } for ( int i = 0; i < 2; i++ ) { cudaSetDevice( i ); cuda(EventCreate( &g_events[i][0] ) ); cuda(EventRecord( g_events[i][0], 0 ) ); // so it is signaled on first synchronize cuda(EventCreate( &g_events[i][1] ) ); cuda(EventRecord( g_events[i][1], 0 ) ); // so it is signaled on first synchronize cuda(Malloc( &deviceInt[i], numInts*sizeof(int) ) ); } cuda(HostAlloc( &g_hostBuffers[0], STAGING_BUFFER_SIZE, cudaHostAllocPortable ) ); cuda(HostAlloc( &g_hostBuffers[1], STAGING_BUFFER_SIZE, cudaHostAllocPortable ) ); cuda(HostAlloc( &hostInt, numInts*sizeof(int), 0 ) ); testVector = (int *) malloc( numInts*sizeof(int) ); if ( ! testVector ) { printf( "malloc() failed\n" ); return 1; } for ( size_t i = 0; i < numInts; i++ ) { testVector[i] = rand(); } if ( ! TestMemcpy( deviceInt[0], 0, deviceInt[1], 1, hostInt, testVector, 0, 0, numInts ) ) { goto Error; } for ( int i = 0; i < cIterations; i++ ) { size_t dstOffset = rand() % (numInts-1); size_t srcOffset = rand() % (numInts-1); size_t intsThisIteration = 1 + rand() % (numInts-max(dstOffset,srcOffset)-1); if ( ! 
TestMemcpy( deviceInt[0], 0, deviceInt[1], 1, hostInt, testVector, dstOffset, srcOffset, intsThisIteration ) ) { //TestMemcpy( deviceInt, hostInt, testVector, dstOffset, srcOffset, intsThisIteration ); goto Error; } } chTimerGetTime( &start ); for ( int i = 0; i < cIterations; i++ ) { chMemcpyPeerToPeer( deviceInt[0], 0, deviceInt[1], 1, numInts*sizeof(int) ) ; } cuda(DeviceSynchronize() ); chTimerGetTime( &stop ); { double MBytes = cIterations*numInts*sizeof(int) / 1048576.0; double MBpers = MBytes / chTimerElapsedTime( &start, &stop ); printf( "%.2f MB/s\n", MBpers ); } cudaFree( deviceInt ); cudaFreeHost( hostInt ); return 0; Error: printf( "Error\n" ); return 1; }
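The benchmark above bounces every device-to-device copy through two portable pinned host buffers precisely because it targets GPU pairs that cannot reach each other directly. When the devices can map each other's memory, the runtime's peer API avoids the host bounce entirely; the following is a minimal sketch of that direct path, assuming two visible devices and omitting error checking for brevity:

#include <cstdio>
#include <cuda_runtime.h>

int main() {
  const size_t bytes = 1 << 20;          // 1 MiB, chosen only for illustration
  int deviceCount = 0;
  cudaGetDeviceCount(&deviceCount);
  if (deviceCount < 2) { printf("need 2 GPUs\n"); return 0; }

  int can01 = 0, can10 = 0;
  cudaDeviceCanAccessPeer(&can01, 0, 1);
  cudaDeviceCanAccessPeer(&can10, 1, 0);
  if (!can01 || !can10) {
    printf("no direct peer path; a staged copy like the one above is needed\n");
    return 0;
  }

  // Enable the peer mapping once per direction.
  cudaSetDevice(0); cudaDeviceEnablePeerAccess(1, 0);
  cudaSetDevice(1); cudaDeviceEnablePeerAccess(0, 0);

  void *src = nullptr, *dst = nullptr;
  cudaSetDevice(1); cudaMalloc(&src, bytes);
  cudaSetDevice(0); cudaMalloc(&dst, bytes);

  // Direct device-to-device copy; no host staging buffers or events involved.
  cudaMemcpyPeer(dst, 0, src, 1, bytes);
  cudaDeviceSynchronize();
  printf("copied %zu bytes GPU1 -> GPU0 directly\n", bytes);

  cudaFree(dst);
  cudaSetDevice(1); cudaFree(src);
  return 0;
}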
c80331d1b383bf814c0c1a0cfe6dcf7f817a29f0.hip
// !!! This is a file automatically generated by hipify!!! #include <torch/extension.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <vector> namespace { template <typename scalar_t> __global__ void cross_prod_cuda_forward_kernel( const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> x1, const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> x2, torch::PackedTensorAccessor<scalar_t,4,torch::RestrictPtrTraits,size_t> y) { const int n2 = x2.size(2); const int n1 = x1.size(2); // // cannot parallize across channels, because it will case modifying the the location by multiple threads at the same time // const int inc12 = blockIdx.x * blockDim.x + threadIdx.x; // const int ic = inc12 / (n1*n2); // const int in12 = inc12 % (n1*n2); // const int in1 = in12 / n2; // const int in2 = in12 % n2; // if (inc12 < n1 * n2 * c ){ // y[blockIdx.y][in1][in2] += (x1[blockIdx.y][ic][in1] - x2[blockIdx.y][ic][in2]) * (x1[blockIdx.y][ic][in1] - x2[blockIdx.y][ic][in2]) ; // } const int in12 = blockIdx.x * blockDim.x + threadIdx.x; const int in1 = in12 / n2; const int in2 = in12 % n2; if (in12 < n1 * n2 ){ y[blockIdx.y][in1][in2][0] = x1[blockIdx.y][1][in1] * x2[blockIdx.y][2][in2] - x1[blockIdx.y][2][in1] * x2[blockIdx.y][1][in2]; y[blockIdx.y][in1][in2][1] = x1[blockIdx.y][2][in1] * x2[blockIdx.y][0][in2] - x1[blockIdx.y][0][in1] * x2[blockIdx.y][2][in2]; y[blockIdx.y][in1][in2][2] = x1[blockIdx.y][0][in1] * x2[blockIdx.y][1][in2] - x1[blockIdx.y][1][in1] * x2[blockIdx.y][0][in2]; } } } torch::Tensor cross_prod_cuda_forward( torch::Tensor x1, torch::Tensor x2) { const auto batch_size = x1.size(0); const auto channel_size = x1.size(1); const auto n1 = x1.size(2); const auto n2 = x2.size(2); auto options = torch::TensorOptions().dtype(x1.dtype()).layout(torch::kStrided).device(x1.device()).requires_grad(true); auto y = torch::zeros({batch_size, n1, n2, 3}, options); // printf("x1 device: %d \n", x1.device().type()); // printf("x1 index: %d \n", x1.device().index()); const int threads = 1024; // cannot parallize across channels, because it will case modifying the the location by multiple threads at the same time // const dim3 blocks((n1 * n2 * channel_size + threads - 1) / threads, batch_size); const dim3 blocks((n1 * n2 + threads - 1) / threads, batch_size); // const dim3 blocks(1, 1); int device_id = x1.device().index(); hipSetDevice(device_id); AT_DISPATCH_FLOATING_TYPES(x1.type(), "cross_prod_forward_cuda", ([&] { hipLaunchKernelGGL(( cross_prod_cuda_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, x1.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(), x2.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(), y.packed_accessor<scalar_t,4,torch::RestrictPtrTraits,size_t>()); })); hipDeviceSynchronize(); return y; }
c80331d1b383bf814c0c1a0cfe6dcf7f817a29f0.cu
#include <torch/extension.h> #include <cuda.h> #include <cuda_runtime.h> #include <vector> namespace { template <typename scalar_t> __global__ void cross_prod_cuda_forward_kernel( const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> x1, const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> x2, torch::PackedTensorAccessor<scalar_t,4,torch::RestrictPtrTraits,size_t> y) { const int n2 = x2.size(2); const int n1 = x1.size(2); // // cannot parallize across channels, because it will case modifying the the location by multiple threads at the same time // const int inc12 = blockIdx.x * blockDim.x + threadIdx.x; // const int ic = inc12 / (n1*n2); // const int in12 = inc12 % (n1*n2); // const int in1 = in12 / n2; // const int in2 = in12 % n2; // if (inc12 < n1 * n2 * c ){ // y[blockIdx.y][in1][in2] += (x1[blockIdx.y][ic][in1] - x2[blockIdx.y][ic][in2]) * (x1[blockIdx.y][ic][in1] - x2[blockIdx.y][ic][in2]) ; // } const int in12 = blockIdx.x * blockDim.x + threadIdx.x; const int in1 = in12 / n2; const int in2 = in12 % n2; if (in12 < n1 * n2 ){ y[blockIdx.y][in1][in2][0] = x1[blockIdx.y][1][in1] * x2[blockIdx.y][2][in2] - x1[blockIdx.y][2][in1] * x2[blockIdx.y][1][in2]; y[blockIdx.y][in1][in2][1] = x1[blockIdx.y][2][in1] * x2[blockIdx.y][0][in2] - x1[blockIdx.y][0][in1] * x2[blockIdx.y][2][in2]; y[blockIdx.y][in1][in2][2] = x1[blockIdx.y][0][in1] * x2[blockIdx.y][1][in2] - x1[blockIdx.y][1][in1] * x2[blockIdx.y][0][in2]; } } } torch::Tensor cross_prod_cuda_forward( torch::Tensor x1, torch::Tensor x2) { const auto batch_size = x1.size(0); const auto channel_size = x1.size(1); const auto n1 = x1.size(2); const auto n2 = x2.size(2); auto options = torch::TensorOptions().dtype(x1.dtype()).layout(torch::kStrided).device(x1.device()).requires_grad(true); auto y = torch::zeros({batch_size, n1, n2, 3}, options); // printf("x1 device: %d \n", x1.device().type()); // printf("x1 index: %d \n", x1.device().index()); const int threads = 1024; // cannot parallize across channels, because it will case modifying the the location by multiple threads at the same time // const dim3 blocks((n1 * n2 * channel_size + threads - 1) / threads, batch_size); const dim3 blocks((n1 * n2 + threads - 1) / threads, batch_size); // const dim3 blocks(1, 1); int device_id = x1.device().index(); cudaSetDevice(device_id); AT_DISPATCH_FLOATING_TYPES(x1.type(), "cross_prod_forward_cuda", ([&] { cross_prod_cuda_forward_kernel<scalar_t><<<blocks, threads>>>( x1.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(), x2.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(), y.packed_accessor<scalar_t,4,torch::RestrictPtrTraits,size_t>()); })); cudaDeviceSynchronize(); return y; }
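The extension kernel above gives each thread one (point-in-x1, point-in-x2) pair by flattening the pair into a single linear index and recovering the two coordinates with a divide and a modulo. A standalone sketch of the same decomposition over plain float3 arrays; the float3 layout is an assumption made for readability (the real kernel reads a channels-first tensor through PackedTensorAccessor):

#include <cstdio>
#include <cuda_runtime.h>

// One thread per (i, j) pair: i = idx / n2, j = idx % n2, as in the kernel above.
__global__ void PairwiseCross(const float3* a, const float3* b,
                              float3* out, int n1, int n2) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= n1 * n2) return;
  int i = idx / n2;
  int j = idx % n2;
  float3 u = a[i], v = b[j];
  out[idx] = make_float3(u.y * v.z - u.z * v.y,
                         u.z * v.x - u.x * v.z,
                         u.x * v.y - u.y * v.x);
}

int main() {
  const int n1 = 4, n2 = 3;
  float3 ha[n1], hb[n2], hout[n1 * n2];
  for (int i = 0; i < n1; ++i) ha[i] = make_float3(1.f, 0.f, 0.f);   // x axis
  for (int j = 0; j < n2; ++j) hb[j] = make_float3(0.f, 1.f, 0.f);   // y axis
  float3 *da, *db, *dout;
  cudaMalloc(&da, sizeof(ha)); cudaMalloc(&db, sizeof(hb)); cudaMalloc(&dout, sizeof(hout));
  cudaMemcpy(da, ha, sizeof(ha), cudaMemcpyHostToDevice);
  cudaMemcpy(db, hb, sizeof(hb), cudaMemcpyHostToDevice);
  const int threads = 256, blocks = (n1 * n2 + threads - 1) / threads;
  PairwiseCross<<<blocks, threads>>>(da, db, dout, n1, n2);
  cudaMemcpy(hout, dout, sizeof(hout), cudaMemcpyDeviceToHost);
  printf("x cross y = (%g, %g, %g), expected (0, 0, 1)\n", hout[0].x, hout[0].y, hout[0].z);
  cudaFree(da); cudaFree(db); cudaFree(dout);
  return 0;
}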
ceb6e451d18202ea4fe4afa5e8050bdd9ed18c7c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Author: ddlee, [email protected] * modified from MXNet's MultiboxTarget Operator */ #include "./assign_anchor-inl.h" #include <mshadow/cuda/tensor_gpu-inl.cuh> #define ASSIGN_ANCHOR_CUDA_CHECK(condition) \ /* Code block avoids redefinition of hipError_t error */ \ do { \ hipError_t error = condition; \ CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error); \ } while (0) namespace mshadow { namespace cuda { template<typename DType> __global__ void InitGroundTruthFlags(DType *gt_flags, const DType *labels, const int num_batches, const int num_labels, const int label_width) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= num_batches * num_labels) return; int b = index / num_labels; int l = index % num_labels; if (*(labels + b * num_labels * label_width + l * label_width) == -1.f) { *(gt_flags + b * num_labels + l) = 0; // dummy gt } else { *(gt_flags + b * num_labels + l) = 1; // need to be matched } } template<typename DType> __global__ void FindBestMatches(DType *best_matches, DType *gt_flags, DType *gt_count, DType *anchor_flags, DType *anchor_cls, const DType *labels, const DType *overlaps, const int num_anchors, const int num_labels, const int label_width) { int nbatch = blockIdx.x; gt_flags += nbatch * num_labels; gt_count += nbatch * num_labels; overlaps += nbatch * num_anchors * num_labels; best_matches += nbatch * num_anchors; anchor_flags += nbatch * num_anchors; labels += nbatch * num_labels * label_width; anchor_cls += nbatch * num_anchors; const int num_threads = kMaxThreadsPerBlock; __shared__ int max_indices_y[kMaxThreadsPerBlock]; __shared__ int max_indices_x[kMaxThreadsPerBlock]; __shared__ float max_values[kMaxThreadsPerBlock]; while (1) { // check if all done. bool finished = true; for (int i = 0; i < num_labels; ++i) { if (gt_flags[i] > .5) { finished = false; break; } } if (finished) break; // all done. 
// finding max indices in different threads int max_x = -1; int max_y = -1; DType max_value = 1e-6; // start with very small overlap for (int i = threadIdx.x; i < num_anchors; i += num_threads) { if (anchor_flags[i] > .5) continue; for (int j = 0; j < num_labels; ++j) { if (gt_flags[j] > .5) { DType temp = overlaps[i * num_labels + j]; if (temp > max_value) { max_x = j; max_y = i; max_value = temp; } } } } max_indices_x[threadIdx.x] = max_x; max_indices_y[threadIdx.x] = max_y; max_values[threadIdx.x] = max_value; __syncthreads(); if (threadIdx.x == 0) { // merge results and assign best match, over all threads int max_x = -1; int max_y = -1; DType max_value = -1; for (int k = 0; k < num_threads; ++k) { if (max_indices_y[k] < 0 || max_indices_x[k] < 0) continue; float temp = max_values[k]; if (temp > max_value) { max_x = max_indices_x[k]; max_y = max_indices_y[k]; max_value = temp; } } if (max_x >= 0 && max_y >= 0) { best_matches[max_y] = max_value; int offset_l = static_cast<int>(max_x) * label_width; anchor_cls[max_y] = labels[offset_l] + 1; // mark flags as visited // best match // gt_count: -1 -> 0 // anchor_flag: -1 -> 1 gt_flags[max_x] = 0.f; gt_count[max_x] = 0.f; anchor_flags[max_y] = 1.f; } else { // no more good matches for (int i = 0; i < num_labels; ++i) { gt_flags[i] = 0.f; } } } __syncthreads(); } } template<typename DType> __global__ void FindGoodMatches(DType *best_matches, DType *anchor_flags, DType *match, DType *anchor_cls, const DType *labels, const DType *overlaps, const int num_anchors, const int num_labels, const int label_width, const float overlap_threshold) { int nbatch = blockIdx.x; overlaps += nbatch * num_anchors * num_labels; best_matches += nbatch * num_anchors; anchor_flags += nbatch * num_anchors; match += nbatch * num_anchors; anchor_cls += nbatch * num_anchors; labels += nbatch * num_labels * label_width; const int num_threads = kMaxThreadsPerBlock; for (int i = threadIdx.x; i < num_anchors; i += num_threads) { if (anchor_flags[i] < 0) { int idx = -1; float max_value = -1.f; for (int j = 0; j < num_labels; ++j) { DType temp = overlaps[i * num_labels + j]; if (temp > max_value) { max_value = temp; idx = j; } } if (max_value > overlap_threshold && (idx >= 0)) { best_matches[i] = max_value; int offset_l = static_cast<int>(idx) * label_width; anchor_cls[i] = labels[offset_l] + 1; // good match // anchor_flag: -1 -> 0.9 anchor_flags[i] = 0.9f; // cache good anchor matched label id match[i] = idx; } } } } template<typename DType> __global__ void CollectGoodMatches(DType *gt_count, const DType *match, const int num_anchors, const int num_labels){ int nbatch = blockIdx.x; gt_count += nbatch * num_labels; match += nbatch * num_anchors; int idx = -1; for (int i = 0; i < num_anchors; i++){ idx = int(match[i]); if (idx > -1){ // accummulate each good match on that gt gt_count[idx] += 1.f; } } } } // namespace cuda template<typename DType> inline void AssignAnchorForward(const Tensor<gpu, 2, DType> &anchor_flags_, const Tensor<gpu, 2, DType> &best_matches_, const Tensor<gpu, 2, DType> &gt_count_, const Tensor<gpu, 2, DType> &anchor_cls_, const Tensor<gpu, 2, DType> &anchors, const Tensor<gpu, 3, DType> &labels, const Tensor<gpu, 4, DType> &temp_space, const float overlap_threshold) { const int num_batches = labels.size(0); const int num_labels = labels.size(1); const int label_width = labels.size(2); const int num_anchors = anchors.size(0); CHECK_GE(num_batches, 1); CHECK_GT(num_labels, 2); CHECK_GE(num_anchors, 1); temp_space[1] = -1.f; temp_space[2] = -1.f; DType 
*gt_flags = temp_space[1].dptr_; DType *match = temp_space[2].dptr_; DType *gt_count = gt_count_.dptr_; DType *anchor_flags = anchor_flags_.dptr_; DType *best_matches = best_matches_.dptr_; DType *anchor_cls = anchor_cls_.dptr_; // init ground-truth flags, by checking valid labels const int num_threads = cuda::kMaxThreadsPerBlock; dim3 init_thread_dim(num_threads); dim3 init_block_dim((num_batches * num_labels - 1) / num_threads + 1); cuda::CheckLaunchParam(init_block_dim, init_thread_dim, "AssignAnchor Init"); hipLaunchKernelGGL(( cuda::InitGroundTruthFlags<DType>), dim3(init_block_dim), dim3(init_thread_dim), 0, 0, gt_flags, labels.dptr_, num_batches, num_labels, label_width); ASSIGN_ANCHOR_CUDA_CHECK(hipPeekAtLastError()); // compute best matches const DType *overlaps = temp_space[0].dptr_; cuda::CheckLaunchParam(num_batches, num_threads, "AssignAnchor Matching"); hipLaunchKernelGGL(( cuda::FindBestMatches<DType>), dim3(num_batches), dim3(num_threads), 0, 0, best_matches, gt_flags, gt_count, anchor_flags, anchor_cls, labels.dptr_, overlaps, num_anchors, num_labels, label_width); ASSIGN_ANCHOR_CUDA_CHECK(hipPeekAtLastError()); // find good matches with overlap > threshold cuda::CheckLaunchParam(num_batches, num_threads, "AssignAnchor FindGood"); hipLaunchKernelGGL(( cuda::FindGoodMatches<DType>), dim3(num_batches), dim3(num_threads), 0, 0, best_matches, anchor_flags, match, anchor_cls, labels.dptr_, overlaps, num_anchors, num_labels, label_width, overlap_threshold); ASSIGN_ANCHOR_CUDA_CHECK(hipPeekAtLastError()); cuda::CheckLaunchParam(num_batches, 1, "AssignAnchor Collect"); hipLaunchKernelGGL(( cuda::CollectGoodMatches<DType>), dim3(num_batches), dim3(1), 0, 0, gt_count, match, num_anchors, num_labels); ASSIGN_ANCHOR_CUDA_CHECK(hipPeekAtLastError()); } } // namespace mshadow namespace mxnet { namespace op { template<> Operator *CreateOp<gpu>(AssignAnchorParam param, int dtype) { Operator *op = NULL; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new AssignAnchorOp<gpu, DType>(param); }); return op; } } // namespace op } // namespace mxnet
ceb6e451d18202ea4fe4afa5e8050bdd9ed18c7c.cu
/*
 * Author: ddlee, [email protected]
 * modified from MXNet's MultiboxTarget Operator
 */
#include "./assign_anchor-inl.h"
#include <mshadow/cuda/tensor_gpu-inl.cuh>

#define ASSIGN_ANCHOR_CUDA_CHECK(condition) \
  /* Code block avoids redefinition of cudaError_t error */ \
  do { \
    cudaError_t error = condition; \
    CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \
  } while (0)

namespace mshadow {
namespace cuda {

template<typename DType>
__global__ void InitGroundTruthFlags(DType *gt_flags, const DType *labels,
                                     const int num_batches,
                                     const int num_labels,
                                     const int label_width) {
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index >= num_batches * num_labels) return;
  int b = index / num_labels;
  int l = index % num_labels;
  if (*(labels + b * num_labels * label_width + l * label_width) == -1.f) {
    *(gt_flags + b * num_labels + l) = 0;  // dummy gt
  } else {
    *(gt_flags + b * num_labels + l) = 1;  // need to be matched
  }
}

template<typename DType>
__global__ void FindBestMatches(DType *best_matches, DType *gt_flags,
                                DType *gt_count, DType *anchor_flags,
                                DType *anchor_cls,
                                const DType *labels, const DType *overlaps,
                                const int num_anchors, const int num_labels,
                                const int label_width) {
  int nbatch = blockIdx.x;
  gt_flags += nbatch * num_labels;
  gt_count += nbatch * num_labels;
  overlaps += nbatch * num_anchors * num_labels;
  best_matches += nbatch * num_anchors;
  anchor_flags += nbatch * num_anchors;
  labels += nbatch * num_labels * label_width;
  anchor_cls += nbatch * num_anchors;
  const int num_threads = kMaxThreadsPerBlock;
  __shared__ int max_indices_y[kMaxThreadsPerBlock];
  __shared__ int max_indices_x[kMaxThreadsPerBlock];
  __shared__ float max_values[kMaxThreadsPerBlock];

  while (1) {
    // check if all done.
    bool finished = true;
    for (int i = 0; i < num_labels; ++i) {
      if (gt_flags[i] > .5) {
        finished = false;
        break;
      }
    }
    if (finished) break;  // all done.

    // finding max indices in different threads
    int max_x = -1;
    int max_y = -1;
    DType max_value = 1e-6;  // start with very small overlap
    for (int i = threadIdx.x; i < num_anchors; i += num_threads) {
      if (anchor_flags[i] > .5) continue;
      for (int j = 0; j < num_labels; ++j) {
        if (gt_flags[j] > .5) {
          DType temp = overlaps[i * num_labels + j];
          if (temp > max_value) {
            max_x = j;
            max_y = i;
            max_value = temp;
          }
        }
      }
    }
    max_indices_x[threadIdx.x] = max_x;
    max_indices_y[threadIdx.x] = max_y;
    max_values[threadIdx.x] = max_value;
    __syncthreads();
    if (threadIdx.x == 0) {
      // merge results and assign best match, over all threads
      int max_x = -1;
      int max_y = -1;
      DType max_value = -1;
      for (int k = 0; k < num_threads; ++k) {
        if (max_indices_y[k] < 0 || max_indices_x[k] < 0) continue;
        float temp = max_values[k];
        if (temp > max_value) {
          max_x = max_indices_x[k];
          max_y = max_indices_y[k];
          max_value = temp;
        }
      }
      if (max_x >= 0 && max_y >= 0) {
        best_matches[max_y] = max_value;
        int offset_l = static_cast<int>(max_x) * label_width;
        anchor_cls[max_y] = labels[offset_l] + 1;
        // mark flags as visited
        // best match
        // gt_count: -1 -> 0
        // anchor_flag: -1 -> 1
        gt_flags[max_x] = 0.f;
        gt_count[max_x] = 0.f;
        anchor_flags[max_y] = 1.f;
      } else {
        // no more good matches
        for (int i = 0; i < num_labels; ++i) {
          gt_flags[i] = 0.f;
        }
      }
    }
    __syncthreads();
  }
}

template<typename DType>
__global__ void FindGoodMatches(DType *best_matches, DType *anchor_flags,
                                DType *match, DType *anchor_cls,
                                const DType *labels, const DType *overlaps,
                                const int num_anchors, const int num_labels,
                                const int label_width,
                                const float overlap_threshold) {
  int nbatch = blockIdx.x;
  overlaps += nbatch * num_anchors * num_labels;
  best_matches += nbatch * num_anchors;
  anchor_flags += nbatch * num_anchors;
  match += nbatch * num_anchors;
  anchor_cls += nbatch * num_anchors;
  labels += nbatch * num_labels * label_width;
  const int num_threads = kMaxThreadsPerBlock;
  for (int i = threadIdx.x; i < num_anchors; i += num_threads) {
    if (anchor_flags[i] < 0) {
      int idx = -1;
      float max_value = -1.f;
      for (int j = 0; j < num_labels; ++j) {
        DType temp = overlaps[i * num_labels + j];
        if (temp > max_value) {
          max_value = temp;
          idx = j;
        }
      }
      if (max_value > overlap_threshold && (idx >= 0)) {
        best_matches[i] = max_value;
        int offset_l = static_cast<int>(idx) * label_width;
        anchor_cls[i] = labels[offset_l] + 1;
        // good match
        // anchor_flag: -1 -> 0.9
        anchor_flags[i] = 0.9f;
        // cache good anchor matched label id
        match[i] = idx;
      }
    }
  }
}

template<typename DType>
__global__ void CollectGoodMatches(DType *gt_count, const DType *match,
                                   const int num_anchors,
                                   const int num_labels) {
  int nbatch = blockIdx.x;
  gt_count += nbatch * num_labels;
  match += nbatch * num_anchors;
  int idx = -1;
  for (int i = 0; i < num_anchors; i++) {
    idx = int(match[i]);
    if (idx > -1) {
      // accumulate each good match on that gt
      gt_count[idx] += 1.f;
    }
  }
}
}  // namespace cuda

template<typename DType>
inline void AssignAnchorForward(const Tensor<gpu, 2, DType> &anchor_flags_,
                                const Tensor<gpu, 2, DType> &best_matches_,
                                const Tensor<gpu, 2, DType> &gt_count_,
                                const Tensor<gpu, 2, DType> &anchor_cls_,
                                const Tensor<gpu, 2, DType> &anchors,
                                const Tensor<gpu, 3, DType> &labels,
                                const Tensor<gpu, 4, DType> &temp_space,
                                const float overlap_threshold) {
  const int num_batches = labels.size(0);
  const int num_labels = labels.size(1);
  const int label_width = labels.size(2);
  const int num_anchors = anchors.size(0);
  CHECK_GE(num_batches, 1);
  CHECK_GT(num_labels, 2);
  CHECK_GE(num_anchors, 1);
  temp_space[1] = -1.f;
  temp_space[2] = -1.f;
  DType *gt_flags = temp_space[1].dptr_;
  DType *match = temp_space[2].dptr_;
  DType *gt_count = gt_count_.dptr_;
  DType *anchor_flags = anchor_flags_.dptr_;
  DType *best_matches = best_matches_.dptr_;
  DType *anchor_cls = anchor_cls_.dptr_;

  // init ground-truth flags, by checking valid labels
  const int num_threads = cuda::kMaxThreadsPerBlock;
  dim3 init_thread_dim(num_threads);
  dim3 init_block_dim((num_batches * num_labels - 1) / num_threads + 1);
  cuda::CheckLaunchParam(init_block_dim, init_thread_dim, "AssignAnchor Init");
  cuda::InitGroundTruthFlags<DType><<<init_block_dim, init_thread_dim>>>(
      gt_flags, labels.dptr_, num_batches, num_labels, label_width);
  ASSIGN_ANCHOR_CUDA_CHECK(cudaPeekAtLastError());

  // compute best matches
  const DType *overlaps = temp_space[0].dptr_;
  cuda::CheckLaunchParam(num_batches, num_threads, "AssignAnchor Matching");
  cuda::FindBestMatches<DType><<<num_batches, num_threads>>>(best_matches,
      gt_flags, gt_count, anchor_flags, anchor_cls, labels.dptr_, overlaps,
      num_anchors, num_labels, label_width);
  ASSIGN_ANCHOR_CUDA_CHECK(cudaPeekAtLastError());

  // find good matches with overlap > threshold
  cuda::CheckLaunchParam(num_batches, num_threads, "AssignAnchor FindGood");
  cuda::FindGoodMatches<DType><<<num_batches, num_threads>>>(best_matches,
      anchor_flags, match, anchor_cls, labels.dptr_, overlaps,
      num_anchors, num_labels, label_width, overlap_threshold);
  ASSIGN_ANCHOR_CUDA_CHECK(cudaPeekAtLastError());

  cuda::CheckLaunchParam(num_batches, 1, "AssignAnchor Collect");
  cuda::CollectGoodMatches<DType><<<num_batches, 1>>>(gt_count, match,
      num_anchors, num_labels);
  ASSIGN_ANCHOR_CUDA_CHECK(cudaPeekAtLastError());
}
}  // namespace mshadow

namespace mxnet {
namespace op {
template<>
Operator *CreateOp<gpu>(AssignAnchorParam param, int dtype) {
  Operator *op = NULL;
  MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
    op = new AssignAnchorOp<gpu, DType>(param);
  });
  return op;
}
}  // namespace op
}  // namespace mxnet
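For reference, the matching that FindBestMatches, FindGoodMatches, and CollectGoodMatches implement above can be summarized by the single-threaded sketch below. It is only an illustrative CPU reference, not part of the operator: the function name cpuAssignAnchors and its container-based interface are assumptions, class assignment and the dummy-label filtering done by InitGroundTruthFlags are omitted, and gt_count tallies only the thresholded ("good") matches, exactly as in CollectGoodMatches.

// Illustrative CPU reference of the anchor/ground-truth matching above
// (names and interface are assumptions, not part of the operator).
#include <vector>

void cpuAssignAnchors(const std::vector<float>& overlaps,  // num_anchors x num_labels, row-major
                      int num_anchors, int num_labels,
                      float overlap_threshold,
                      std::vector<int>& anchor_to_label,   // -1 if unmatched
                      std::vector<int>& gt_count) {
  anchor_to_label.assign(num_anchors, -1);
  gt_count.assign(num_labels, 0);
  std::vector<bool> gt_matched(num_labels, false);

  // Phase 1: repeatedly take the globally best (anchor, label) pair,
  // mirroring FindBestMatches.
  for (int iter = 0; iter < num_labels; ++iter) {
    int best_a = -1, best_l = -1;
    float best = 1e-6f;  // require a strictly positive overlap
    for (int a = 0; a < num_anchors; ++a) {
      if (anchor_to_label[a] >= 0) continue;
      for (int l = 0; l < num_labels; ++l) {
        if (gt_matched[l]) continue;
        float o = overlaps[a * num_labels + l];
        if (o > best) { best = o; best_a = a; best_l = l; }
      }
    }
    if (best_a < 0) break;  // no more usable matches
    anchor_to_label[best_a] = best_l;
    gt_matched[best_l] = true;
  }

  // Phase 2: remaining anchors take their argmax label if the overlap
  // exceeds the threshold, mirroring FindGoodMatches/CollectGoodMatches.
  for (int a = 0; a < num_anchors; ++a) {
    if (anchor_to_label[a] >= 0) continue;
    int best_l = -1;
    float best = -1.f;
    for (int l = 0; l < num_labels; ++l) {
      float o = overlaps[a * num_labels + l];
      if (o > best) { best = o; best_l = l; }
    }
    if (best_l >= 0 && best > overlap_threshold) {
      anchor_to_label[a] = best_l;
      gt_count[best_l] += 1;
    }
  }
}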
642dacb05f3e51b7242d2ae2e59c34d4af788fba.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector arithmetic ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar arithmetic =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector comparison ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar comparison =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector math (one argument) ============================================= // Calculate the arc cosine of the input argument. extern "C" // Calculate the nonnegative arc hyperbolic cosine of the input argument. extern "C" // Calculate the arc sine of the input argument. extern "C" // Calculate the arc hyperbolic sine of the input argument. extern "C" // Calculate the arc tangent of the input argument. extern "C" // Calculate the arc hyperbolic tangent of the input argument. extern "C" // Calculate the cube root of the input argument. extern "C" // Calculate ceiling of the input argument. extern "C" // Calculate the cosine of the input argument. extern "C" // Calculate the hyperbolic cosine of the input argument. extern "C" // Calculate the cosine of the input argument p . extern "C" // Calculate the complementary error function of the input argument. extern "C" // Calculate the inverse complementary error function of the input argument. extern "C" // Calculate the scaled complementary error function of the input argument. extern "C" // Calculate the error function of the input argument. extern "C" // Calculate the inverse error function of the input argument. extern "C" // Calculate the base 10 exponential of the input argument. extern "C" // Calculate the base 2 exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument, minus 1. extern "C" // Calculate the absolute value of its argument. extern "C" // Calculate the largest integer less than or equal to x. extern "C" // Calculate the value of the Bessel function of the first kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the first kind of order 1 for the input argument. extern "C" // Calculate the natural logarithm of the absolute value of the gamma function of the input argument. extern "C" // Calculate the base 10 logarithm of the input argument. extern "C" // Calculate the value of l o g e ( 1 + x ) . extern "C" // Calculate the base 2 logarithm of the input argument. extern "C" // Calculate the doubleing point representation of the exponent of the input argument. extern "C" // Calculate the natural logarithm of the input argument. extern "C" // Calculate the standard normal cumulative distribution function. extern "C" // Calculate the inverse of the standard normal cumulative distribution function. extern "C" // Calculate reciprocal cube root function. extern "C" // Round input to nearest integer value in doubleing-point. extern "C" // Round to nearest integer value in doubleing-point. extern "C" // Calculate the reciprocal of the square root of the input argument. 
extern "C" // Calculate the sine of the input argument. extern "C" // Calculate the hyperbolic sine of the input argument. extern "C" // Calculate the sine of the input argument p . extern "C" // Calculate the square root of the input argument. extern "C" // Calculate the tangent of the input argument. extern "C" // Calculate the hyperbolic tangent of the input argument. extern "C" // Calculate the gamma function of the input argument. extern "C" // Truncate input argument to the integral part. extern "C" // Calculate the value of the Bessel function of the second kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the second kind of order 1 for the input argument. extern "C" //=== Vector math (two arguments) ============================================ // Create value with given magnitude, copying sign of second value. extern "C" // Compute the positive difference between x and y. extern "C" // Divide two doubleing point values. extern "C" // Determine the maximum numeric value of the arguments. extern "C" // Determine the minimum numeric value of the arguments. extern "C" // Calculate the doubleing-point remainder of x / y. extern "C" // Calculate the square root of the sum of squares of two arguments. extern "C" // Return next representable single-precision doubleing-point value afer argument. extern "C" // Calculate the value of first argument to the power of second argument. extern "C" // Compute single-precision doubleing-point remainder. extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //WARNING : device_sum size should be gridDim.x __global__ void vec_fmod (int n, double *result, double *x, double *y) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int idy = threadIdx.y + blockIdx.y * blockDim.y; int id = idy * gridDim.x * blockDim.x + idx; if (id < n) { result[id] = fmod(x[id], y[id]); } }
642dacb05f3e51b7242d2ae2e59c34d4af788fba.cu
#include "includes.h" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector arithmetic ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar arithmetic =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector comparison ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar comparison =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector math (one argument) ============================================= // Calculate the arc cosine of the input argument. extern "C" // Calculate the nonnegative arc hyperbolic cosine of the input argument. extern "C" // Calculate the arc sine of the input argument. extern "C" // Calculate the arc hyperbolic sine of the input argument. extern "C" // Calculate the arc tangent of the input argument. extern "C" // Calculate the arc hyperbolic tangent of the input argument. extern "C" // Calculate the cube root of the input argument. extern "C" // Calculate ceiling of the input argument. extern "C" // Calculate the cosine of the input argument. extern "C" // Calculate the hyperbolic cosine of the input argument. extern "C" // Calculate the cosine of the input argument × p . extern "C" // Calculate the complementary error function of the input argument. extern "C" // Calculate the inverse complementary error function of the input argument. extern "C" // Calculate the scaled complementary error function of the input argument. extern "C" // Calculate the error function of the input argument. extern "C" // Calculate the inverse error function of the input argument. extern "C" // Calculate the base 10 exponential of the input argument. extern "C" // Calculate the base 2 exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument, minus 1. extern "C" // Calculate the absolute value of its argument. extern "C" // Calculate the largest integer less than or equal to x. extern "C" // Calculate the value of the Bessel function of the first kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the first kind of order 1 for the input argument. extern "C" // Calculate the natural logarithm of the absolute value of the gamma function of the input argument. extern "C" // Calculate the base 10 logarithm of the input argument. extern "C" // Calculate the value of l o g e ( 1 + x ) . extern "C" // Calculate the base 2 logarithm of the input argument. extern "C" // Calculate the doubleing point representation of the exponent of the input argument. extern "C" // Calculate the natural logarithm of the input argument. extern "C" // Calculate the standard normal cumulative distribution function. extern "C" // Calculate the inverse of the standard normal cumulative distribution function. extern "C" // Calculate reciprocal cube root function. extern "C" // Round input to nearest integer value in doubleing-point. extern "C" // Round to nearest integer value in doubleing-point. extern "C" // Calculate the reciprocal of the square root of the input argument. extern "C" // Calculate the sine of the input argument. extern "C" // Calculate the hyperbolic sine of the input argument. 
extern "C" // Calculate the sine of the input argument × p . extern "C" // Calculate the square root of the input argument. extern "C" // Calculate the tangent of the input argument. extern "C" // Calculate the hyperbolic tangent of the input argument. extern "C" // Calculate the gamma function of the input argument. extern "C" // Truncate input argument to the integral part. extern "C" // Calculate the value of the Bessel function of the second kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the second kind of order 1 for the input argument. extern "C" //=== Vector math (two arguments) ============================================ // Create value with given magnitude, copying sign of second value. extern "C" // Compute the positive difference between x and y. extern "C" // Divide two doubleing point values. extern "C" // Determine the maximum numeric value of the arguments. extern "C" // Determine the minimum numeric value of the arguments. extern "C" // Calculate the doubleing-point remainder of x / y. extern "C" // Calculate the square root of the sum of squares of two arguments. extern "C" // Return next representable single-precision doubleing-point value afer argument. extern "C" // Calculate the value of first argument to the power of second argument. extern "C" // Compute single-precision doubleing-point remainder. extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //WARNING : device_sum size should be gridDim.x __global__ void vec_fmod (int n, double *result, double *x, double *y) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int idy = threadIdx.y + blockIdx.y * blockDim.y; int id = idy * gridDim.x * blockDim.x + idx; if (id < n) { result[id] = fmod(x[id], y[id]); } }
22add1c034167ce02b6437d3b398c8a839221a00.hip
// !!! This is a file automatically generated by hipify!!! #include "stdio.h" #include "CudaMatrixKernels.hh" #include <math_constants.h> #include <hip/hip_runtime.h> #include <limits> #ifdef __CDT_PARSER__ #define __global__ #define __device__ #define __host__ #define __shared__ #endif #define THREADS_PER_BLOCK 1024 /*****************************************************************************/ /* HELPER FUNCTIONS */ /*****************************************************************************/ /* * * atomicAdd for double * */ #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600 #else __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __longlong_as_double(old); } #endif /*****************************************************************************/ /* * * mixed precision axpy * */ __global__ void __cuda_axpy(int nElements, float alpha, const float *x, double *y){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) y[index] += alpha * x[index]; } void _cuda_axpy(int nElements, float alpha, const float *x, double *y) { int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_axpy) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, nElements, alpha, x, y); } /* * * exp * */ template<typename T> __global__ void __cuda_exp(T *data, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) data[index] = exp(data[index]); } template<typename T> void _cuda_exp(T *data, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_exp) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, data, nElements); } template __global__ void __cuda_exp<float>(float *, unsigned int); template __global__ void __cuda_exp<double>(double *, unsigned int); template void _cuda_exp<float>(float *, unsigned int, unsigned int); template void _cuda_exp<double>(double *, unsigned int, unsigned int); /* * * signedPow * */ template<typename T> __global__ void __cuda_signedPow(T *data, unsigned int nElements, T p){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) { if(data[index] < 0) data[index] = -pow(-data[index], p); else data[index] = pow(data[index], p); } } template<typename T> void _cuda_signedPow(T *data, unsigned int nRows, unsigned int nColumns, T p) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_signedPow) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, data, nElements, p); } template __global__ void __cuda_signedPow<float>(float *, unsigned int, float); template __global__ void __cuda_signedPow<double>(double *, unsigned int, double); template void _cuda_signedPow<float>(float *, unsigned int, unsigned int, float); template void _cuda_signedPow<double>(double *, unsigned int, unsigned int, double); /* * * log * */ template<typename T> __global__ void __cuda_log(T *data, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) data[index] = 
log(data[index]); } template<typename T> void _cuda_log(T *data, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_log) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, data, nElements); } template __global__ void __cuda_log<float>(float *, unsigned int); template __global__ void __cuda_log<double>(double *, unsigned int); template void _cuda_log<float>(float *, unsigned int, unsigned int); template void _cuda_log<double>(double *, unsigned int, unsigned int); /* * * sin * */ template<typename T> __global__ void __cuda_sin(T *data, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) data[index] = sin(data[index]); } template<typename T> void _cuda_sin(T *data, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_sin) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, data, nElements); } template __global__ void __cuda_sin<float>(float *, unsigned int); template __global__ void __cuda_sin<double>(double *, unsigned int); template void _cuda_sin<float>(float *, unsigned int, unsigned int); template void _cuda_sin<double>(double *, unsigned int, unsigned int); /* * * cos * */ template<typename T> __global__ void __cuda_cos(T *data, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) data[index] = cos(data[index]); } template<typename T> void _cuda_cos(T *data, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_cos) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, data, nElements); } template __global__ void __cuda_cos<float>(float *, unsigned int); template __global__ void __cuda_cos<double>(double *, unsigned int); template void _cuda_cos<float>(float *, unsigned int, unsigned int); template void _cuda_cos<double>(double *, unsigned int, unsigned int); /* * * asin * */ template<typename T> __global__ void __cuda_asin(T *data, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) data[index] = asin(data[index]); } template<typename T> void _cuda_asin(T *data, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_asin) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, data, nElements); } template __global__ void __cuda_asin<float>(float *, unsigned int); template __global__ void __cuda_asin<double>(double *, unsigned int); template void _cuda_asin<float>(float *, unsigned int, unsigned int); template void _cuda_asin<double>(double *, unsigned int, unsigned int); /* * * acos * */ template<typename T> __global__ void __cuda_acos(T *data, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) data[index] = acos(data[index]); } template<typename T> void _cuda_acos(T *data, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_acos) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, data, nElements); } template __global__ void __cuda_acos<float>(float 
*, unsigned int); template __global__ void __cuda_acos<double>(double *, unsigned int); template void _cuda_acos<float>(float *, unsigned int, unsigned int); template void _cuda_acos<double>(double *, unsigned int, unsigned int); /* * * abs * */ template<typename T> __global__ void __cuda_abs(T *data, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) { if (data[index] < 0) data[index] = -data[index]; } } template<typename T> void _cuda_abs(T *data, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_abs) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, data, nElements); } template __global__ void __cuda_abs<float>(float *, unsigned int); template __global__ void __cuda_abs<double>(double *, unsigned int); template void _cuda_abs<float>(float *, unsigned int, unsigned int); template void _cuda_abs<double>(double *, unsigned int, unsigned int); /* * * tanh * * */ template<typename T> __global__ void __cuda_tanh(T *data, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) data[index] = tanh(data[index]); } template<typename T> void _cuda_tanh(T *data, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_tanh) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, data, nElements); } template __global__ void __cuda_tanh<float>(float *, unsigned int); template __global__ void __cuda_tanh<double>(double *, unsigned int); template void _cuda_tanh<float>(float *, unsigned int, unsigned int); template void _cuda_tanh<double>(double *, unsigned int, unsigned int); /* * * sigmoid * */ template<typename T> __global__ void __cuda_sigmoid1(T *data, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) data[index] = 1.0 / (1.0 + exp(-data[index])); } template<typename T> __global__ void __cuda_sigmoid(T gamma, T *data, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) data[index] = 1.0 / (1.0 + exp(-gamma * data[index])); } template<typename T> void _cuda_sigmoid(T gamma, T *data, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); if (gamma == 1.0) hipLaunchKernelGGL(( __cuda_sigmoid1) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, data, nElements); else hipLaunchKernelGGL(( __cuda_sigmoid) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, gamma, data, nElements); } template void _cuda_sigmoid<double>(double gamma, double *data, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_sigmoid<double>(double gamma, double *data, unsigned int nElements); template __global__ void __cuda_sigmoid1<double>(double *data, unsigned int nElements); template void _cuda_sigmoid<float>(float gamma, float *data, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_sigmoid<float>(float gamma, float *data, unsigned int nElements); template __global__ void __cuda_sigmoid1<float>(float *data, unsigned int nElements); /* * * triangle * */ template<typename T> __global__ void __cuda_triangle(T *data, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) { 
if ((data[index] < -1.0) || (data[index] > 1.0)) data[index] = 0.0; else if (data[index] < 0.0) data[index] = 1.0 + data[index]; else data[index] = 1.0 - data[index]; } } template<typename T> void _cuda_triangle(T *data, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_triangle) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, data, nElements); } template void _cuda_triangle<double>(double *data, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_triangle<double>(double *data, unsigned int nElements); template void _cuda_triangle<float>(float *data, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_triangle<float>(float *data, unsigned int nElements); /* * * sum * */ template<typename T> __global__ void __cuda_sum(T *data, unsigned int nRows, unsigned int nColumns, T *result){ *result = 0; for (int i = 0; i < nRows * nColumns; i++){ *result += data[i]; } } template<typename T> void _cuda_sum(T *data, unsigned int nRows, unsigned int nColumns, T *result) { // no parallelization, but probably not relevant hipLaunchKernelGGL(( __cuda_sum) , dim3(1),dim3(1), 0, 0, data, nRows, nColumns, result); } template __global__ void __cuda_sum<double>(double *data, unsigned int nRows, unsigned int nColumns, double *result); template void _cuda_sum<double>(double *data, unsigned int nRows, unsigned int nColumns, double *result); template __global__ void __cuda_sum<float>(float *data, unsigned int nRows, unsigned int nColumns, float *result); template void _cuda_sum<float>(float *data, unsigned int nRows, unsigned int nColumns, float *result); /* * * columnwiseSquaredEuclideanDistance * */ template<typename T> __global__ void __cuda_columnwiseSquaredEuclideanDistance(const T *A, unsigned int nRows, unsigned int nColumns, const T *v, T *result){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nRows * nColumns) { T d = A[index] - v[index % nRows]; d = d*d; atomicAdd(&(result[index / nRows]), d); } } template<typename T> void _cuda_columnwiseSquaredEuclideanDistance(const T *A, unsigned int nRows, unsigned int nColumns, const T *v, T *result) { int gridSize = (int)ceil( (float) (nRows * nColumns)/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_columnwiseSquaredEuclideanDistance) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, A, nRows, nColumns, v, result); } template void _cuda_columnwiseSquaredEuclideanDistance<double>(const double *A, unsigned int nRows, unsigned int nColumns, const double *v, double *result); template __global__ void __cuda_columnwiseSquaredEuclideanDistance<double>(const double *A, unsigned int nRows, unsigned int nColumns, const double *v, double *result); template void _cuda_columnwiseSquaredEuclideanDistance<float>(const float *A, unsigned int nRows, unsigned int nColumns, const float *v, float *result); template __global__ void __cuda_columnwiseSquaredEuclideanDistance<float>(const float *A, unsigned int nRows, unsigned int nColumns, const float *v, float *result); /* * * clone * */ template<typename T> __global__ void __cuda_clone(const T *dataA, T *dataB, unsigned int nRowsB, unsigned int nColumnsB, unsigned int nClones){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nRowsB * nColumnsB) { unsigned int nRowsA = nRowsB / nClones; unsigned int rowA = (index % nRowsA); unsigned int colA = index / nRowsB; dataB[index] = dataA[colA * nRowsA + rowA]; } } 
template<typename T> void _cuda_clone(const T *dataA, T *dataB, unsigned int nRowsB, unsigned int nColumnsB, unsigned int nClones) { int nElementsB = nRowsB * nColumnsB; int gridSize = (int)ceil( (float) nElementsB/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_clone) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, dataA, dataB, nRowsB, nColumnsB, nClones); } template void _cuda_clone<double>(const double *dataA, double *dataB, unsigned int nRowsB, unsigned int nColumnsB, unsigned int nClones); template __global__ void __cuda_clone<double>(const double *dataA, double *dataB, unsigned int nRowsB, unsigned int nColumnsB, unsigned int nClones); template void _cuda_clone<float>(const float *dataA, float *dataB, unsigned int nRowsB, unsigned int nColumnsB, unsigned int nClones); template __global__ void __cuda_clone<float>(const float *dataA, float *dataB, unsigned int nRowsB, unsigned int nColumnsB, unsigned int nClones); /* * * cloneElementwise * */ template<typename T> __global__ void __cuda_cloneElementwise(const T *dataA, T *dataB, unsigned int nElementsB, unsigned int nClones){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElementsB) { unsigned int indexA = index / nClones; dataB[index] = dataA[indexA]; } } template<typename T> void _cuda_cloneElementwise(const T *dataA, T *dataB, unsigned int nRowsB, unsigned int nColumnsB, unsigned int nClones) { int nElementsB = nRowsB * nColumnsB; int gridSize = (int)ceil( (float) nElementsB/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_cloneElementwise) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, dataA, dataB, nElementsB, nClones); } template void _cuda_cloneElementwise<double>(const double *dataA, double *dataB, unsigned int nRowsB, unsigned int nColumnsB, unsigned int nClones); template __global__ void __cuda_cloneElementwise<double>(const double *dataA, double *dataB, unsigned int nElementsB, unsigned int nClones); template void _cuda_cloneElementwise<float>(const float *dataA, float *dataB, unsigned int nRowsB, unsigned int nColumnsB, unsigned int nClones); template __global__ void __cuda_cloneElementwise<float>(const float *dataA, float *dataB, unsigned int nElementsB, unsigned int nClones); /* * * addElementsByModuloIndex * */ template<typename T> __global__ void __cuda_addElementsByModuloIndex(const T *dataA, T *dataB, unsigned int nRowsA, unsigned int nRowsB, unsigned int nColumns){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nRowsB * nColumns) { unsigned int rowB = index % nRowsB; unsigned int column = index / nRowsB; for (unsigned int j = 0; j < nRowsA / nRowsB; j++) { dataB[index] += dataA[column * nRowsA + (rowB + j * nRowsB)]; } } } template<typename T> void _cuda_addElementsByModuloIndex(const T *dataA, T *dataB, unsigned int nRowsA, unsigned int nRowsB, unsigned int nColumns) { unsigned int nElementsB = nRowsB * nColumns; int gridSize = (int)ceil( (float) nElementsB/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_addElementsByModuloIndex) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, dataA, dataB, nRowsA, nRowsB, nColumns); } template void _cuda_addElementsByModuloIndex<double>(const double *dataA, double *dataB, unsigned int nRowsA, unsigned int nRowsB, unsigned int nColumns); template __global__ void __cuda_addElementsByModuloIndex<double>(const double *dataA, double *dataB, unsigned int nRowsA, unsigned int nRowsB, unsigned int nColumns); template void _cuda_addElementsByModuloIndex<float>(const float *dataA, float *dataB, unsigned int nRowsA, unsigned int 
nRowsB, unsigned int nColumns); template __global__ void __cuda_addElementsByModuloIndex<float>(const float *dataA, float *dataB, unsigned int nRowsA, unsigned int nRowsB, unsigned int nColumns); /* * * chiSquareFeatureMap * */ template<typename T> __global__ void __cuda_chiSquareFeatureMap(const T *dataA, T *dataB, unsigned int nElementsB, unsigned int n, T samplingDistance, T min){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElementsB) { unsigned int j = index % (2*n + 1); unsigned int baseIndex = index / (2*n + 1); T x = (dataA[baseIndex] > min ? dataA[baseIndex] : min); if (j == 0) { dataB[index] = sqrt(samplingDistance * x); } else if (j % 2 == 1) { T kappa = 1.0 / cosh(CUDART_PI * (j+1)/2 * samplingDistance); dataB[index] = sqrt(2 * kappa * samplingDistance * x) * cos((j+1)/2 * samplingDistance * log(x)); } else { T kappa = 1.0 / cosh(CUDART_PI * j/2 * samplingDistance); dataB[index] = sqrt(2 * kappa * samplingDistance * x) * sin(j/2 * samplingDistance * log(x)); } } } template<typename T> void _cuda_chiSquareFeatureMap(const T *dataA, T *dataB, unsigned int nElementsB, unsigned int n, T samplingDistance, T min) { int gridSize = (int)ceil( (float) nElementsB/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_chiSquareFeatureMap) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, dataA, dataB, nElementsB, n, samplingDistance, min); } template void _cuda_chiSquareFeatureMap<double>(const double *dataA, double *dataB, unsigned int nElementsB, unsigned int n, double samplingDistance, double min); template __global__ void __cuda_chiSquareFeatureMap<double>(const double *dataA, double *dataB, unsigned int nElementsB, unsigned int n, double samplingDistance, double min); template void _cuda_chiSquareFeatureMap<float>(const float *dataA, float *dataB, unsigned int nElementsB, unsigned int n, float samplingDistance, float min); template __global__ void __cuda_chiSquareFeatureMap<float>(const float *dataA, float *dataB, unsigned int nElementsB, unsigned int n, float samplingDistance, float min); /* * * histogramIntersectionFeatureMap * */ template<typename T> __global__ void __cuda_histogramIntersectionFeatureMap(const T *dataA, T *dataB, unsigned int nElementsB, unsigned int n, T samplingDistance, T min){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElementsB) { unsigned int j = index % (2*n + 1); unsigned int baseIndex = index / (2*n + 1); T x = (dataA[baseIndex] > min ? 
dataA[baseIndex] : min); if (j == 0) { dataB[index] = sqrt(2 / CUDART_PI * samplingDistance * x); } else if (j % 2 == 1) { T kappa = 2.0 / (CUDART_PI * (1 + 4 * (j+1)/2 * samplingDistance * (j+1)/2 * samplingDistance)); dataB[index] = sqrt(2 * kappa * samplingDistance * x) * cos((j+1)/2 * samplingDistance * log(x)); } else { T kappa = 2.0 / (CUDART_PI * (1 + 4 * j/2 * samplingDistance * j/2 * samplingDistance)); dataB[index] = sqrt(2 * kappa * samplingDistance * x) * sin(j/2 * samplingDistance * log(x)); } } } template<typename T> void _cuda_histogramIntersectionFeatureMap(const T *dataA, T *dataB, unsigned int nElementsB, unsigned int n, T samplingDistance, T min) { int gridSize = (int)ceil( (float) nElementsB/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_histogramIntersectionFeatureMap) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, dataA, dataB, nElementsB, n, samplingDistance, min); } template void _cuda_histogramIntersectionFeatureMap<double>(const double *dataA, double *dataB, unsigned int nElementsB, unsigned int n, double samplingDistance, double min); template __global__ void __cuda_histogramIntersectionFeatureMap<double>(const double *dataA, double *dataB, unsigned int nElementsB, unsigned int n, double samplingDistance, double min); template void _cuda_histogramIntersectionFeatureMap<float>(const float *dataA, float *dataB, unsigned int nElementsB, unsigned int n, float samplingDistance, float min); template __global__ void __cuda_histogramIntersectionFeatureMap<float>(const float *dataA, float *dataB, unsigned int nElementsB, unsigned int n, float samplingDistance, float min); /* * * elementwiseMultiplicationWithChiSquareFeatureMapDerivative * */ template<typename T> __global__ void __cuda_elementwiseMultiplicationWithApproximateFeatureMapDerivative(const T *dataA, T *dataB, unsigned int nElements, unsigned int n, T samplingDistance, T kappa0){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) { unsigned int j = index % (2 * n + 1); if (j == 0) { dataB[index] *= dataA[index]; } else if (j % 2 == 1) { dataB[index] *= dataA[index] - (j+1) * samplingDistance * dataA[index + 1]; } else { dataB[index] *= dataA[index] + j * samplingDistance * dataA[index - 1]; } dataB[index] *= samplingDistance * kappa0 / (2.0 * dataA[index - j] * dataA[index - j]); } } template<typename T> void _cuda_elementwiseMultiplicationWithApproximateFeatureMapDerivative(const T *dataA, T *dataB, unsigned int nElements, unsigned int n, T samplingDistance, T kappa0) { int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_elementwiseMultiplicationWithApproximateFeatureMapDerivative) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, dataA, dataB, nElements, n, samplingDistance, kappa0); } template void _cuda_elementwiseMultiplicationWithApproximateFeatureMapDerivative<double>(const double *dataA, double *dataB, unsigned int nElements, unsigned int n, double samplingDistance, double kappa0); template __global__ void __cuda_elementwiseMultiplicationWithApproximateFeatureMapDerivative<double>(const double *dataA, double *dataB, unsigned int nElements, unsigned int n, double samplingDistance, double kappa0); template void _cuda_elementwiseMultiplicationWithApproximateFeatureMapDerivative<float>(const float *dataA, float *dataB, unsigned int nElements, unsigned int n, float samplingDistance, float kappa0); template __global__ void __cuda_elementwiseMultiplicationWithApproximateFeatureMapDerivative<float>(const float *dataA, float *dataB, 
unsigned int nElements, unsigned int n, float samplingDistance, float kappa0); /* * * addSummedRows * */ template<typename T> __global__ void __cuda_addSummedRows(T *vectorDevPtr, const T *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const T scale){ unsigned int columnIndex = threadIdx.x + blockIdx.x * blockDim.x; if (columnIndex < nColumns){ float result = 0.0; for (unsigned int i = 0; i < nRows; i++){ // result += matrix(i,columnIndex) result += matrixDevPtr[columnIndex * nRows + i]; } vectorDevPtr[columnIndex] += scale * result; } } template<typename T> void _cuda_addSummedRows(T *vectorDevPtr, const T *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const T scale){ // parallelize over columns int gridSize = (int)ceil( (float) nColumns/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_addSummedRows) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, vectorDevPtr, matrixDevPtr, nRows, nColumns, scale); } template __global__ void __cuda_addSummedRows(double *vectorDevPtr, const double *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const double scale); template void _cuda_addSummedRows(double *vectorDevPtr, const double *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const double scale); template __global__ void __cuda_addSummedRows(float *vectorDevPtr, const float *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const float scale); template void _cuda_addSummedRows(float *vectorDevPtr, const float *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const float scale); /* * slightly faster version using tmp array * */ template<typename T> __global__ void __cuda_summedRowsTmp(const T *matrixDevPtr, unsigned int nRows, unsigned int nColumns, T *tmpDevPtr, unsigned int tmpRows){ unsigned int columnIndex = threadIdx.x + blockIdx.x * blockDim.x; unsigned int columnPart = blockIdx.y; if (columnIndex < nColumns){ unsigned int nRowsDiv = nRows / tmpRows; unsigned int startRow = columnPart * nRowsDiv; if (startRow < nRows){ unsigned int endRow = columnPart == tmpRows - 1 ? 
nRows : (columnPart + 1) * nRowsDiv; T result = 0.0; for (unsigned int i = startRow; i < endRow; i++){ // result += matrix(i, columnIndex) result += matrixDevPtr[columnIndex * nRows + i]; } tmpDevPtr[columnIndex*tmpRows + columnPart] = result; } } } template<typename T> void _cuda_addSummedRows(T *vectorDevPtr, const T *matrixDevPtr, unsigned int nRows, unsigned int nColumns, T *tmpDevPtr, unsigned int tmpRows, const T scale){ int gridDimx = (int)ceil( (float) nColumns / THREADS_PER_BLOCK); int gridDimy = tmpRows; dim3 gridSize(gridDimx,gridDimy); hipLaunchKernelGGL(( __cuda_summedRowsTmp) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, matrixDevPtr, nRows, nColumns, tmpDevPtr, tmpRows); _cuda_addSummedRows<T>(vectorDevPtr, tmpDevPtr, tmpRows, nColumns, scale); } template __global__ void __cuda_summedRowsTmp<double>(const double *matrixDevPtr, unsigned int nRows, unsigned int nColumns, double *tmpDevPtr, unsigned int tmpRows); template void _cuda_addSummedRows<double>(double *vectorDevPtr, const double *matrixDevPtr, unsigned int nRows, unsigned int nColumns, double *tmpDevPtr, unsigned int tmpRows, const double scale); template __global__ void __cuda_summedRowsTmp<float>(const float *matrixDevPtr, unsigned int nRows, unsigned int nColumns, float *tmpDevPtr, unsigned int tmpRows); template void _cuda_addSummedRows<float>(float *vectorDevPtr, const float *matrixDevPtr, unsigned int nRows, unsigned int nColumns, float *tmpDevPtr, unsigned int tmpRows, const float scale); /* * * addSummedColumns * */ template<typename T> __global__ void __cuda_addSummedColumns(T *vectorDevPtr, const T *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const T scale){ unsigned int rowIndex = threadIdx.x + blockIdx.x * blockDim.x; if (rowIndex < nRows){ T result = 0.0; for (unsigned int i = 0; i < nColumns; i++){ // result += matrix(rowIndex,i) result += matrixDevPtr[i * nRows + rowIndex]; } vectorDevPtr[rowIndex] += scale * result; } } template<typename T> void _cuda_addSummedColumns(T *vectorDevPtr, const T *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const T scale){ // parallelize over rows int gridSize = (int)ceil( (float) nRows/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_addSummedColumns) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, vectorDevPtr, matrixDevPtr, nRows, nColumns, scale); } template __global__ void __cuda_addSummedColumns<double>(double *vectorDevPtr, const double *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const double scale); template void _cuda_addSummedColumns<double>(double *vectorDevPtr, const double *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const double scale); template __global__ void __cuda_addSummedColumns<float>(float *vectorDevPtr, const float *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const float scale); template void _cuda_addSummedColumns<float>(float *vectorDevPtr, const float *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const float scale); /* * addSummedColumnsChannelWise * * */ template<typename T> __global__ void __cuda_addSummedColumnsChannelWise(T *vector, const T* matrix, const unsigned int channels, const unsigned int nRows, const unsigned int nColumns, const T scale) { unsigned int channelIndex = threadIdx.x + blockIdx.x * blockDim.x; if(channelIndex < channels) { unsigned int channelSize = nRows / channels; for(unsigned int i=0; i < channelSize; i++) { for(unsigned int j=0; j < nColumns; j++) { vector[channelIndex] += scale * matrix[j * nRows + channelIndex * channelSize + i]; } 
} } } template<typename T> void _cuda_addSummedColumnsChannelWise(T *vector, const T* matrix, const unsigned int channels, const unsigned int nRows, const unsigned int nColumns, const T scale) { int gridSize = (int)ceil( (float) channels/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_addSummedColumnsChannelWise), dim3(gridSize), dim3(THREADS_PER_BLOCK), 0, 0, vector, matrix, channels, nRows, nColumns, scale); } template __global__ void __cuda_addSummedColumnsChannelWise(double *vector, const double* matrix, const unsigned int channels, const unsigned int nRows, const unsigned int nColumns, const double scale); template __global__ void __cuda_addSummedColumnsChannelWise(float *vector, const float* matrix, const unsigned int channels, const unsigned int nRows, const unsigned int nColumns, const float scale); template void _cuda_addSummedColumnsChannelWise(double *vector, const double* matrix, const unsigned int channels, const unsigned int nRows, const unsigned int nColumns, const double scale); template void _cuda_addSummedColumnsChannelWise(float *vector, const float* matrix, const unsigned int channels, const unsigned int nRows, const unsigned int nColumns, const float scale); /* * * addSquaredSummedColumns * */ template<typename T> __global__ void __cuda_addSquaredSummedColumns(T *vectorDevPtr, const T *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const T scale){ unsigned int rowIndex = threadIdx.x + blockIdx.x * blockDim.x; if (rowIndex < nRows){ T result = 0.0; for (unsigned int i = 0; i < nColumns; i++){ result += matrixDevPtr[i * nRows + rowIndex] * matrixDevPtr[i * nRows + rowIndex]; } vectorDevPtr[rowIndex] += scale * result; } } template<typename T> void _cuda_addSquaredSummedColumns(T *vectorDevPtr, const T *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const T scale){ // parallelize over rows int gridSize = (int)ceil( (float) nRows/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_addSquaredSummedColumns) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, vectorDevPtr, matrixDevPtr, nRows, nColumns, scale); } template __global__ void __cuda_addSquaredSummedColumns(double *vectorDevPtr, const double *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const double scale); template void _cuda_addSquaredSummedColumns(double *vectorDevPtr, const double *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const double scale); template __global__ void __cuda_addSquaredSummedColumns(float *vectorDevPtr, const float *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const float scale); template void _cuda_addSquaredSummedColumns(float *vectorDevPtr, const float *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const float scale); /* * * addSummedNeighborsInARow * */ template<typename T> __global__ void __cuda_addSummedNeighborsInARow(T* dataA, const T* dataB, unsigned int elementsA, unsigned int nNeighbors){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < elementsA){ for (unsigned int n = 0; n < nNeighbors; n++){ dataA[index] += dataB[index * nNeighbors + n]; } } } template<typename T> void _cuda_addSummedNeighborsInARow(T* dataA, const T* dataB, unsigned int rowsA, unsigned int columnsA, unsigned int nNeighbors){ // parallelize over rows int gridSize = (int)ceil( (float) rowsA*columnsA/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_addSummedNeighborsInARow) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, dataA, dataB, rowsA * columnsA, nNeighbors); } template __global__ void 
__cuda_addSummedNeighborsInARow(double* dataA, const double* dataB, unsigned int elementsA, unsigned int nNeighbors); template void _cuda_addSummedNeighborsInARow(double* dataA, const double* dataB, unsigned int rowsA, unsigned int columnsA, unsigned int nNeighbors); template __global__ void __cuda_addSummedNeighborsInARow(float* dataA, const float* dataB, unsigned int elementsA, unsigned int nNeighbors); template void _cuda_addSummedNeighborsInARow(float* dataA, const float* dataB, unsigned int rowsA, unsigned int columnsA, unsigned int nNeighbors); /* * * addWeighted * */ template<typename T> __global__ void __cuda_addWeighted(T *data, const T *X, const T* weights, unsigned int nRows, unsigned int nColumns){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nRows * nColumns) { unsigned int col = index / nRows; data[index] += X[index] * weights[col]; } } template<typename T> void _cuda_addWeighted(T *data, const T *X, const T* weights, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_addWeighted) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, data, X, weights, nRows, nColumns); } template __global__ void __cuda_addWeighted<double>(double *data, const double *X, const double* weights, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_addWeighted<float>(float *data, const float *X, const float* weights, unsigned int nRows, unsigned int nColumns); template void _cuda_addWeighted<double>(double *data, const double *X, const double* weights, unsigned int nRows, unsigned int nColumns); template void _cuda_addWeighted<float>(float *data, const float *X, const float* weights, unsigned int nRows, unsigned int nColumns); /* * * elementwise multiplication * */ template<typename T> __global__ void __cuda_elementwiseMultiplication(T *data, T *datab, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) data[index] = data[index] * datab[index]; } template<typename T> void _cuda_elementwiseMultiplication(T *data, T *datab, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_elementwiseMultiplication) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, data, datab, nElements); } template __global__ void __cuda_elementwiseMultiplication<double>(double *data, double *datab, unsigned int nElements); template __global__ void __cuda_elementwiseMultiplication<float>(float *data, float *datab, unsigned int nElements); template void _cuda_elementwiseMultiplication<double>(double *data, double *datab, unsigned int nRows, unsigned int nColumns); template void _cuda_elementwiseMultiplication<float>(float *data, float *datab, unsigned int nRows, unsigned int nColumns); /* * * elementwise division * */ template<typename T> __global__ void __cuda_elementwiseDivision(T *data, T *datab, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) data[index] = data[index] / datab[index]; } template<typename T> void _cuda_elementwiseDivision(T *data, T *datab, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_elementwiseDivision) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, data, 
datab, nElements); } template __global__ void __cuda_elementwiseDivision<double>(double *data, double *datab, unsigned int nElements); template __global__ void __cuda_elementwiseDivision<float>(float *data, float *datab, unsigned int nElements); template void _cuda_elementwiseDivision<double>(double *data, double *datab, unsigned int nRows, unsigned int nColumns); template void _cuda_elementwiseDivision<float>(float *data, float *datab, unsigned int nRows, unsigned int nColumns); /* * * rprop Weight Update * */ template<typename T> __global__ void __cuda_rpropUpdate(T *currentValues, T *newGradients, T *oldGradients, T *updateValues, T increasingFactor, T decreasingFactor, T maxUpdateValue, T minUpdateValue, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) { T change = oldGradients[index] * newGradients[index]; if (change > 0) { updateValues[index] = updateValues[index] * increasingFactor; if (updateValues[index] > maxUpdateValue) updateValues[index] = maxUpdateValue; } else if (change < 0) { updateValues[index] = updateValues[index] * decreasingFactor; if (updateValues[index] < minUpdateValue) updateValues[index] = minUpdateValue; } if (newGradients[index] > 0) currentValues[index] = currentValues[index] - updateValues[index]; else if (newGradients[index] < 0) currentValues[index] = currentValues[index] + updateValues[index]; oldGradients[index] = newGradients[index]; } } template<typename T> void _cuda_rpropUpdate(T *currentValues, T *newGradients, T *oldGradients, T *updateValues, T increasingFactor, T decreasingFactor, T maxUpdateValue, T minUpdateValue, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_rpropUpdate) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, currentValues, newGradients, oldGradients, updateValues, increasingFactor, decreasingFactor, maxUpdateValue, minUpdateValue, nElements); } template __global__ void __cuda_rpropUpdate<double>(double *currentValues, double *newGradients, double *oldGradients, double *updateValues, double increasingFactor, double decreasingFactor, double maxUpdateValue, double minUpdateValue, unsigned int nElements); template __global__ void __cuda_rpropUpdate<float>(float *currentValues, float *newGradients, float *oldGradients, float *updateValues, float increasingFactor, float decreasingFactor, float maxUpdateValue, float minUpdateValue, unsigned int nElements); template void _cuda_rpropUpdate<double>(double *currentValues, double *newGradients, double *oldGradients, double *updateValues, double increasingFactor, double decreasingFactor, double maxUpdateValue, double minUpdateValue, unsigned int nRows, unsigned int nColumns); template void _cuda_rpropUpdate<float>(float *currentValues, float *newGradients, float *oldGradients, float *updateValues, float increasingFactor, float decreasingFactor, float maxUpdateValue, float minUpdateValue, unsigned int nRows, unsigned int nColumns); /* * * add constant elementwise * */ template<typename T> __global__ void __cuda_addConstantElementwise(T constant, T *data, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) data[index] = data[index] + constant; } template<typename T> void _cuda_addConstantElementwise(T constant, T *data, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (T) 
nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_addConstantElementwise) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, constant, data, nElements); } template __global__ void __cuda_addConstantElementwise<double>(double constant, double *data, unsigned int nElements); template void _cuda_addConstantElementwise<double>(double constant, double *data, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_addConstantElementwise<float>(float constant, float *data, unsigned int nElements); template void _cuda_addConstantElementwise<float>(float constant, float *data, unsigned int nRows, unsigned int nColumns); /* * * getMaxOfColumns * */ template<typename T> __global__ void __cuda_getMaxOfColumns(T *vectorDevPtr, const T *matrixDevPtr, unsigned int nRows, unsigned int nColumns){ unsigned int columnIndex = threadIdx.x + blockIdx.x * blockDim.x; if (columnIndex < nColumns){ T result = matrixDevPtr[columnIndex * nRows]; for (unsigned int i = 1; i < nRows; i++){ T val = matrixDevPtr[columnIndex * nRows + i]; result = fmax(result, val); } vectorDevPtr[columnIndex] = result; } } template<typename T> void _cuda_getMaxOfColumns(T *vectorDevPtr, const T *matrixDevPtr, unsigned int nRows, unsigned int nColumns){ // parallelize over columns int gridSize = (int)ceil( (float) nColumns/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_getMaxOfColumns) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, vectorDevPtr, matrixDevPtr, nRows, nColumns); } template __global__ void __cuda_getMaxOfColumns(double *vectorDevPtr, const double *matrixDevPtr, unsigned int nRows, unsigned int nColumns); template void _cuda_getMaxOfColumns(double *vectorDevPtr, const double *matrixDevPtr, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_getMaxOfColumns(float *vectorDevPtr, const float *matrixDevPtr, unsigned int nRows, unsigned int nColumns); template void _cuda_getMaxOfColumns(float *vectorDevPtr, const float *matrixDevPtr, unsigned int nRows, unsigned int nColumns); /* * slightly faster version using tmp array */ template<typename T> __global__ void __cuda_getMaxOfColumnsTmp(const T *matrixDevPtr, unsigned int nRows, unsigned int nColumns, T *tmpDevPtr, unsigned int tmpRows){ unsigned int columnIndex = threadIdx.x + blockIdx.x * blockDim.x; unsigned int columnPart = blockIdx.y; if (columnIndex < nColumns){ unsigned int nRowsDiv = nRows / tmpRows; unsigned int startRow = columnPart * nRowsDiv; if (startRow < nRows){ unsigned int endRow = columnPart == tmpRows - 1 ? 
nRows : (columnPart + 1) * nRowsDiv; T result = matrixDevPtr[columnIndex * nRows]; for (unsigned int i = startRow; i < endRow; i++){ // result += matrix(i, columnIndex) T val = matrixDevPtr[columnIndex * nRows + i]; result = fmax(result, val); } tmpDevPtr[columnIndex*tmpRows + columnPart] = result; } } } template<typename T> void _cuda_getMaxOfColumns(T *vectorDevPtr, const T *matrixDevPtr, unsigned int nRows, unsigned int nColumns, T *tmpDevPtr, unsigned int tmpRows){ int gridDimx = (int)ceil( (float) nColumns / THREADS_PER_BLOCK); int gridDimy = tmpRows; dim3 gridSize(gridDimx,gridDimy); hipLaunchKernelGGL(( __cuda_getMaxOfColumnsTmp) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, matrixDevPtr, nRows, nColumns, tmpDevPtr, tmpRows); _cuda_getMaxOfColumns<T>(vectorDevPtr, tmpDevPtr, tmpRows, nColumns); } template __global__ void __cuda_getMaxOfColumnsTmp(const double *matrixDevPtr, unsigned int nRows, unsigned int nColumns, double *tmpDevPtr, unsigned int tmpRows); template void _cuda_getMaxOfColumns(double *vectorDevPtr, const double *matrixDevPtr, unsigned int nRows, unsigned int nColumns, double *tmpDevPtr, unsigned int tmpRows); template __global__ void __cuda_getMaxOfColumnsTmp(const float *matrixDevPtr, unsigned int nRows, unsigned int nColumns, float *tmpDevPtr, unsigned int tmpRows); template void _cuda_getMaxOfColumns(float *vectorDevPtr, const float *matrixDevPtr, unsigned int nRows, unsigned int nColumns, float *tmpDevPtr, unsigned int tmpRows); /* * * elementwiseMultiplicationWithSigmoidDerivative * */ template<typename T> __global__ void __cuda_elementwiseMultiplicationWithSigmoidDerivative(T *data, T *datab, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) data[index] = data[index] * (datab[index] * (1 - datab[index])); } template<typename T> void _cuda_elementwiseMultiplicationWithSigmoidDerivative(T *data, T *datab, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_elementwiseMultiplicationWithSigmoidDerivative) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, data, datab, nElements); } template __global__ void __cuda_elementwiseMultiplicationWithSigmoidDerivative(double *data, double *datab, unsigned int nElements); template void _cuda_elementwiseMultiplicationWithSigmoidDerivative(double *data, double *datab, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_elementwiseMultiplicationWithSigmoidDerivative(float *data, float *datab, unsigned int nElements); template void _cuda_elementwiseMultiplicationWithSigmoidDerivative(float *data, float *datab, unsigned int nRows, unsigned int nColumns); /* * * elementwiseMultiplicationWithTriangleDerivative * */ template<typename T> __global__ void __cuda_elementwiseMultiplicationWithTriangleDerivative(T *data, T *datab, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) { if ((datab[index] < -1.0) || (datab[index] > 1.0) || (datab[index] == 0)) data[index] = 0; else if (datab[index] > 0.0) data[index] = -data[index]; } } template<typename T> void _cuda_elementwiseMultiplicationWithTriangleDerivative(T *data, T *datab, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_elementwiseMultiplicationWithTriangleDerivative) , dim3(gridSize) , 
dim3(THREADS_PER_BLOCK) , 0, 0, data, datab, nElements); } template __global__ void __cuda_elementwiseMultiplicationWithTriangleDerivative(double *data, double *datab, unsigned int nElements); template void _cuda_elementwiseMultiplicationWithTriangleDerivative(double *data, double *datab, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_elementwiseMultiplicationWithTriangleDerivative(float *data, float *datab, unsigned int nElements); template void _cuda_elementwiseMultiplicationWithTriangleDerivative(float *data, float *datab, unsigned int nRows, unsigned int nColumns); /* * * elementwiseMultiplicationWithTanhDerivative * */ template<typename T> __global__ void __cuda_elementwiseMultiplicationWithTanhDerivative(T *data, T *datab, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) data[index] = data[index] * (1 - pow(datab[index],2)); } template<typename T> void _cuda_elementwiseMultiplicationWithTanhDerivative(T *data, T *datab, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_elementwiseMultiplicationWithTanhDerivative) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, data, datab, nElements); } template __global__ void __cuda_elementwiseMultiplicationWithTanhDerivative(double *data, double *datab, unsigned int nElements); template void _cuda_elementwiseMultiplicationWithTanhDerivative(double *data, double *datab, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_elementwiseMultiplicationWithTanhDerivative(float *data, float *datab, unsigned int nElements); template void _cuda_elementwiseMultiplicationWithTanhDerivative(float *data, float *datab, unsigned int nRows, unsigned int nColumns); /* * * multiplicationWithSoftmaxDerivative * */ template<typename T> __global__ void __cuda_multiplicationWithSoftmaxDerivative(T *data, T *datab, T *datac, unsigned int nElements, unsigned int nRows){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) data[index] = datab[index] * (data[index] - datac[index/nRows]); } template<typename T> void _cuda_multiplicationWithSoftmaxDerivative(T *data, T *datab, T *datac, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_multiplicationWithSoftmaxDerivative) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, data, datab, datac, nElements, nRows); } template __global__ void __cuda_multiplicationWithSoftmaxDerivative(double *data, double *datab, double *datac, unsigned int nElements, unsigned int nRows); template void _cuda_multiplicationWithSoftmaxDerivative(double *data, double *datab, double *datac, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_multiplicationWithSoftmaxDerivative(float *data, float *datab, float *datac, unsigned int nElements, unsigned int nRows); template void _cuda_multiplicationWithSoftmaxDerivative(float *data, float *datab, float *datac, unsigned int nRows, unsigned int nColumns); /* * elementwiseMultiplicationWithClippedDerivative * */ template <typename T> __global__ void __cuda_elementwiseMultiplicationWithClippedDerivative(T *errOut, T *activations, unsigned int nElements, T thresholdLeft, T thresholdRight){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) { if 
((activations[index] <= thresholdLeft) || (activations[index] >= thresholdRight)) errOut[index] = 0; } } template <typename T> void _cuda_elementwiseMultiplicationWithClippedDerivative(T *data, T *datab, unsigned int nRows, unsigned int nColumns, T thresholdLeft, T thresholdRight) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_elementwiseMultiplicationWithClippedDerivative<T>) , dim3(gridSize), dim3(THREADS_PER_BLOCK), 0, 0, data, datab, nElements, thresholdLeft, thresholdRight); } template __global__ void __cuda_elementwiseMultiplicationWithClippedDerivative<float>(float*, float*, unsigned int, float, float); template __global__ void __cuda_elementwiseMultiplicationWithClippedDerivative<double>(double*, double*, unsigned int, double, double); template void _cuda_elementwiseMultiplicationWithClippedDerivative<float>(float*, float*, unsigned int, unsigned int, float, float); template void _cuda_elementwiseMultiplicationWithClippedDerivative<double>(double*, double*, unsigned int, unsigned int, double, double); /* * elementwiseMultiplicationWithSignedPowDerivative * */ template <typename T> __global__ void __cuda_elementwiseMultiplicationWithSignedPowDerivative(T *errOut, T *activations, unsigned int nElements, T p){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) { if (activations[index] == 0) errOut[index] = 0; else if (activations[index] < 0) errOut[index] *= p * pow(-activations[index], p - 1); else errOut[index] *= p * pow(activations[index], p - 1); } } template <typename T> void _cuda_elementwiseMultiplicationWithSignedPowDerivative(T *data, T *datab, unsigned int nRows, unsigned int nColumns, T p) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_elementwiseMultiplicationWithSignedPowDerivative<T>) , dim3(gridSize), dim3(THREADS_PER_BLOCK), 0, 0, data, datab, nElements, p); } template __global__ void __cuda_elementwiseMultiplicationWithSignedPowDerivative<float>(float*, float*, unsigned int, float); template __global__ void __cuda_elementwiseMultiplicationWithSignedPowDerivative<double>(double*, double*, unsigned int, double); template void _cuda_elementwiseMultiplicationWithSignedPowDerivative<float>(float*, float*, unsigned int, unsigned int, float); template void _cuda_elementwiseMultiplicationWithSignedPowDerivative<double>(double*, double*, unsigned int, unsigned int, double); /* * elementwiseMultiplicationWithLogDerivative * */ template <typename T> __global__ void __cuda_elementwiseMultiplicationWithLogDerivative(T *errOut, T *activations, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) errOut[index] *= exp(-activations[index]); } template <typename T> void _cuda_elementwiseMultiplicationWithLogDerivative(T *data, T *datab, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_elementwiseMultiplicationWithLogDerivative<T>) , dim3(gridSize), dim3(THREADS_PER_BLOCK), 0, 0, data, datab, nElements); } template __global__ void __cuda_elementwiseMultiplicationWithLogDerivative<float>(float*, float*, unsigned int); template __global__ void __cuda_elementwiseMultiplicationWithLogDerivative<double>(double*, double*, unsigned int); template void 
_cuda_elementwiseMultiplicationWithLogDerivative<float>(float*, float*, unsigned int, unsigned int); template void _cuda_elementwiseMultiplicationWithLogDerivative<double>(double*, double*, unsigned int, unsigned int); /* * * multiplicationWithL2NormalizationDerivative * */ template<typename T> __global__ void __cuda_multiplicationWithL2NormalizationDerivative(T *data, T *datab, T *datac, T *datad, unsigned int nElements, unsigned int nRows){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) data[index] = (data[index] - datab[index] * datac[index/nRows]) / datad[index/nRows]; } template<typename T> void _cuda_multiplicationWithL2NormalizationDerivative(T *data, T *datab, T *datac, T *datad, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_multiplicationWithL2NormalizationDerivative) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, data, datab, datac, datad, nElements, nRows); } template __global__ void __cuda_multiplicationWithL2NormalizationDerivative(double *data, double *datab, double *datac, double *datad, unsigned int nElements, unsigned int nRows); template void _cuda_multiplicationWithL2NormalizationDerivative(double *data, double *datab, double *datac, double *datad, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_multiplicationWithL2NormalizationDerivative(float *data, float *datab, float *datac, float *datad, unsigned int nElements, unsigned int nRows); template void _cuda_multiplicationWithL2NormalizationDerivative(float *data, float *datab, float *datac, float *datad, unsigned int nRows, unsigned int nColumns); /* * * addToAllColumns * */ template<typename T> __global__ void __cuda_addToAllColumns(T *data, T *datab, unsigned int nElements, unsigned int nRows, T alpha){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) data[index] += alpha * datab[index%nRows]; } template<typename T> void _cuda_addToAllColumns(T *data, T *datab, unsigned int nRows, unsigned int nColumns, T alpha) { // TODO implement kernel without % operator (slow on GPU) unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_addToAllColumns) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, data, datab, nElements, nRows, alpha); } template __global__ void __cuda_addToAllColumns<double>(double *data, double *datab, unsigned int nElements, unsigned int nRows, double alpha); template void _cuda_addToAllColumns<double>(double *data, double *datab, unsigned int nRows, unsigned int nColumns, double alpha); template __global__ void __cuda_addToAllColumns<float>(float *data, float *datab, unsigned int nElements, unsigned int nRows, float alpha); template void _cuda_addToAllColumns<float>(float *data, float *datab, unsigned int nRows, unsigned int nColumns, float alpha); /* * addToAllChannels * Adds one element of vector to one channel */ template<typename T> __global__ void __cuda_addToAllChannels(T *mat, T *vec, unsigned int channels, unsigned int nRows, unsigned int nElements, T alpha) { unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < nElements) { unsigned int channelSize = nRows / channels; mat[index] += alpha * vec[(index%nRows)/channelSize]; } } template<typename T> void _cuda_addToAllChannels(T *mat, T *vec, unsigned int channels, unsigned int nRows, unsigned int nColumns, T alpha) { 
unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_addToAllChannels), dim3(gridSize), dim3(THREADS_PER_BLOCK), 0, 0, mat, vec, channels, nRows, nElements, alpha); } template __global__ void __cuda_addToAllChannels(double *mat, double *vec, unsigned int channels, unsigned int nRows, unsigned int nElements, double alpha); template __global__ void __cuda_addToAllChannels(float *mat, float *vec, unsigned int channels, unsigned int nRows, unsigned int nElements, float alpha); template void _cuda_addToAllChannels(double *mat, double *vec, unsigned int channels, unsigned int nRows, unsigned int nColumns, double alpha); template void _cuda_addToAllChannels(float *mat, float *vec, unsigned int channels, unsigned int nRows, unsigned int nColumns, float alpha); /* * * addToAllRows * */ template<typename T> __global__ void __cuda_addToAllRows(T *data, T *datab, unsigned int nElements, unsigned int nRows, T alpha){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) data[index] += alpha * datab[index/nRows]; } template<typename T> void _cuda_addToAllRows(T *data, T *datab, unsigned int nRows, unsigned int nColumns, T alpha) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_addToAllRows) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, data, datab, nElements, nRows, alpha); } template __global__ void __cuda_addToAllRows<double>(double *data, double *datab, unsigned int nElements, unsigned int nRows, double alpha); template void _cuda_addToAllRows<double>(double *data, double *datab, unsigned int nRows, unsigned int nColumns, double alpha); template __global__ void __cuda_addToAllRows<float>(float *data, float *datab, unsigned int nElements, unsigned int nRows, float alpha); template void _cuda_addToAllRows<float>(float *data, float *datab, unsigned int nRows, unsigned int nColumns, float alpha); /* * * multiplyColumnsByScalars * */ template<typename T> __global__ void __cuda_multiplyColumnsByScalars(const T *vectorDevPtr, T *matrixDevPtr, unsigned int nRows, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; unsigned int colIndex = index / nRows; if (index < nElements) matrixDevPtr[index] = matrixDevPtr[index] * vectorDevPtr[colIndex]; } template<typename T> void _cuda_multiplyColumnsByScalars(const T *vectorDevPtr, T *matrixDevPtr, unsigned int nRows, unsigned int nColumns){ // TODO parallelization without mod operator (mod is slow on GPU) unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_multiplyColumnsByScalars) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, vectorDevPtr, matrixDevPtr, nRows, nElements); } template __global__ void __cuda_multiplyColumnsByScalars<double>(const double *vectorDevPtr, double *matrixDevPtr, unsigned int nRows, unsigned int nElements); template void _cuda_multiplyColumnsByScalars<double>(const double *vectorDevPtr, double *matrixDevPtr, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_multiplyColumnsByScalars<float>(const float *vectorDevPtr, float *matrixDevPtr, unsigned int nRows, unsigned int nElements); template void _cuda_multiplyColumnsByScalars<float>(const float *vectorDevPtr, float *matrixDevPtr, unsigned int nRows, unsigned int nColumns); /* * * divideColumnsByScalars * */ template<typename T> __global__ void 
__cuda_divideColumnsByScalars(const T *vectorDevPtr, T *matrixDevPtr, unsigned int nRows, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; unsigned int colIndex = index / nRows; if (index < nElements) matrixDevPtr[index] = matrixDevPtr[index] / vectorDevPtr[colIndex]; } template<typename T> void _cuda_divideColumnsByScalars(const T *vectorDevPtr, T *matrixDevPtr, unsigned int nRows, unsigned int nColumns){ // TODO parallelization without mod operator (mod is slow on GPU) unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_divideColumnsByScalars) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, vectorDevPtr, matrixDevPtr, nRows, nElements); } template __global__ void __cuda_divideColumnsByScalars<double>(const double *vectorDevPtr, double *matrixDevPtr, unsigned int nRows, unsigned int nElements); template void _cuda_divideColumnsByScalars<double>(const double *vectorDevPtr, double *matrixDevPtr, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_divideColumnsByScalars<float>(const float *vectorDevPtr, float *matrixDevPtr, unsigned int nRows, unsigned int nElements); template void _cuda_divideColumnsByScalars<float>(const float *vectorDevPtr, float *matrixDevPtr, unsigned int nRows, unsigned int nColumns); /* * * multiplyRowsByScalars * */ template<typename T> __global__ void __cuda_multiplyRowsByScalars(const T *vectorDevPtr, T *matrixDevPtr, unsigned int nRows, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; unsigned int rowIndex = index % nRows; if (index < nElements) matrixDevPtr[index] = matrixDevPtr[index] * vectorDevPtr[rowIndex]; } template<typename T> void _cuda_multiplyRowsByScalars(const T *vectorDevPtr, T *matrixDevPtr, unsigned int nRows, unsigned int nColumns){ // TODO parallelization without mod operator (mod is slow on GPU) unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_multiplyRowsByScalars) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, vectorDevPtr, matrixDevPtr, nRows, nElements); } template __global__ void __cuda_multiplyRowsByScalars<double>(const double *vectorDevPtr, double *matrixDevPtr, unsigned int nRows,unsigned int nElements); template void _cuda_multiplyRowsByScalars<double>(const double *vectorDevPtr, double *matrixDevPtr, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_multiplyRowsByScalars<float>(const float *vectorDevPtr, float *matrixDevPtr, unsigned int nRows, unsigned int nElements); template void _cuda_multiplyRowsByScalars<float>(const float *vectorDevPtr, float *matrixDevPtr, unsigned int nRows, unsigned int nColumns); /* * * divideRowsByScalars * */ template<typename T> __global__ void __cuda_divideRowsByScalars(const T *vectorDevPtr, T *matrixDevPtr, unsigned int nRows, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; unsigned int rowIndex = index % nRows; if (index < nElements) matrixDevPtr[index] = matrixDevPtr[index] / vectorDevPtr[rowIndex]; } template<typename T> void _cuda_divideRowsByScalars(const T *vectorDevPtr, T *matrixDevPtr, unsigned int nRows, unsigned int nColumns){ // TODO parallelization without mod operator (mod is slow on GPU) unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_divideRowsByScalars) , dim3(gridSize) , 
dim3(THREADS_PER_BLOCK) , 0, 0, vectorDevPtr, matrixDevPtr, nRows, nElements); } template __global__ void __cuda_divideRowsByScalars<double>(const double *vectorDevPtr, double *matrixDevPtr, unsigned int nRows,unsigned int nElements); template void _cuda_divideRowsByScalars<double>(const double *vectorDevPtr, double *matrixDevPtr, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_divideRowsByScalars<float>(const float *vectorDevPtr, float *matrixDevPtr, unsigned int nRows,unsigned int nElements); template void _cuda_divideRowsByScalars<float>(const float *vectorDevPtr, float *matrixDevPtr, unsigned int nRows, unsigned int nColumns); /* * * fill * */ template<typename T> __global__ void __cuda_fill(T *data, T value, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) data[index] = value; } template<typename T> void _cuda_fill(T *data, T value, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_fill) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, data, value, nElements); } template __global__ void __cuda_fill<double>(double *data, double value, unsigned int nElements); template void _cuda_fill<double>(double *data, double value, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_fill<float>(float *data, float value, unsigned int nElements); template void _cuda_fill<float>(float *data, float value, unsigned int nRows, unsigned int nColumns); /* * * Average Pooling * */ template<typename T> __global__ void __cuda_avgPool(const T *source, T *result, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride, const T minValue) { unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; unsigned int resultWidth = ceil((float)sourceWidth/stride); unsigned int resultHeight = ceil((float)sourceHeight/stride); unsigned int resultRows = resultWidth * resultHeight * sourceChannels; if(index < (resultRows * sourceColumns)) { int imageNum = index / resultRows; int resultPixelNum = index % resultRows; int channelNum = resultPixelNum / (resultWidth * resultHeight); resultPixelNum %= (resultWidth * resultHeight); int resultPixelX = resultPixelNum / resultHeight; int resultPixelY = resultPixelNum % resultHeight; int sourcePixelX = resultPixelX * stride; int sourcePixelY = resultPixelY * stride; T sum = 0; T num = 0; int index = -1; for(int i=sourcePixelX; (i<(sourcePixelX+poolSize)) && (i<sourceWidth); i++) { for(int j=sourcePixelY; (j<(sourcePixelY + poolSize)) && (j<sourceHeight); j++) { index = imageNum * sourceRows + channelNum * (sourceWidth * sourceHeight) + i * sourceHeight + j; sum += source[index]; num += 1; } } int resultIndex = imageNum * resultRows + channelNum * (resultWidth * resultHeight) + resultPixelX * resultHeight + resultPixelY; result[resultIndex] = sum / (poolSize * poolSize);//num; } } template<typename T> void _cuda_avgPool(const T *source, T *result, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride) { unsigned int nElements = (int)ceil((float)sourceWidth/stride) * (int)ceil((float)sourceHeight/stride) * sourceChannels * 
sourceColumns; int gridSize = (int)ceil((float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_avgPool), dim3(gridSize), dim3(THREADS_PER_BLOCK), 0, 0, source, result, sourceRows, sourceColumns, sourceWidth, sourceHeight, sourceChannels, poolSize, stride, std::numeric_limits<T>::min()); } template __global__ void __cuda_avgPool(const double *source, double *result, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride, double minValue); template __global__ void __cuda_avgPool(const float *source, float *result, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride, float minValue); template void _cuda_avgPool(const double *source, double *result, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride); template void _cuda_avgPool(const float *source, float *result, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride); /* * * Avg Pooling Backpropogation * */ template<typename T> __global__ void __cuda_backPropogateAvgPool(T *result, const T *errorSignal, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride) { unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; unsigned int errorSignalWidth = ceil((double)sourceWidth/stride); unsigned int errorSignalHeight = ceil((double)sourceHeight/stride); unsigned int errorSignalRows = errorSignalWidth * errorSignalHeight * sourceChannels; if(index < (sourceRows * sourceColumns)) { int imageNum = index / sourceRows; int imagePixelNum = index % sourceRows; int channel = imagePixelNum / (sourceWidth * sourceHeight); imagePixelNum %= (sourceWidth * sourceHeight); int pixelX = imagePixelNum / sourceHeight; int pixelY = imagePixelNum % sourceHeight; int indexInErrorSignal = -1; //calculates start of the first grid containing current Pixel unsigned int gridStartX = (pixelX + 1 - (int)poolSize) < 0 ? 0 : (unsigned int)(ceil((float)(pixelX + 1 - (int)poolSize)/(float)stride) * stride); unsigned int gridStartY = (pixelY + 1 - (int)poolSize) < 0 ? 
0 : (unsigned int)(ceil((float)(pixelY + 1 - (int)poolSize)/(float)stride) * stride); ////////////////////////////////// for(unsigned int gridX=gridStartX; gridX<=pixelX; gridX+=stride) { for(unsigned int gridY=gridStartY; gridY<=pixelY; gridY+=stride) { indexInErrorSignal = imageNum * errorSignalRows + channel * errorSignalHeight * errorSignalWidth + (gridX/stride) * errorSignalHeight + (gridY/stride); result[index] += errorSignal[indexInErrorSignal] / (T)(poolSize * poolSize); } } } } template<typename T> void _cuda_backPropogateAvgPool(T *result, const T *errorSignal, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride) { unsigned int nElements = sourceRows * sourceColumns; int gridSize = (int)ceil((float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_backPropogateAvgPool), dim3(gridSize), dim3(THREADS_PER_BLOCK), 0, 0, result, errorSignal, sourceRows, sourceColumns, sourceWidth, sourceHeight, sourceChannels, poolSize, stride); } template __global__ void __cuda_backPropogateAvgPool(double *result, const double *errorSignal, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride); template __global__ void __cuda_backPropogateAvgPool(float *result, const float *errorSignal, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride); template void _cuda_backPropogateAvgPool(double *result, const double *errorSignal, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride); template void _cuda_backPropogateAvgPool(float *result, const float *errorSignal, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride); /* * * Max Pooling * */ template<typename T> __global__ void __cuda_maxPool(const T *source, T *result, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride, const T minValue) { unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; unsigned int resultWidth = ceil((float)sourceWidth/stride); unsigned int resultHeight = ceil((float)sourceHeight/stride); unsigned int resultRows = resultWidth * resultHeight * sourceChannels; if(index < (resultRows * sourceColumns)) { int imageNum = index / resultRows; int resultPixelNum = index % resultRows; int channelNum = resultPixelNum / (resultWidth * resultHeight); resultPixelNum %= (resultWidth * resultHeight); int resultPixelX = resultPixelNum / resultHeight; int resultPixelY = resultPixelNum % resultHeight; int sourcePixelX = resultPixelX * stride; int sourcePixelY = resultPixelY * stride; T maxValue = minValue; int index = -1; for(int i=sourcePixelX; (i<(sourcePixelX+poolSize)) && (i<sourceWidth); i++) { for(int j=sourcePixelY; (j<(sourcePixelY + 
poolSize)) && (j<sourceHeight); j++) { index = imageNum * sourceRows + channelNum * (sourceWidth * sourceHeight) + i * sourceHeight + j; if(source[index] >= maxValue) { maxValue = source[index]; } } } int resultIndex = imageNum * resultRows + channelNum * (resultWidth * resultHeight) + resultPixelX * resultHeight + resultPixelY; result[resultIndex] = maxValue; } } template<typename T> void _cuda_maxPool(const T *source, T *result, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride) { unsigned int nElements = (int)ceil((float)sourceWidth/stride) * (int)ceil((float)sourceHeight/stride) * sourceChannels * sourceColumns; int gridSize = (int)ceil((float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_maxPool), dim3(gridSize), dim3(THREADS_PER_BLOCK), 0, 0, source, result, sourceRows, sourceColumns, sourceWidth, sourceHeight, sourceChannels, poolSize, stride, std::numeric_limits<T>::lowest()); } template __global__ void __cuda_maxPool(const double *source, double *result, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride, double minValue); template __global__ void __cuda_maxPool(const float *source, float *result, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride, float minValue); template void _cuda_maxPool(const double *source, double *result, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride); template void _cuda_maxPool(const float *source, float *result, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride); /* * * Max Pooling Backpropogation * */ template<typename T> __global__ void __cuda_backPropogateMaxPool(T *result, const T* activationIn, const T* activationOut, const T *errorSignal,const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride) { unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; unsigned int errorSignalWidth = ceil((double)sourceWidth/stride); unsigned int errorSignalHeight = ceil((double)sourceHeight/stride); unsigned int errorSignalRows = errorSignalWidth * errorSignalHeight * sourceChannels; if(index < (sourceRows * sourceColumns)) { int imageNum = index / sourceRows; int imagePixelNum = index % sourceRows; int channel = imagePixelNum / (sourceWidth * sourceHeight); imagePixelNum %= (sourceWidth * sourceHeight); int pixelX = imagePixelNum / sourceHeight; int pixelY = imagePixelNum % sourceHeight; int indexInErrorSignal = -1; int numMaxima = 0; //calculates start of the first grid containing current Pixel unsigned int gridStartX = (pixelX + 1 - (int)poolSize) < 0 ?
0 : (unsigned int)(ceil((float)(pixelX + 1 - (int)poolSize)/(float)stride) * stride); unsigned int gridStartY = (pixelY + 1 - (int)poolSize) < 0 ? 0 : (unsigned int)(ceil((float)(pixelY + 1 - (int)poolSize)/(float)stride) * stride); ////////////////////////////////// for(unsigned int gridX=gridStartX; gridX<=pixelX; gridX+=stride) { for(unsigned int gridY=gridStartY; gridY<=pixelY; gridY+=stride) { indexInErrorSignal = imageNum * errorSignalRows + channel * errorSignalHeight * errorSignalWidth + (gridX/stride) * errorSignalHeight + (gridY/stride); //current pixel is not the maximum in the current window, so this window contributes no gradient if(activationIn[index] != activationOut[indexInErrorSignal]) continue; numMaxima = 0; for(unsigned int i=gridX; (i<(gridX + poolSize)) && i<sourceWidth; i++) { for(unsigned int j=gridY;(j<(gridY+poolSize)) && j<sourceHeight; j++) { int indexInActivationIn = imageNum * sourceRows + channel * sourceHeight * sourceWidth + i * sourceHeight + j; if(activationIn[indexInActivationIn] == activationOut[indexInErrorSignal]) { numMaxima += 1; } } } result[index] += errorSignal[indexInErrorSignal] / (T) numMaxima; } } } } template<typename T> void _cuda_backPropogateMaxPool(T *result, const T* activationIn, const T* activationOut, const T *errorSignal, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride) { unsigned int nElements = sourceRows * sourceColumns; int gridSize = (int)ceil((float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_backPropogateMaxPool), dim3(gridSize), dim3(THREADS_PER_BLOCK), 0, 0, result, activationIn, activationOut, errorSignal, sourceRows, sourceColumns, sourceWidth, sourceHeight, sourceChannels, poolSize, stride); } template __global__ void __cuda_backPropogateMaxPool(double *result, const double* activationIn, const double* activationOut, const double *errorSignal, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride); template __global__ void __cuda_backPropogateMaxPool(float *result, const float* activationIn, const float* activationOut, const float *errorSignal, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride); template void _cuda_backPropogateMaxPool(double *result, const double* activationIn, const double* activationOut, const double *errorSignal, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride); template void _cuda_backPropogateMaxPool(float *result, const float* activationIn, const float* activationOut, const float *errorSignal, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride); /* * * ensure minimal value * */ template<typename T> __global__ void __cuda_ensureMinimalValue(T *data, T value, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if ((index < nElements) && (data[index] < value))
data[index] = value; } template<typename T> void _cuda_ensureMinimalValue(T *data, T value, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_ensureMinimalValue) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, data, value, nElements); } template __global__ void __cuda_ensureMinimalValue(double *data, double value, unsigned int nElements); template void _cuda_ensureMinimalValue(double *data, double value, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_ensureMinimalValue(float *data, float value, unsigned int nElements); template void _cuda_ensureMinimalValue(float *data, float value, unsigned int nRows, unsigned int nColumns); /* * * ensure maximal value * */ template<typename T> __global__ void __cuda_ensureMaximalValue(T *data, T value, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if ((index < nElements) && (data[index] > value)) data[index] = value; } template<typename T> void _cuda_ensureMaximalValue(T *data, T value, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_ensureMaximalValue) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, data, value, nElements); } template __global__ void __cuda_ensureMaximalValue(double *data, double value, unsigned int nElements); template void _cuda_ensureMaximalValue(double *data, double value, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_ensureMaximalValue(float *data, float value, unsigned int nElements); template void _cuda_ensureMaximalValue(float *data, float value, unsigned int nRows, unsigned int nColumns); /* * * prepares for convolution * */ template<typename T> __global__ void __cuda_prepareConvolution(T* dest, const T* source, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY) { unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; if(index < destRows * destCols) { int imageNum = index / destRows; int resultIndex = index % destRows; int kernelMiddleX = kernelWidth / 2; int kernelMiddleY = kernelHeight / 2; int heightOfOneDestCh = (int)ceil((float)(sourceHeight - kernelHeight + 1) / (float)strideY); int pixelNum = resultIndex / (kernelHeight * kernelWidth * sourceChannels); int pixelX = (pixelNum / heightOfOneDestCh) * strideX + kernelMiddleX; int pixelY = (pixelNum % heightOfOneDestCh) * strideY + kernelMiddleY; int channelNum = resultIndex % (kernelHeight * kernelWidth * sourceChannels); int neighbNum = channelNum % (kernelHeight * kernelWidth); channelNum = channelNum / (kernelWidth * kernelHeight); int neighX = (neighbNum / kernelHeight) - kernelMiddleX; int neighY = (neighbNum % kernelHeight) - kernelMiddleY; dest[index] = source[imageNum * (sourceChannels * sourceWidth * sourceHeight) + channelNum * (sourceWidth * sourceHeight) + (pixelX + neighX) * sourceHeight + (pixelY + neighY)]; } } template<typename T> void _cuda_prepareConvolution(T* dest, const T* source, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int 
destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY) { unsigned int nElements = destRows * destCols; int gridSize = (int)ceil((float)nElements/ THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_prepareConvolution), dim3(gridSize), dim3(THREADS_PER_BLOCK), 0, 0, dest, source, sourceWidth, sourceHeight, sourceChannels, kernelWidth, kernelHeight, destRows, destCols, strideX, strideY); } template __global__ void __cuda_prepareConvolution(double* dest, const double* source, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY); template __global__ void __cuda_prepareConvolution(float* dest, const float* source, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY); template void _cuda_prepareConvolution(double* dest, const double* source, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY); template void _cuda_prepareConvolution(float* dest, const float* source, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY); /* * Backpropogation convolution * */ template<typename T> __global__ void __cuda_prepareConvolutionBackProp(T* dest, const T* source, const unsigned int destWidth, const unsigned int destHeight, const unsigned int destChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols) { unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; if(index < destRows * destCols) { dest[index] = 0; int img = index / destRows; int ch = (index % destRows) / (destWidth * destHeight); int pixelNum = (index % destRows) % (destWidth * destHeight); int pixelX = pixelNum / destHeight; int pixelY = pixelNum % destHeight; int gridStartX = (pixelX + 1 - (int)kernelWidth) <= 0 ? 0 : (pixelX + 1 - (int)kernelWidth); int gridStartY = (pixelY + 1 - (int)kernelHeight) <= 0 ? 
0 : (pixelY + 1 - (int)kernelHeight); int sourceHeight = (destHeight - (int)kernelHeight + 1); int sizeOfOneChSource = sourceHeight * (destWidth - (int)kernelWidth + 1); int neighNum = 0; for(int j=gridStartX; (j<=pixelX) && ((j + kernelWidth) <= destWidth); j++) { for(int k=gridStartY; (k<=pixelY) && ((k + kernelHeight) <= destHeight) ; k++) { // (Cx, Cy) = (j + kernelMiddleX, k + kernelMiddleY) are coordinates of center pixel in grid // (Rx, Ry) = (Cx - pixelX, Cy - pixelY) gives coordinates of pixel in reference // to center pixel, such that center pixel of grid is mapped to (0,0) neighNum = (pixelX - j) * kernelHeight + (pixelY - k); //(j * sourceHeight + k) is pixel number of center of grid in source //i.e. result of convolution dest[index] += source[img * sizeOfOneChSource * destChannels * kernelWidth * kernelHeight + (j * sourceHeight + k) * destChannels * kernelWidth * kernelHeight + ch * kernelWidth * kernelHeight + neighNum]; } } } } template<typename T> void _cuda_prepareConvolutionBackProp(T* dest, const T* source, const unsigned int destWidth, const unsigned int destHeight, const unsigned int destChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols) { unsigned int nElements = destRows * destCols; int gridSize = (int)ceil((float)nElements/ THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_prepareConvolutionBackProp), dim3(gridSize), dim3(THREADS_PER_BLOCK), 0, 0, dest, source, destWidth, destHeight, destChannels, kernelWidth, kernelHeight, destRows, destCols); } template void _cuda_prepareConvolutionBackProp(double* dest, const double* source, const unsigned int destWidth, const unsigned int destHeight, const unsigned int destChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols); template void _cuda_prepareConvolutionBackProp(float* dest, const float* source, const unsigned int destWidth, const unsigned int destHeight, const unsigned int destChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols); template __global__ void __cuda_prepareConvolutionBackProp(double* dest, const double* source, const unsigned int destWidth, const unsigned int destHeight, const unsigned int destChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols); template __global__ void __cuda_prepareConvolutionBackProp(float* dest, const float* source, const unsigned int destWidth, const unsigned int destHeight, const unsigned int destChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols); /* * prepare for convolution such that after convolution image size stays same * * */ template<typename T> __global__ void __cuda_prepareConvolutionSame(T* dest, const T* source, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY) { unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; if(index < destRows * destCols) { int destWidth = (int)ceil((float)sourceWidth / (float)strideX); int destHeight = (int)ceil((float)sourceHeight / (float)strideY); int imageNum = index / destRows; int resultIndex = index %
destRows; int kernelMiddleX = kernelWidth / 2; int kernelMiddleY = kernelHeight / 2; int pixelNum = resultIndex / (kernelHeight * kernelWidth * sourceChannels); int pixelX = (pixelNum / destHeight) * strideX; int pixelY = (pixelNum % destHeight) * strideY; int channelNum = resultIndex % (kernelHeight * kernelWidth * sourceChannels); int neighbNum = channelNum % (kernelHeight * kernelWidth); channelNum = channelNum / (kernelWidth * kernelHeight); int neighX = (neighbNum / kernelHeight) - kernelMiddleX; int neighY = (neighbNum % kernelHeight) - kernelMiddleY; dest[index] = ( (pixelX + neighX) < 0 || (pixelY + neighY) < 0 || (pixelX + neighX) >= sourceWidth || (pixelY + neighY) >= sourceHeight) ? 0 : source[imageNum * (sourceChannels * sourceWidth * sourceHeight) + channelNum * (sourceWidth * sourceHeight) + (pixelX + neighX) * sourceHeight + (pixelY + neighY)]; } } template<typename T> void _cuda_prepareConvolutionSame(T* dest, const T* source, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY) { unsigned int nElements = destRows * destCols; int gridSize = (int)ceil((float)nElements/ THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_prepareConvolutionSame), dim3(gridSize), dim3(THREADS_PER_BLOCK), 0, 0, dest, source, sourceWidth, sourceHeight, sourceChannels, kernelWidth, kernelHeight, destRows, destCols, strideX, strideY); } template __global__ void __cuda_prepareConvolutionSame(double* dest, const double* source, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY); template __global__ void __cuda_prepareConvolutionSame(float* dest, const float* source, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY); template void _cuda_prepareConvolutionSame(double* dest, const double* source, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY); template void _cuda_prepareConvolutionSame(float* dest, const float* source, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY); template<typename T> __global__ void __cuda_prepareConvolutionSameBackProp(T* dest, const T* source, const unsigned int destWidth, const unsigned int destHeight, const unsigned int destChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY) { unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; if(index < (destRows * destCols)) { dest[index] = 0; int img = index / destRows; // 
destRows = destWidth * destHeight * destChannels int ch = (index % destRows) / (destWidth * destHeight); int pixelNum = (index % destRows) % (destWidth * destHeight); int pixelX = pixelNum / destHeight; int pixelY = pixelNum % destHeight; int kernelMiddleX = (int)kernelWidth / 2; int kernelMiddleY = (int)kernelHeight / 2; int gridStartX = (pixelX + 1 - (int)kernelWidth) <= (-1 * kernelMiddleX) ? (-1 * kernelMiddleX) : (pixelX + 1 - (int)kernelWidth); int gridStartY = (pixelY + 1 - (int)kernelHeight) <= (-1 * kernelMiddleY) ? (-1 * kernelMiddleY) : (pixelY + 1 - (int)kernelHeight); for(int gridX=gridStartX; (gridX <= pixelX) && ((gridX + kernelMiddleX) < destWidth) ; gridX++) { if (((gridX + kernelMiddleX) % strideX) == 0) { for(int gridY=gridStartY; (gridY <= pixelY) && ((gridY + kernelMiddleY) < destHeight); gridY++) { if (((gridY + kernelMiddleY) % strideY) == 0) { int neighNum = (pixelX - gridX) * kernelHeight + (pixelY - gridY); int centerPixel = (((gridX + kernelMiddleX) / strideX) * destHeight / strideY) + (gridY + kernelMiddleY) / strideY; dest[index] += source[img * destChannels * (destWidth / strideX) * (destHeight / strideY) * kernelWidth * kernelHeight + centerPixel * destChannels * kernelWidth * kernelHeight + ch * kernelWidth * kernelHeight + neighNum]; } } } } } } template<typename T> void _cuda_prepareConvolutionSameBackProp(T* dest, const T* source, const unsigned int destWidth, const unsigned int destHeight, const unsigned int destChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY) { unsigned int nElements = destRows * destCols; int gridSize = (int)ceil((float)nElements/ THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_prepareConvolutionSameBackProp), dim3(gridSize), dim3(THREADS_PER_BLOCK), 0, 0, dest, source, destWidth, destHeight, destChannels, kernelWidth, kernelHeight, destRows, destCols, strideX, strideY); } template void _cuda_prepareConvolutionSameBackProp(double* dest, const double* source, const unsigned int destWidth, const unsigned int destHeight, const unsigned int destChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY); template void _cuda_prepareConvolutionSameBackProp(float* dest, const float* source, const unsigned int destWidth, const unsigned int destHeight, const unsigned int destChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY); template __global__ void __cuda_prepareConvolutionSameBackProp(double* dest, const double* source, const unsigned int destWidth, const unsigned int destHeight, const unsigned int destChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY); template __global__ void __cuda_prepareConvolutionSameBackProp(float* dest, const float* source, const unsigned int destWidth, const unsigned int destHeight, const unsigned int destChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY); /* * rearrange * * helper for convolution */ template<typename T> __global__ void 
__cuda_rearrange(T *dest, const T *source, const unsigned int sourceRows, const unsigned int destRows, const unsigned int destColumns, const unsigned int destNumPixels) { unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; if(index < (destRows * destColumns)) { unsigned int img = index / (sourceRows * destNumPixels); unsigned int ch = (index % (sourceRows * destNumPixels)) / destNumPixels; unsigned int pix = (index % (sourceRows * destNumPixels)) % destNumPixels; dest[index] = source[sourceRows * (img * destNumPixels + pix) + ch]; } } template<typename T> void _cuda_rearrange(T *dest, const T *source, const unsigned int sourceRows, const unsigned int destRows, const unsigned int destColumns, const unsigned int destNumPixels) { unsigned int nElements = destColumns * destRows; int gridSize = (int)ceil((float)nElements/ THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_rearrange), dim3(gridSize), dim3(THREADS_PER_BLOCK), 0, 0, dest, source, sourceRows, destRows, destColumns, destNumPixels); } template __global__ void __cuda_rearrange(double *dest, const double *source, const unsigned int sourceRows, const unsigned int destRows, const unsigned int destColumns, const unsigned int destNumPixels); template __global__ void __cuda_rearrange(float *dest, const float *source, const unsigned int sourceRows, const unsigned int destRows, const unsigned int destColumns, const unsigned int destNumPixels); template void _cuda_rearrange(double *dest, const double *source, const unsigned int sourceRows, const unsigned int destRows, const unsigned int destColumns, const unsigned int destNumPixels); template void _cuda_rearrange(float *dest, const float *source, const unsigned int sourceRows, const unsigned int destRows, const unsigned int destColumns, const unsigned int destNumPixels); /* * * Rearrange back propogation * */ template<typename T> __global__ void __cuda_rearrangeBackProp(T *dest, const T *source, const unsigned int sourceColumns, const unsigned int destRows, const unsigned int destColumns, const unsigned int destNumPixels) { unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; if(index < (destRows * destColumns)) { unsigned int img = index / (destNumPixels * destRows); unsigned int pix = (index % (destNumPixels * destRows)) / destRows; unsigned int ch = (index % (destNumPixels * destRows)) % destRows; dest[index] = source[img*(destRows * destNumPixels) + ch * destNumPixels + pix]; } } template<typename T> void _cuda_rearrangeBackProp(T *dest, const T *source, const unsigned int sourceColumns, const unsigned int destRows, const unsigned int destColumns, const unsigned int destNumPixels) { unsigned int nElements = destRows * destColumns; int gridSize = (int)ceil((float)nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_rearrangeBackProp), dim3(gridSize), dim3(THREADS_PER_BLOCK), 0, 0, dest, source, sourceColumns, destRows, destColumns, destNumPixels); } template __global__ void __cuda_rearrangeBackProp(double *dest, const double *source, const unsigned int sourceColumns, const unsigned int destRows, const unsigned int destColumns, const unsigned int destNumPixels); template __global__ void __cuda_rearrangeBackProp(float *dest, const float *source, const unsigned int sourceColumns, const unsigned int destRows, const unsigned int destColumns, const unsigned int destNumPixels); template void _cuda_rearrangeBackProp(double *dest, const double *source, const unsigned int sourceColumns, const unsigned int destRows, const unsigned int destColumns, const unsigned int destNumPixels); 
template void _cuda_rearrangeBackProp(float *dest, const float *source, const unsigned int sourceColumns, const unsigned int destRows, const unsigned int destColumns, const unsigned int destNumPixels); /* * * argMax * * */ template<typename T> __global__ void __cuda_argMax(T *matrixPtr, unsigned int nRows, unsigned int nColumns, unsigned int *resultDevPtr){ unsigned int column= threadIdx.x + blockIdx.x * blockDim.x; if (column < nColumns){ int beginCol = column * nRows; T maxVal = matrixPtr[beginCol]; resultDevPtr[column] = 0; for (int i = 1; i < nRows; i++){ T val = matrixPtr[beginCol + i]; if (val > maxVal){ maxVal = val; resultDevPtr[column] = i; } } } } template<typename T> void _cuda_argMax(T *matrixPtr, unsigned int nRows, unsigned int nColumns, unsigned int *resultDevPtr) { // parallelization over columns only int gridSize = (int)ceil( (float) nColumns/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_argMax) , dim3(gridSize), dim3(THREADS_PER_BLOCK), 0, 0, matrixPtr, nRows, nColumns, resultDevPtr); } template __global__ void __cuda_argMax<double>(double *matrixPtr, unsigned int nRows, unsigned int nColumns, unsigned int *resultDevPtr); template void _cuda_argMax<double>(double *matrixPtr, unsigned int nRows, unsigned int nColumns, unsigned int *resultDevPtr); template __global__ void __cuda_argMax<float>(float *matrixPtr, unsigned int nRows, unsigned int nColumns, unsigned int *resultDevPtr); template void _cuda_argMax<float>(float *matrixPtr, unsigned int nRows, unsigned int nColumns, unsigned int *resultDevPtr); /* * * max * set max per column to 1.0, all other to 0.0 * */ template<typename T> __global__ void __cuda_max(T *devResult, unsigned int nRows, unsigned int nColumns){ unsigned int column = threadIdx.x + blockIdx.x * blockDim.x; if (column < nColumns) { unsigned int argMax = 0; T max = devResult[column * nRows]; for (int i = 0; i < nRows; i++) { if (devResult[column * nRows + i] > max) { max = devResult[column * nRows + i]; argMax = i; } devResult[column * nRows + i] = 0.0; } devResult[column * nRows + argMax] = 1.0; } } template<typename T> void _cuda_max(T *devResult, unsigned int nRows, unsigned int nColumns) { int gridSize = (int)ceil( (float) nColumns/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_max) , dim3(gridSize), dim3(THREADS_PER_BLOCK), 0, 0, devResult, nRows, nColumns); } template __global__ void __cuda_max<double>(double *devResult, unsigned int nRows, unsigned int nColumns); template void _cuda_max<double>(double *devResult, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_max<float>(float *devResult, unsigned int nRows, unsigned int nColumns); template void _cuda_max<float>(float *devResult, unsigned int nRows, unsigned int nColumns); /* * * max * * */ template<typename T> __global__ void __cuda_max(T *devResult, const T *devA, const T *devB, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) { if (devA[index] < devB[index]) devResult[index] = devB[index]; else devResult[index] = devA[index]; } } template<typename T> void _cuda_max(T *devResult, const T *devA, const T *devB, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_max) , dim3(gridSize), dim3(THREADS_PER_BLOCK), 0, 0, devResult, devA, devB, nElements); } template __global__ void __cuda_max<double>(double *devResult, const double *devA, const double *devB, unsigned int nElements); 
template void _cuda_max<double>(double *devResult, const double *devA, const double *devB, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_max<float>(float *devResult, const float *devA, const float *devB, unsigned int nElements); template void _cuda_max<float>(float *devResult, const float *devA, const float *devB, unsigned int nRows, unsigned int nColumns); /* * * elementwiseMultiplicationWithKroneckerDelta * * */ template<typename T> __global__ void __cuda_elementwiseMultiplicationWithKroneckerDelta(T *devResult, const T *devA, const T *devB, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) { if (devA[index] != devB[index]) devResult[index] = 0; } } template<typename T> void _cuda_elementwiseMultiplicationWithKroneckerDelta(T *devResult, const T *devA, const T *devB, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_elementwiseMultiplicationWithKroneckerDelta) , dim3(gridSize), dim3(THREADS_PER_BLOCK), 0, 0, devResult, devA, devB, nElements); } template __global__ void __cuda_elementwiseMultiplicationWithKroneckerDelta<double>(double *devResult, const double *devA, const double *devB, unsigned int nElements); template void _cuda_elementwiseMultiplicationWithKroneckerDelta<double>(double *devResult, const double *devA, const double *devB, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_elementwiseMultiplicationWithKroneckerDelta<float>(float *devResult, const float *devA, const float *devB, unsigned int nElements); template void _cuda_elementwiseMultiplicationWithKroneckerDelta<float>(float *devResult, const float *devA, const float *devB, unsigned int nRows, unsigned int nColumns); /* * * nClassificationErrors * * */ template<typename T> __global__ void __cuda_nClassificationErrors(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, unsigned int *resultDevPtr){ unsigned int column= threadIdx.x + blockIdx.x * blockDim.x; if (column < nColumns){ int beginCol = column * nRows; T maxVal = matrixPtr[beginCol]; uint argmax = 0; for (int i = 1; i < nRows; i++){ T val = matrixPtr[beginCol + i]; if (val > maxVal){ maxVal = val; argmax = i; } } if (targets[nRows * column + argmax] != 1.0){ atomicAdd(resultDevPtr, 1); } } } template<typename T> void _cuda_nClassificationErrors(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, unsigned int *resultDevPtr) { // parallelization over columns only int gridSize = (int)ceil( (float) nColumns/THREADS_PER_BLOCK); unsigned int result = 0; hipMemcpy(resultDevPtr, &result, sizeof(unsigned int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( __cuda_nClassificationErrors) , dim3(gridSize), dim3(THREADS_PER_BLOCK), 0, 0, matrixPtr, nRows, nColumns, targets, resultDevPtr); } template __global__ void __cuda_nClassificationErrors<double>(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, unsigned int *resultDevPtr); template void _cuda_nClassificationErrors<double>(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, unsigned int *resultDevPtr); template __global__ void __cuda_nClassificationErrors<float>(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, unsigned int *resultDevPtr); template void _cuda_nClassificationErrors<float>(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, 
unsigned int *resultDevPtr); // crossEntropyObjectiveFunction template<typename T> __global__ void __cuda_crossEntropyObjectiveFunction(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, T *objFctn){ unsigned int column= threadIdx.x + blockIdx.x * blockDim.x; if (column < nColumns){ objFctn[column] = 0; for (int i = 0; i < nRows; i++){ if (targets[nRows * column + i] == 1.0) objFctn[column] -= log(matrixPtr[nRows * column + i]); } } } template<typename T> void _cuda_crossEntropyObjectiveFunction(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T* targets, T *objFctn) { // parallelization over columns only int gridSize = (int)ceil( (float) nColumns/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_crossEntropyObjectiveFunction) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, matrixPtr, nRows, nColumns, targets, objFctn); } template __global__ void __cuda_crossEntropyObjectiveFunction<double>(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, double *objFctn); template void _cuda_crossEntropyObjectiveFunction<double>(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, double *objFctn); template __global__ void __cuda_crossEntropyObjectiveFunction<float>(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, float *objFctn); template void _cuda_crossEntropyObjectiveFunction<float>(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, float *objFctn); // weightedCrossEntropyObjectiveFunction template<typename T> __global__ void __cuda_weightedCrossEntropyObjectiveFunction(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, T *objFctn, T *weights){ unsigned int column= threadIdx.x + blockIdx.x * blockDim.x; if (column < nColumns){ objFctn[column] = 0; for (int i = 0; i < nRows; i++){ if (targets[nRows * column + i] == 1.0) objFctn[column] -= log(matrixPtr[nRows * column + i]) * weights[column]; } } } template<typename T> void _cuda_weightedCrossEntropyObjectiveFunction(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, T *objFctn, T *weights) { // parallelization over columns only int gridSize = (int)ceil( (float) nColumns/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_weightedCrossEntropyObjectiveFunction) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, matrixPtr, nRows, nColumns, targets, objFctn, weights); } template __global__ void __cuda_weightedCrossEntropyObjectiveFunction<double>(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, double *objFctn, double *weights); template void _cuda_weightedCrossEntropyObjectiveFunction<double>(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, double *objFctn, double *weights); template __global__ void __cuda_weightedCrossEntropyObjectiveFunction<float>(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, float *objFctn, float *weights); template void _cuda_weightedCrossEntropyObjectiveFunction<float>(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, float *objFctn, float *weights); // squaredErrorObjectiveFunction template<typename T> __global__ void __cuda_squaredErrorObjectiveFunction(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, T *objFctn){ unsigned int column = threadIdx.x + blockIdx.x * blockDim.x; if (column < nColumns){ objFctn[column] = 0.0f; for (int row = 0; row < nRows; row++){ unsigned int position = column * nRows + row; objFctn[column] += 
(matrixPtr[position] - targets[position]) * (matrixPtr[position] - targets[position]); } } } template<typename T> void _cuda_squaredErrorObjectiveFunction(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, T *objFctn) { int gridSize = (int)ceil( (float) nColumns/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_squaredErrorObjectiveFunction) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, matrixPtr, nRows, nColumns, targets, objFctn); } template __global__ void __cuda_squaredErrorObjectiveFunction(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, double *objFctn); template void _cuda_squaredErrorObjectiveFunction(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, double *objFctn); template __global__ void __cuda_squaredErrorObjectiveFunction(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, float *objFctn); template void _cuda_squaredErrorObjectiveFunction(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, float *objFctn); // weightedSquaredErrorObjectiveFunction template<typename T> __global__ void __cuda_weightedSquaredErrorObjectiveFunction(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, T *objFctn, T *weights){ unsigned int column = threadIdx.x + blockIdx.x * blockDim.x; if (column < nColumns){ objFctn[column] = 0.0f; for (int row = 0; row < nRows; row++){ unsigned int position = column * nRows + row; objFctn[column] += (matrixPtr[position] - targets[position]) * (matrixPtr[position] - targets[position]); } objFctn[column] *= weights[column]; } } template<typename T> void _cuda_weightedSquaredErrorObjectiveFunction(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, T *objFctn, T *weights) { int gridSize = (int)ceil( (float) nColumns/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_weightedSquaredErrorObjectiveFunction) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, matrixPtr, nRows, nColumns, targets, objFctn, weights); } template __global__ void __cuda_weightedSquaredErrorObjectiveFunction(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, double *objFctn, double *weights); template void _cuda_weightedSquaredErrorObjectiveFunction(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, double *objFctn, double *weights); template __global__ void __cuda_weightedSquaredErrorObjectiveFunction(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, float *objFctn, float *weights); template void _cuda_weightedSquaredErrorObjectiveFunction(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, float *objFctn, float *weights); // smoothedL1ObjectiveFunction template<typename T> __global__ void __cuda_smoothedL1ObjectiveFunction(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, T *objFctn){ unsigned int column = threadIdx.x + blockIdx.x * blockDim.x; if (column < nColumns){ objFctn[column] = 0.0f; for (int row = 0; row < nRows; row++){ unsigned int position = column * nRows + row; if ((matrixPtr[position] - targets[position]) < -1.0) objFctn[column] += (targets[position] - matrixPtr[position]) - 0.5; else if ((matrixPtr[position] - targets[position]) > 1.0) objFctn[column] += (matrixPtr[position] - targets[position]) - 0.5; else objFctn[column] += 0.5 * (matrixPtr[position] - targets[position]) * (matrixPtr[position] - targets[position]); } } } template<typename T> void _cuda_smoothedL1ObjectiveFunction(T 
*matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, T *objFctn) { int gridSize = (int)ceil( (float) nColumns/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_smoothedL1ObjectiveFunction) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, matrixPtr, nRows, nColumns, targets, objFctn); } template __global__ void __cuda_smoothedL1ObjectiveFunction(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, double *objFctn); template void _cuda_smoothedL1ObjectiveFunction(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, double *objFctn); template __global__ void __cuda_smoothedL1ObjectiveFunction(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, float *objFctn); template void _cuda_smoothedL1ObjectiveFunction(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, float *objFctn); // weightedSmoothedL1ObjectiveFunction template<typename T> __global__ void __cuda_weightedSmoothedL1ObjectiveFunction(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, T* weights, T *objFctn){ unsigned int column = threadIdx.x + blockIdx.x * blockDim.x; if (column < nColumns){ objFctn[column] = 0.0f; for (int row = 0; row < nRows; row++){ unsigned int position = column * nRows + row; if ((matrixPtr[position] - targets[position]) < -1.0) objFctn[column] += (targets[position] - matrixPtr[position]) - 0.5; else if ((matrixPtr[position] - targets[position]) > 1.0) objFctn[column] += (matrixPtr[position] - targets[position]) - 0.5; else objFctn[column] += 0.5 * (matrixPtr[position] - targets[position]) * (matrixPtr[position] - targets[position]); } objFctn[column] *= weights[column]; } } template<typename T> void _cuda_weightedSmoothedL1ObjectiveFunction(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, T* weights, T *objFctn) { int gridSize = (int)ceil( (float) nColumns/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_weightedSmoothedL1ObjectiveFunction) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, matrixPtr, nRows, nColumns, targets, weights, objFctn); } template __global__ void __cuda_weightedSmoothedL1ObjectiveFunction(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, double *objFctn, double *weights); template void _cuda_weightedSmoothedL1ObjectiveFunction(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, double *objFctn, double *weights); template __global__ void __cuda_weightedSmoothedL1ObjectiveFunction(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, float *objFctn, float *weights); template void _cuda_weightedSmoothedL1ObjectiveFunction(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, float *objFctn, float *weights); /* * appendSecondOrderFeatures */ template<typename T> __global__ void __cuda_appendSecondOrderFeatures(const T *X, unsigned int nRowsX, unsigned int nColumnsX, T *Y, unsigned int nRowsY, unsigned int offset){ unsigned int column = threadIdx.x + blockIdx.x * blockDim.x; if (column < nColumnsX){ unsigned int pos = offset; for (unsigned int i = 0; i < nRowsX; ++ i) { for (unsigned int j = i; j < nRowsX; ++ j) { Y[column * nRowsY + pos] = X[column * nRowsX + i] * X[column * nRowsX + j]; pos++; } } } } template<typename T> void _cuda_appendSecondOrderFeatures(const T *X, unsigned int nRowsX, unsigned int nColumnsX, T *Y, unsigned int nRowsY, unsigned int offset){ int gridSize = (int)ceil( (float) nColumnsX/THREADS_PER_BLOCK); 
hipLaunchKernelGGL(( __cuda_appendSecondOrderFeatures) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, X, nRowsX, nColumnsX, Y, nRowsY, offset); } template __global__ void __cuda_appendSecondOrderFeatures(const double *X, unsigned int nRowsX, unsigned int nColumnsX, double *Y, unsigned int nRowsY, unsigned int offset); template void _cuda_appendSecondOrderFeatures(const double *X, unsigned int nRowsX, unsigned int nColumnsX, double *Y, unsigned int nRowsY, unsigned int offset); template __global__ void __cuda_appendSecondOrderFeatures(const float *X, unsigned int nRowsX, unsigned int nColumnsX, float *Y, unsigned int nRowsY, unsigned int offset); template void _cuda_appendSecondOrderFeatures(const float *X, unsigned int nRowsX, unsigned int nColumnsX, float *Y, unsigned int nRowsY, unsigned int offset); /* * appendDiagonalSecondOrderFeatures */ template<typename T> __global__ void __cuda_appendDiagonalSecondOrderFeatures(const T *X, unsigned int nRowsX, unsigned int nColumnsX, T *Y, unsigned int nRowsY, unsigned int offset){ unsigned int column = threadIdx.x + blockIdx.x * blockDim.x; if (column < nColumnsX){ unsigned int pos = offset; for (unsigned int i = 0; i < nRowsX; ++ i) { Y[column * nRowsY + pos] = X[column * nRowsX + i] * X[column * nRowsX + i]; pos++; } } } template<typename T> void _cuda_appendDiagonalSecondOrderFeatures(const T *X, unsigned int nRowsX, unsigned int nColumnsX, T *Y, unsigned int nRowsY, unsigned int offset){ int gridSize = (int)ceil( (float) nColumnsX/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_appendDiagonalSecondOrderFeatures) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, X, nRowsX, nColumnsX, Y, nRowsY, offset); } template __global__ void __cuda_appendDiagonalSecondOrderFeatures(const double *X, unsigned int nRowsX, unsigned int nColumnsX, double *Y, unsigned int nRowsY, unsigned int offset); template void _cuda_appendDiagonalSecondOrderFeatures(const double *X, unsigned int nRowsX, unsigned int nColumnsX, double *Y, unsigned int nRowsY, unsigned int offset); template __global__ void __cuda_appendDiagonalSecondOrderFeatures(const float *X, unsigned int nRowsX, unsigned int nColumnsX, float *Y, unsigned int nRowsY, unsigned int offset); template void _cuda_appendDiagonalSecondOrderFeatures(const float *X, unsigned int nRowsX, unsigned int nColumnsX, float *Y, unsigned int nRowsY, unsigned int offset); // appendThirdOrderFeatures template<typename T> __global__ void __cuda_appendThirdOrderFeatures(const T *X, unsigned int nRowsX, unsigned int nColumnsX, T *Y, unsigned int nRowsY, unsigned int offset){ unsigned int column = threadIdx.x + blockIdx.x * blockDim.x; if (column < nColumnsX){ unsigned int pos = offset; for (unsigned int i = 0; i < nRowsX; ++ i) { for (unsigned int j = i; j < nRowsX; ++ j) { for (unsigned int k = j; k < nRowsX; ++ k) { Y[column * nRowsY + pos] = X[column * nRowsX + i] * X[column * nRowsX + j] * X[column * nRowsX + k]; pos++; } } } } } template<typename T> void _cuda_appendThirdOrderFeatures(const T *X, unsigned int nRowsX, unsigned int nColumnsX, T *Y, unsigned int nRowsY, unsigned int offset){ int gridSize = (int)ceil( (float) nColumnsX/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_appendThirdOrderFeatures) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, X, nRowsX, nColumnsX, Y, nRowsY, offset); } template __global__ void __cuda_appendThirdOrderFeatures(const double *X, unsigned int nRowsX, unsigned int nColumnsX, double *Y, unsigned int nRowsY, unsigned int offset); template void _cuda_appendThirdOrderFeatures(const 
double *X, unsigned int nRowsX, unsigned int nColumnsX, double *Y, unsigned int nRowsY, unsigned int offset); template __global__ void __cuda_appendThirdOrderFeatures(const float *X, unsigned int nRowsX, unsigned int nColumnsX, float *Y, unsigned int nRowsY, unsigned int offset); template void _cuda_appendThirdOrderFeatures(const float *X, unsigned int nRowsX, unsigned int nColumnsX, float *Y, unsigned int nRowsY, unsigned int offset); // appendDiagonalThirdOrderFeatures template<typename T> __global__ void __cuda_appendDiagonalThirdOrderFeatures(const T *X, unsigned int nRowsX, unsigned int nColumnsX, T *Y, unsigned int nRowsY, unsigned int offset){ unsigned int column = threadIdx.x + blockIdx.x * blockDim.x; if (column < nColumnsX){ unsigned int pos = offset; for (unsigned int i = 0; i < nRowsX; ++ i) { Y[column * nRowsY + pos] = X[column * nRowsX + i] * X[column * nRowsX + i] * X[column * nRowsX + i]; pos++; } } } template<typename T> void _cuda_appendDiagonalThirdOrderFeatures(const T *X, unsigned int nRowsX, unsigned int nColumnsX, T *Y, unsigned int nRowsY, unsigned int offset){ int gridSize = (int)ceil( (float) nColumnsX/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_appendDiagonalThirdOrderFeatures) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, X, nRowsX, nColumnsX, Y, nRowsY, offset); } template __global__ void __cuda_appendDiagonalThirdOrderFeatures(const double *X, unsigned int nRowsX, unsigned int nColumnsX, double *Y, unsigned int nRowsY, unsigned int offset); template void _cuda_appendDiagonalThirdOrderFeatures(const double *X, unsigned int nRowsX, unsigned int nColumnsX, double *Y, unsigned int nRowsY, unsigned int offset); template __global__ void __cuda_appendDiagonalThirdOrderFeatures(const float *X, unsigned int nRowsX, unsigned int nColumnsX, float *Y, unsigned int nRowsY, unsigned int offset); template void _cuda_appendDiagonalThirdOrderFeatures(const float *X, unsigned int nRowsX, unsigned int nColumnsX, float *Y, unsigned int nRowsY, unsigned int offset); /* * * gaussianMixturePosteriors * computes unnormalized, unexponentiated Gaussian mixture posteriors * -> p(c|x) can be obtained with application of softmax on the result of this function * */ template<typename T> __global__ void __cuda_gaussianMixturePosteriors(T *P, const T *X, const T *means, const T *variances, const T *weights, unsigned int nFeatures, unsigned int featureDim, unsigned int nMixtures){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nFeatures * nMixtures) { unsigned int k = index % nMixtures; unsigned int n = index / nMixtures; T expn = 0; T det = 0; for (unsigned int d = 0; d < featureDim; d++) { expn += (X[n * featureDim + d] - means[d * nMixtures + k]) * (X[n * featureDim + d] - means[d * nMixtures + k]) / variances[d * nMixtures + k]; det += log(variances[d * nMixtures + k]); } P[index] = log(weights[k]) - 0.5 * expn - 0.5 * log(2 * CUDART_PI) * featureDim - 0.5 * det; } } template<typename T> void _cuda_gaussianMixturePosteriors(T *P, const T *X, const T *means, const T *variances, const T *weights, unsigned int nFeatures, unsigned int featureDim, unsigned int nMixtures) { unsigned int nElements = nFeatures * nMixtures; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_gaussianMixturePosteriors) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, P, X, means, variances, weights, nFeatures, featureDim, nMixtures); } template __global__ void __cuda_gaussianMixturePosteriors(double *P, const double *X, const double *means, 
const double *variances, const double *weights, unsigned int nFeatures, unsigned int featureDim, unsigned int nMixtures); template void _cuda_gaussianMixturePosteriors(double *P, const double *X, const double *means, const double *variances, const double *weights, unsigned int nFeatures, unsigned int featureDim, unsigned int nMixtures); template __global__ void __cuda_gaussianMixturePosteriors(float *P, const float *X, const float *means, const float *variances, const float *weights, unsigned int nFeatures, unsigned int featureDim, unsigned int nMixtures); template void _cuda_gaussianMixturePosteriors(float *P, const float *X, const float *means, const float *variances, const float *weights, unsigned int nFeatures, unsigned int featureDim, unsigned int nMixtures); /* * * fisher encoding * */ template<typename T> __global__ void __cuda_fisherEncoding(T *F, const T *X, const T *means, const T *variances, const T *weights, const T* gamma, unsigned int nFeatures, unsigned int featureDim, unsigned int nMixtures){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nFeatures * nMixtures * featureDim) { unsigned int n = index / (nMixtures * featureDim); unsigned int k = (index % (nMixtures * featureDim)) / featureDim; unsigned int d = (index % (nMixtures * featureDim)) % featureDim; // first order component F[d + k * featureDim + n * featureDim * nMixtures * 2] = gamma[k + n * nMixtures] * (X[d + n * featureDim] - means[k + d * nMixtures]) / sqrt(variances[k + d * nMixtures] * weights[k]); // second order component F[d + (k + nMixtures) * featureDim + n * featureDim * nMixtures * 2] = gamma[k + n * nMixtures] * ( (X[d + n * featureDim] - means[k + d * nMixtures]) * (X[d + n * featureDim] - means[k + d * nMixtures]) / variances[k + d * nMixtures] - 1.0 ) / sqrt(2 * weights[k]); } } template<typename T> void _cuda_fisherEncoding(T *F, const T *X, const T *means, const T *variances, const T *weights, const T *gamma, unsigned int nFeatures, unsigned int featureDim, unsigned int nMixtures) { unsigned int nElements = nFeatures * nMixtures * featureDim; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_fisherEncoding) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, F, X, means, variances, weights, gamma, nFeatures, featureDim, nMixtures); } template __global__ void __cuda_fisherEncoding(double *F, const double *X, const double *means, const double *variances, const double *weights, const double *gamma, unsigned int nFeatures, unsigned int featureDim, unsigned int nMixtures); template void _cuda_fisherEncoding(double *F, const double *X, const double *means, const double *variances, const double *weights, const double *gamma, unsigned int nFeatures, unsigned int featureDim, unsigned int nMixtures); template __global__ void __cuda_fisherEncoding(float *F, const float *X, const float *means, const float *variances, const float *weights, const float *gamma, unsigned int nFeatures, unsigned int featureDim, unsigned int nMixtures); template void _cuda_fisherEncoding(float *F, const float *X, const float *means, const float *variances, const float *weights, const float *gamma, unsigned int nFeatures, unsigned int featureDim, unsigned int nMixtures); /* * * dropout * */ template<typename T> __global__ void __cuda_dropout(T *data, const T *mask, unsigned int nElements, T dropoutProbability){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if ((index < nElements) && (mask[index] < dropoutProbability)) data[index] = 0.0; } 
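/*
 * Note on semantics: __cuda_dropout implements plain (non-inverted) dropout.
 * An entry is zeroed whenever its mask value is below dropoutProbability, and
 * the surviving activations are not rescaled by 1/(1 - dropoutProbability).
 * The mask is presumably filled with uniform(0,1) random values by the caller,
 * so any compensation (e.g. rescaling at test time) has to happen elsewhere.
 */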
template<typename T> void _cuda_dropout(T *data, const T *mask, unsigned int nRows, unsigned int nColumns, T dropoutProbability) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); hipLaunchKernelGGL(( __cuda_dropout) , dim3(gridSize) , dim3(THREADS_PER_BLOCK) , 0, 0, data, mask, nElements, dropoutProbability); } template __global__ void __cuda_dropout(double *data, const double *mask, unsigned int nElements, double dropoutProbability); template void _cuda_dropout(double *data, const double *mask, unsigned int nRows, unsigned int nColumns, double dropoutProbability); template __global__ void __cuda_dropout(float *data, const float *mask, unsigned int nElements, float dropoutProbability); template void _cuda_dropout(float *data, const float *mask, unsigned int nRows, unsigned int nColumns, float dropoutProbability);
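/*
 * Illustrative host-side sketch (not part of the original kernel collection):
 * one possible way to drive _cuda_dropout for a float matrix that already
 * resides on the device. The mask convention (one uniform(0,1) value per
 * matrix element, generated on the host here for simplicity) and the helper
 * name exampleDropoutUsage are assumptions made for this example only; the
 * surrounding toolkit may manage its random masks and device buffers differently.
 */
#include <vector>
#include <random>

static void exampleDropoutUsage(float *devData, unsigned int nRows, unsigned int nColumns, float dropoutProbability) {
	unsigned int nElements = nRows * nColumns;
	// fill a host-side mask with uniform(0,1) values, one per matrix element
	std::vector<float> hostMask(nElements);
	std::mt19937 rng(0);
	std::uniform_real_distribution<float> uniform(0.0f, 1.0f);
	for (unsigned int i = 0; i < nElements; i++)
		hostMask[i] = uniform(rng);
	// copy the mask to the device and zero the selected activations in place
	float *devMask = 0;
	hipMalloc((void**)&devMask, nElements * sizeof(float));
	hipMemcpy(devMask, hostMask.data(), nElements * sizeof(float), hipMemcpyHostToDevice);
	_cuda_dropout(devData, devMask, nRows, nColumns, dropoutProbability);
	hipFree(devMask);
}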
22add1c034167ce02b6437d3b398c8a839221a00.cu
#include "stdio.h" #include "CudaMatrixKernels.hh" #include <math_constants.h> #include <cuda_runtime.h> #include <limits> #ifdef __CDT_PARSER__ #define __global__ #define __device__ #define __host__ #define __shared__ #endif #define THREADS_PER_BLOCK 1024 /*****************************************************************************/ /* HELPER FUNCTIONS */ /*****************************************************************************/ /* * * atomicAdd for double * */ #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600 #else __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __longlong_as_double(old); } #endif /*****************************************************************************/ /* * * mixed precision axpy * */ __global__ void __cuda_axpy(int nElements, float alpha, const float *x, double *y){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) y[index] += alpha * x[index]; } void _cuda_axpy(int nElements, float alpha, const float *x, double *y) { int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); __cuda_axpy <<< gridSize , THREADS_PER_BLOCK >>> (nElements, alpha, x, y); } /* * * exp * */ template<typename T> __global__ void __cuda_exp(T *data, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) data[index] = exp(data[index]); } template<typename T> void _cuda_exp(T *data, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); __cuda_exp <<< gridSize , THREADS_PER_BLOCK >>> (data, nElements); } template __global__ void __cuda_exp<float>(float *, unsigned int); template __global__ void __cuda_exp<double>(double *, unsigned int); template void _cuda_exp<float>(float *, unsigned int, unsigned int); template void _cuda_exp<double>(double *, unsigned int, unsigned int); /* * * signedPow * */ template<typename T> __global__ void __cuda_signedPow(T *data, unsigned int nElements, T p){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) { if(data[index] < 0) data[index] = -pow(-data[index], p); else data[index] = pow(data[index], p); } } template<typename T> void _cuda_signedPow(T *data, unsigned int nRows, unsigned int nColumns, T p) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); __cuda_signedPow <<< gridSize , THREADS_PER_BLOCK >>> (data, nElements, p); } template __global__ void __cuda_signedPow<float>(float *, unsigned int, float); template __global__ void __cuda_signedPow<double>(double *, unsigned int, double); template void _cuda_signedPow<float>(float *, unsigned int, unsigned int, float); template void _cuda_signedPow<double>(double *, unsigned int, unsigned int, double); /* * * log * */ template<typename T> __global__ void __cuda_log(T *data, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) data[index] = log(data[index]); } template<typename T> void _cuda_log(T *data, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = 
(int)ceil( (float) nElements/THREADS_PER_BLOCK); __cuda_log <<< gridSize , THREADS_PER_BLOCK >>> (data, nElements); } template __global__ void __cuda_log<float>(float *, unsigned int); template __global__ void __cuda_log<double>(double *, unsigned int); template void _cuda_log<float>(float *, unsigned int, unsigned int); template void _cuda_log<double>(double *, unsigned int, unsigned int); /* * * sin * */ template<typename T> __global__ void __cuda_sin(T *data, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) data[index] = sin(data[index]); } template<typename T> void _cuda_sin(T *data, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); __cuda_sin <<< gridSize , THREADS_PER_BLOCK >>> (data, nElements); } template __global__ void __cuda_sin<float>(float *, unsigned int); template __global__ void __cuda_sin<double>(double *, unsigned int); template void _cuda_sin<float>(float *, unsigned int, unsigned int); template void _cuda_sin<double>(double *, unsigned int, unsigned int); /* * * cos * */ template<typename T> __global__ void __cuda_cos(T *data, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) data[index] = cos(data[index]); } template<typename T> void _cuda_cos(T *data, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); __cuda_cos <<< gridSize , THREADS_PER_BLOCK >>> (data, nElements); } template __global__ void __cuda_cos<float>(float *, unsigned int); template __global__ void __cuda_cos<double>(double *, unsigned int); template void _cuda_cos<float>(float *, unsigned int, unsigned int); template void _cuda_cos<double>(double *, unsigned int, unsigned int); /* * * asin * */ template<typename T> __global__ void __cuda_asin(T *data, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) data[index] = asin(data[index]); } template<typename T> void _cuda_asin(T *data, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); __cuda_asin <<< gridSize , THREADS_PER_BLOCK >>> (data, nElements); } template __global__ void __cuda_asin<float>(float *, unsigned int); template __global__ void __cuda_asin<double>(double *, unsigned int); template void _cuda_asin<float>(float *, unsigned int, unsigned int); template void _cuda_asin<double>(double *, unsigned int, unsigned int); /* * * acos * */ template<typename T> __global__ void __cuda_acos(T *data, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) data[index] = acos(data[index]); } template<typename T> void _cuda_acos(T *data, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); __cuda_acos <<< gridSize , THREADS_PER_BLOCK >>> (data, nElements); } template __global__ void __cuda_acos<float>(float *, unsigned int); template __global__ void __cuda_acos<double>(double *, unsigned int); template void _cuda_acos<float>(float *, unsigned int, unsigned int); template void _cuda_acos<double>(double *, unsigned int, unsigned int); /* * * abs * */ template<typename T> __global__ void __cuda_abs(T *data, unsigned int nElements){ unsigned int 
index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) { if (data[index] < 0) data[index] = -data[index]; } } template<typename T> void _cuda_abs(T *data, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); __cuda_abs <<< gridSize , THREADS_PER_BLOCK >>> (data, nElements); } template __global__ void __cuda_abs<float>(float *, unsigned int); template __global__ void __cuda_abs<double>(double *, unsigned int); template void _cuda_abs<float>(float *, unsigned int, unsigned int); template void _cuda_abs<double>(double *, unsigned int, unsigned int); /* * * tanh * * */ template<typename T> __global__ void __cuda_tanh(T *data, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) data[index] = tanh(data[index]); } template<typename T> void _cuda_tanh(T *data, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); __cuda_tanh <<< gridSize , THREADS_PER_BLOCK >>> (data, nElements); } template __global__ void __cuda_tanh<float>(float *, unsigned int); template __global__ void __cuda_tanh<double>(double *, unsigned int); template void _cuda_tanh<float>(float *, unsigned int, unsigned int); template void _cuda_tanh<double>(double *, unsigned int, unsigned int); /* * * sigmoid * */ template<typename T> __global__ void __cuda_sigmoid1(T *data, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) data[index] = 1.0 / (1.0 + exp(-data[index])); } template<typename T> __global__ void __cuda_sigmoid(T gamma, T *data, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) data[index] = 1.0 / (1.0 + exp(-gamma * data[index])); } template<typename T> void _cuda_sigmoid(T gamma, T *data, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); if (gamma == 1.0) __cuda_sigmoid1 <<< gridSize , THREADS_PER_BLOCK >>> (data, nElements); else __cuda_sigmoid <<< gridSize , THREADS_PER_BLOCK >>> (gamma, data, nElements); } template void _cuda_sigmoid<double>(double gamma, double *data, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_sigmoid<double>(double gamma, double *data, unsigned int nElements); template __global__ void __cuda_sigmoid1<double>(double *data, unsigned int nElements); template void _cuda_sigmoid<float>(float gamma, float *data, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_sigmoid<float>(float gamma, float *data, unsigned int nElements); template __global__ void __cuda_sigmoid1<float>(float *data, unsigned int nElements); /* * * triangle * */ template<typename T> __global__ void __cuda_triangle(T *data, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) { if ((data[index] < -1.0) || (data[index] > 1.0)) data[index] = 0.0; else if (data[index] < 0.0) data[index] = 1.0 + data[index]; else data[index] = 1.0 - data[index]; } } template<typename T> void _cuda_triangle(T *data, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); __cuda_triangle <<< gridSize , THREADS_PER_BLOCK >>> (data, nElements); } template void 
_cuda_triangle<double>(double *data, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_triangle<double>(double *data, unsigned int nElements); template void _cuda_triangle<float>(float *data, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_triangle<float>(float *data, unsigned int nElements); /* * * sum * */ template<typename T> __global__ void __cuda_sum(T *data, unsigned int nRows, unsigned int nColumns, T *result){ *result = 0; for (int i = 0; i < nRows * nColumns; i++){ *result += data[i]; } } template<typename T> void _cuda_sum(T *data, unsigned int nRows, unsigned int nColumns, T *result) { // no parallelization, but probably not relevant __cuda_sum <<< 1,1>>> (data, nRows, nColumns, result); } template __global__ void __cuda_sum<double>(double *data, unsigned int nRows, unsigned int nColumns, double *result); template void _cuda_sum<double>(double *data, unsigned int nRows, unsigned int nColumns, double *result); template __global__ void __cuda_sum<float>(float *data, unsigned int nRows, unsigned int nColumns, float *result); template void _cuda_sum<float>(float *data, unsigned int nRows, unsigned int nColumns, float *result); /* * * columnwiseSquaredEuclideanDistance * */ template<typename T> __global__ void __cuda_columnwiseSquaredEuclideanDistance(const T *A, unsigned int nRows, unsigned int nColumns, const T *v, T *result){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nRows * nColumns) { T d = A[index] - v[index % nRows]; d = d*d; atomicAdd(&(result[index / nRows]), d); } } template<typename T> void _cuda_columnwiseSquaredEuclideanDistance(const T *A, unsigned int nRows, unsigned int nColumns, const T *v, T *result) { int gridSize = (int)ceil( (float) (nRows * nColumns)/THREADS_PER_BLOCK); __cuda_columnwiseSquaredEuclideanDistance <<< gridSize , THREADS_PER_BLOCK >>> (A, nRows, nColumns, v, result); } template void _cuda_columnwiseSquaredEuclideanDistance<double>(const double *A, unsigned int nRows, unsigned int nColumns, const double *v, double *result); template __global__ void __cuda_columnwiseSquaredEuclideanDistance<double>(const double *A, unsigned int nRows, unsigned int nColumns, const double *v, double *result); template void _cuda_columnwiseSquaredEuclideanDistance<float>(const float *A, unsigned int nRows, unsigned int nColumns, const float *v, float *result); template __global__ void __cuda_columnwiseSquaredEuclideanDistance<float>(const float *A, unsigned int nRows, unsigned int nColumns, const float *v, float *result); /* * * clone * */ template<typename T> __global__ void __cuda_clone(const T *dataA, T *dataB, unsigned int nRowsB, unsigned int nColumnsB, unsigned int nClones){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nRowsB * nColumnsB) { unsigned int nRowsA = nRowsB / nClones; unsigned int rowA = (index % nRowsA); unsigned int colA = index / nRowsB; dataB[index] = dataA[colA * nRowsA + rowA]; } } template<typename T> void _cuda_clone(const T *dataA, T *dataB, unsigned int nRowsB, unsigned int nColumnsB, unsigned int nClones) { int nElementsB = nRowsB * nColumnsB; int gridSize = (int)ceil( (float) nElementsB/THREADS_PER_BLOCK); __cuda_clone <<< gridSize , THREADS_PER_BLOCK >>> (dataA, dataB, nRowsB, nColumnsB, nClones); } template void _cuda_clone<double>(const double *dataA, double *dataB, unsigned int nRowsB, unsigned int nColumnsB, unsigned int nClones); template __global__ void __cuda_clone<double>(const double *dataA, double *dataB, unsigned int 
nRowsB, unsigned int nColumnsB, unsigned int nClones); template void _cuda_clone<float>(const float *dataA, float *dataB, unsigned int nRowsB, unsigned int nColumnsB, unsigned int nClones); template __global__ void __cuda_clone<float>(const float *dataA, float *dataB, unsigned int nRowsB, unsigned int nColumnsB, unsigned int nClones); /* * * cloneElementwise * */ template<typename T> __global__ void __cuda_cloneElementwise(const T *dataA, T *dataB, unsigned int nElementsB, unsigned int nClones){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElementsB) { unsigned int indexA = index / nClones; dataB[index] = dataA[indexA]; } } template<typename T> void _cuda_cloneElementwise(const T *dataA, T *dataB, unsigned int nRowsB, unsigned int nColumnsB, unsigned int nClones) { int nElementsB = nRowsB * nColumnsB; int gridSize = (int)ceil( (float) nElementsB/THREADS_PER_BLOCK); __cuda_cloneElementwise <<< gridSize , THREADS_PER_BLOCK >>> (dataA, dataB, nElementsB, nClones); } template void _cuda_cloneElementwise<double>(const double *dataA, double *dataB, unsigned int nRowsB, unsigned int nColumnsB, unsigned int nClones); template __global__ void __cuda_cloneElementwise<double>(const double *dataA, double *dataB, unsigned int nElementsB, unsigned int nClones); template void _cuda_cloneElementwise<float>(const float *dataA, float *dataB, unsigned int nRowsB, unsigned int nColumnsB, unsigned int nClones); template __global__ void __cuda_cloneElementwise<float>(const float *dataA, float *dataB, unsigned int nElementsB, unsigned int nClones); /* * * addElementsByModuloIndex * */ template<typename T> __global__ void __cuda_addElementsByModuloIndex(const T *dataA, T *dataB, unsigned int nRowsA, unsigned int nRowsB, unsigned int nColumns){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nRowsB * nColumns) { unsigned int rowB = index % nRowsB; unsigned int column = index / nRowsB; for (unsigned int j = 0; j < nRowsA / nRowsB; j++) { dataB[index] += dataA[column * nRowsA + (rowB + j * nRowsB)]; } } } template<typename T> void _cuda_addElementsByModuloIndex(const T *dataA, T *dataB, unsigned int nRowsA, unsigned int nRowsB, unsigned int nColumns) { unsigned int nElementsB = nRowsB * nColumns; int gridSize = (int)ceil( (float) nElementsB/THREADS_PER_BLOCK); __cuda_addElementsByModuloIndex <<< gridSize , THREADS_PER_BLOCK >>> (dataA, dataB, nRowsA, nRowsB, nColumns); } template void _cuda_addElementsByModuloIndex<double>(const double *dataA, double *dataB, unsigned int nRowsA, unsigned int nRowsB, unsigned int nColumns); template __global__ void __cuda_addElementsByModuloIndex<double>(const double *dataA, double *dataB, unsigned int nRowsA, unsigned int nRowsB, unsigned int nColumns); template void _cuda_addElementsByModuloIndex<float>(const float *dataA, float *dataB, unsigned int nRowsA, unsigned int nRowsB, unsigned int nColumns); template __global__ void __cuda_addElementsByModuloIndex<float>(const float *dataA, float *dataB, unsigned int nRowsA, unsigned int nRowsB, unsigned int nColumns); /* * * chiSquareFeatureMap * */ template<typename T> __global__ void __cuda_chiSquareFeatureMap(const T *dataA, T *dataB, unsigned int nElementsB, unsigned int n, T samplingDistance, T min){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElementsB) { unsigned int j = index % (2*n + 1); unsigned int baseIndex = index / (2*n + 1); T x = (dataA[baseIndex] > min ? 
dataA[baseIndex] : min); if (j == 0) { dataB[index] = sqrt(samplingDistance * x); } else if (j % 2 == 1) { T kappa = 1.0 / cosh(CUDART_PI * (j+1)/2 * samplingDistance); dataB[index] = sqrt(2 * kappa * samplingDistance * x) * cos((j+1)/2 * samplingDistance * log(x)); } else { T kappa = 1.0 / cosh(CUDART_PI * j/2 * samplingDistance); dataB[index] = sqrt(2 * kappa * samplingDistance * x) * sin(j/2 * samplingDistance * log(x)); } } } template<typename T> void _cuda_chiSquareFeatureMap(const T *dataA, T *dataB, unsigned int nElementsB, unsigned int n, T samplingDistance, T min) { int gridSize = (int)ceil( (float) nElementsB/THREADS_PER_BLOCK); __cuda_chiSquareFeatureMap <<< gridSize , THREADS_PER_BLOCK >>> (dataA, dataB, nElementsB, n, samplingDistance, min); } template void _cuda_chiSquareFeatureMap<double>(const double *dataA, double *dataB, unsigned int nElementsB, unsigned int n, double samplingDistance, double min); template __global__ void __cuda_chiSquareFeatureMap<double>(const double *dataA, double *dataB, unsigned int nElementsB, unsigned int n, double samplingDistance, double min); template void _cuda_chiSquareFeatureMap<float>(const float *dataA, float *dataB, unsigned int nElementsB, unsigned int n, float samplingDistance, float min); template __global__ void __cuda_chiSquareFeatureMap<float>(const float *dataA, float *dataB, unsigned int nElementsB, unsigned int n, float samplingDistance, float min); /* * * histogramIntersectionFeatureMap * */ template<typename T> __global__ void __cuda_histogramIntersectionFeatureMap(const T *dataA, T *dataB, unsigned int nElementsB, unsigned int n, T samplingDistance, T min){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElementsB) { unsigned int j = index % (2*n + 1); unsigned int baseIndex = index / (2*n + 1); T x = (dataA[baseIndex] > min ? 
dataA[baseIndex] : min); if (j == 0) { dataB[index] = sqrt(2 / CUDART_PI * samplingDistance * x); } else if (j % 2 == 1) { T kappa = 2.0 / (CUDART_PI * (1 + 4 * (j+1)/2 * samplingDistance * (j+1)/2 * samplingDistance)); dataB[index] = sqrt(2 * kappa * samplingDistance * x) * cos((j+1)/2 * samplingDistance * log(x)); } else { T kappa = 2.0 / (CUDART_PI * (1 + 4 * j/2 * samplingDistance * j/2 * samplingDistance)); dataB[index] = sqrt(2 * kappa * samplingDistance * x) * sin(j/2 * samplingDistance * log(x)); } } } template<typename T> void _cuda_histogramIntersectionFeatureMap(const T *dataA, T *dataB, unsigned int nElementsB, unsigned int n, T samplingDistance, T min) { int gridSize = (int)ceil( (float) nElementsB/THREADS_PER_BLOCK); __cuda_histogramIntersectionFeatureMap <<< gridSize , THREADS_PER_BLOCK >>> (dataA, dataB, nElementsB, n, samplingDistance, min); } template void _cuda_histogramIntersectionFeatureMap<double>(const double *dataA, double *dataB, unsigned int nElementsB, unsigned int n, double samplingDistance, double min); template __global__ void __cuda_histogramIntersectionFeatureMap<double>(const double *dataA, double *dataB, unsigned int nElementsB, unsigned int n, double samplingDistance, double min); template void _cuda_histogramIntersectionFeatureMap<float>(const float *dataA, float *dataB, unsigned int nElementsB, unsigned int n, float samplingDistance, float min); template __global__ void __cuda_histogramIntersectionFeatureMap<float>(const float *dataA, float *dataB, unsigned int nElementsB, unsigned int n, float samplingDistance, float min); /* * * elementwiseMultiplicationWithChiSquareFeatureMapDerivative * */ template<typename T> __global__ void __cuda_elementwiseMultiplicationWithApproximateFeatureMapDerivative(const T *dataA, T *dataB, unsigned int nElements, unsigned int n, T samplingDistance, T kappa0){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) { unsigned int j = index % (2 * n + 1); if (j == 0) { dataB[index] *= dataA[index]; } else if (j % 2 == 1) { dataB[index] *= dataA[index] - (j+1) * samplingDistance * dataA[index + 1]; } else { dataB[index] *= dataA[index] + j * samplingDistance * dataA[index - 1]; } dataB[index] *= samplingDistance * kappa0 / (2.0 * dataA[index - j] * dataA[index - j]); } } template<typename T> void _cuda_elementwiseMultiplicationWithApproximateFeatureMapDerivative(const T *dataA, T *dataB, unsigned int nElements, unsigned int n, T samplingDistance, T kappa0) { int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); __cuda_elementwiseMultiplicationWithApproximateFeatureMapDerivative <<< gridSize , THREADS_PER_BLOCK >>> (dataA, dataB, nElements, n, samplingDistance, kappa0); } template void _cuda_elementwiseMultiplicationWithApproximateFeatureMapDerivative<double>(const double *dataA, double *dataB, unsigned int nElements, unsigned int n, double samplingDistance, double kappa0); template __global__ void __cuda_elementwiseMultiplicationWithApproximateFeatureMapDerivative<double>(const double *dataA, double *dataB, unsigned int nElements, unsigned int n, double samplingDistance, double kappa0); template void _cuda_elementwiseMultiplicationWithApproximateFeatureMapDerivative<float>(const float *dataA, float *dataB, unsigned int nElements, unsigned int n, float samplingDistance, float kappa0); template __global__ void __cuda_elementwiseMultiplicationWithApproximateFeatureMapDerivative<float>(const float *dataA, float *dataB, unsigned int nElements, unsigned int n, float samplingDistance, float 
kappa0);

/*
 *
 * addSummedRows
 *
 */
template<typename T>
__global__ void __cuda_addSummedRows(T *vectorDevPtr, const T *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const T scale){
	unsigned int columnIndex = threadIdx.x + blockIdx.x * blockDim.x;
	if (columnIndex < nColumns){
		T result = 0.0;
		for (unsigned int i = 0; i < nRows; i++){
			// result += matrix(i,columnIndex)
			result += matrixDevPtr[columnIndex * nRows + i];
		}
		vectorDevPtr[columnIndex] += scale * result;
	}
}

template<typename T>
void _cuda_addSummedRows(T *vectorDevPtr, const T *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const T scale){
	// parallelize over columns
	int gridSize = (int)ceil( (float) nColumns/THREADS_PER_BLOCK);
	__cuda_addSummedRows <<< gridSize , THREADS_PER_BLOCK >>> (vectorDevPtr, matrixDevPtr, nRows, nColumns, scale);
}

template __global__ void __cuda_addSummedRows(double *vectorDevPtr, const double *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const double scale);
template void _cuda_addSummedRows(double *vectorDevPtr, const double *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const double scale);
template __global__ void __cuda_addSummedRows(float *vectorDevPtr, const float *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const float scale);
template void _cuda_addSummedRows(float *vectorDevPtr, const float *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const float scale);

/*
 * slightly faster version using tmp array
 *
 */
template<typename T>
__global__ void __cuda_summedRowsTmp(const T *matrixDevPtr, unsigned int nRows, unsigned int nColumns, T *tmpDevPtr, unsigned int tmpRows){
	unsigned int columnIndex = threadIdx.x + blockIdx.x * blockDim.x;
	unsigned int columnPart = blockIdx.y;
	if (columnIndex < nColumns){
		unsigned int nRowsDiv = nRows / tmpRows;
		unsigned int startRow = columnPart * nRowsDiv;
		if (startRow < nRows){
			unsigned int endRow = columnPart == tmpRows - 1 ?
nRows : (columnPart + 1) * nRowsDiv; T result = 0.0; for (unsigned int i = startRow; i < endRow; i++){ // result += matrix(i, columnIndex) result += matrixDevPtr[columnIndex * nRows + i]; } tmpDevPtr[columnIndex*tmpRows + columnPart] = result; } } } template<typename T> void _cuda_addSummedRows(T *vectorDevPtr, const T *matrixDevPtr, unsigned int nRows, unsigned int nColumns, T *tmpDevPtr, unsigned int tmpRows, const T scale){ int gridDimx = (int)ceil( (float) nColumns / THREADS_PER_BLOCK); int gridDimy = tmpRows; dim3 gridSize(gridDimx,gridDimy); __cuda_summedRowsTmp <<< gridSize , THREADS_PER_BLOCK >>> (matrixDevPtr, nRows, nColumns, tmpDevPtr, tmpRows); _cuda_addSummedRows<T>(vectorDevPtr, tmpDevPtr, tmpRows, nColumns, scale); } template __global__ void __cuda_summedRowsTmp<double>(const double *matrixDevPtr, unsigned int nRows, unsigned int nColumns, double *tmpDevPtr, unsigned int tmpRows); template void _cuda_addSummedRows<double>(double *vectorDevPtr, const double *matrixDevPtr, unsigned int nRows, unsigned int nColumns, double *tmpDevPtr, unsigned int tmpRows, const double scale); template __global__ void __cuda_summedRowsTmp<float>(const float *matrixDevPtr, unsigned int nRows, unsigned int nColumns, float *tmpDevPtr, unsigned int tmpRows); template void _cuda_addSummedRows<float>(float *vectorDevPtr, const float *matrixDevPtr, unsigned int nRows, unsigned int nColumns, float *tmpDevPtr, unsigned int tmpRows, const float scale); /* * * addSummedColumns * */ template<typename T> __global__ void __cuda_addSummedColumns(T *vectorDevPtr, const T *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const T scale){ unsigned int rowIndex = threadIdx.x + blockIdx.x * blockDim.x; if (rowIndex < nRows){ T result = 0.0; for (unsigned int i = 0; i < nColumns; i++){ // result += matrix(rowIndex,i) result += matrixDevPtr[i * nRows + rowIndex]; } vectorDevPtr[rowIndex] += scale * result; } } template<typename T> void _cuda_addSummedColumns(T *vectorDevPtr, const T *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const T scale){ // parallelize over rows int gridSize = (int)ceil( (float) nRows/THREADS_PER_BLOCK); __cuda_addSummedColumns <<< gridSize , THREADS_PER_BLOCK >>> (vectorDevPtr, matrixDevPtr, nRows, nColumns, scale); } template __global__ void __cuda_addSummedColumns<double>(double *vectorDevPtr, const double *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const double scale); template void _cuda_addSummedColumns<double>(double *vectorDevPtr, const double *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const double scale); template __global__ void __cuda_addSummedColumns<float>(float *vectorDevPtr, const float *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const float scale); template void _cuda_addSummedColumns<float>(float *vectorDevPtr, const float *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const float scale); /* * addSummedColumnsChannelWise * * */ template<typename T> __global__ void __cuda_addSummedColumnsChannelWise(T *vector, const T* matrix, const unsigned int channels, const unsigned int nRows, const unsigned int nColumns, const T scale) { unsigned int channelIndex = threadIdx.x + blockIdx.x * blockDim.x; if(channelIndex < channels) { unsigned int channelSize = nRows / channels; for(unsigned int i=0; i < channelSize; i++) { for(unsigned int j=0; j < nColumns; j++) { vector[channelIndex] += scale * matrix[j * nRows + channelIndex * channelSize + i]; } } } } template<typename T> void _cuda_addSummedColumnsChannelWise(T 
*vector, const T* matrix, const unsigned int channels, const unsigned int nRows, const unsigned int nColumns, const T scale) { int gridSize = (int)ceil( (float) channels/THREADS_PER_BLOCK); __cuda_addSummedColumnsChannelWise<<<gridSize, THREADS_PER_BLOCK>>>(vector, matrix, channels, nRows, nColumns, scale); } template __global__ void __cuda_addSummedColumnsChannelWise(double *vector, const double* matrix, const unsigned int channels, const unsigned int nRows, const unsigned int nColumns, const double scale); template __global__ void __cuda_addSummedColumnsChannelWise(float *vector, const float* matrix, const unsigned int channels, const unsigned int nRows, const unsigned int nColumns, const float scale); template void _cuda_addSummedColumnsChannelWise(double *vector, const double* matrix, const unsigned int channels, const unsigned int nRows, const unsigned int nColumns, const double scale); template void _cuda_addSummedColumnsChannelWise(float *vector, const float* matrix, const unsigned int channels, const unsigned int nRows, const unsigned int nColumns, const float scale); /* * * addSquaredSummedColumns * */ template<typename T> __global__ void __cuda_addSquaredSummedColumns(T *vectorDevPtr, const T *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const T scale){ unsigned int rowIndex = threadIdx.x + blockIdx.x * blockDim.x; if (rowIndex < nRows){ T result = 0.0; for (unsigned int i = 0; i < nColumns; i++){ result += matrixDevPtr[i * nRows + rowIndex] * matrixDevPtr[i * nRows + rowIndex]; } vectorDevPtr[rowIndex] += scale * result; } } template<typename T> void _cuda_addSquaredSummedColumns(T *vectorDevPtr, const T *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const T scale){ // parallelize over rows int gridSize = (int)ceil( (float) nRows/THREADS_PER_BLOCK); __cuda_addSquaredSummedColumns <<< gridSize , THREADS_PER_BLOCK >>> (vectorDevPtr, matrixDevPtr, nRows, nColumns, scale); } template __global__ void __cuda_addSquaredSummedColumns(double *vectorDevPtr, const double *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const double scale); template void _cuda_addSquaredSummedColumns(double *vectorDevPtr, const double *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const double scale); template __global__ void __cuda_addSquaredSummedColumns(float *vectorDevPtr, const float *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const float scale); template void _cuda_addSquaredSummedColumns(float *vectorDevPtr, const float *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const float scale); /* * * addSummedNeighborsInARow * */ template<typename T> __global__ void __cuda_addSummedNeighborsInARow(T* dataA, const T* dataB, unsigned int elementsA, unsigned int nNeighbors){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < elementsA){ for (unsigned int n = 0; n < nNeighbors; n++){ dataA[index] += dataB[index * nNeighbors + n]; } } } template<typename T> void _cuda_addSummedNeighborsInARow(T* dataA, const T* dataB, unsigned int rowsA, unsigned int columnsA, unsigned int nNeighbors){ // parallelize over rows int gridSize = (int)ceil( (float) rowsA*columnsA/THREADS_PER_BLOCK); __cuda_addSummedNeighborsInARow <<< gridSize , THREADS_PER_BLOCK >>> (dataA, dataB, rowsA * columnsA, nNeighbors); } template __global__ void __cuda_addSummedNeighborsInARow(double* dataA, const double* dataB, unsigned int elementsA, unsigned int nNeighbors); template void _cuda_addSummedNeighborsInARow(double* dataA, const double* dataB, unsigned int 
rowsA, unsigned int columnsA, unsigned int nNeighbors); template __global__ void __cuda_addSummedNeighborsInARow(float* dataA, const float* dataB, unsigned int elementsA, unsigned int nNeighbors); template void _cuda_addSummedNeighborsInARow(float* dataA, const float* dataB, unsigned int rowsA, unsigned int columnsA, unsigned int nNeighbors); /* * * addWeighted * */ template<typename T> __global__ void __cuda_addWeighted(T *data, const T *X, const T* weights, unsigned int nRows, unsigned int nColumns){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nRows * nColumns) { unsigned int col = index / nRows; data[index] += X[index] * weights[col]; } } template<typename T> void _cuda_addWeighted(T *data, const T *X, const T* weights, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); __cuda_addWeighted <<< gridSize , THREADS_PER_BLOCK >>> (data, X, weights, nRows, nColumns); } template __global__ void __cuda_addWeighted<double>(double *data, const double *X, const double* weights, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_addWeighted<float>(float *data, const float *X, const float* weights, unsigned int nRows, unsigned int nColumns); template void _cuda_addWeighted<double>(double *data, const double *X, const double* weights, unsigned int nRows, unsigned int nColumns); template void _cuda_addWeighted<float>(float *data, const float *X, const float* weights, unsigned int nRows, unsigned int nColumns); /* * * elementwise multiplication * */ template<typename T> __global__ void __cuda_elementwiseMultiplication(T *data, T *datab, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) data[index] = data[index] * datab[index]; } template<typename T> void _cuda_elementwiseMultiplication(T *data, T *datab, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); __cuda_elementwiseMultiplication <<< gridSize , THREADS_PER_BLOCK >>> (data, datab, nElements); } template __global__ void __cuda_elementwiseMultiplication<double>(double *data, double *datab, unsigned int nElements); template __global__ void __cuda_elementwiseMultiplication<float>(float *data, float *datab, unsigned int nElements); template void _cuda_elementwiseMultiplication<double>(double *data, double *datab, unsigned int nRows, unsigned int nColumns); template void _cuda_elementwiseMultiplication<float>(float *data, float *datab, unsigned int nRows, unsigned int nColumns); /* * * elementwise division * */ template<typename T> __global__ void __cuda_elementwiseDivision(T *data, T *datab, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) data[index] = data[index] / datab[index]; } template<typename T> void _cuda_elementwiseDivision(T *data, T *datab, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); __cuda_elementwiseDivision <<< gridSize , THREADS_PER_BLOCK >>> (data, datab, nElements); } template __global__ void __cuda_elementwiseDivision<double>(double *data, double *datab, unsigned int nElements); template __global__ void __cuda_elementwiseDivision<float>(float *data, float *datab, unsigned int nElements); template void _cuda_elementwiseDivision<double>(double *data, double 
*datab, unsigned int nRows, unsigned int nColumns); template void _cuda_elementwiseDivision<float>(float *data, float *datab, unsigned int nRows, unsigned int nColumns); /* * * rprop Weight Update * */ template<typename T> __global__ void __cuda_rpropUpdate(T *currentValues, T *newGradients, T *oldGradients, T *updateValues, T increasingFactor, T decreasingFactor, T maxUpdateValue, T minUpdateValue, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) { T change = oldGradients[index] * newGradients[index]; if (change > 0) { updateValues[index] = updateValues[index] * increasingFactor; if (updateValues[index] > maxUpdateValue) updateValues[index] = maxUpdateValue; } else if (change < 0) { updateValues[index] = updateValues[index] * decreasingFactor; if (updateValues[index] < minUpdateValue) updateValues[index] = minUpdateValue; } if (newGradients[index] > 0) currentValues[index] = currentValues[index] - updateValues[index]; else if (newGradients[index] < 0) currentValues[index] = currentValues[index] + updateValues[index]; oldGradients[index] = newGradients[index]; } } template<typename T> void _cuda_rpropUpdate(T *currentValues, T *newGradients, T *oldGradients, T *updateValues, T increasingFactor, T decreasingFactor, T maxUpdateValue, T minUpdateValue, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); __cuda_rpropUpdate <<< gridSize , THREADS_PER_BLOCK >>> (currentValues, newGradients, oldGradients, updateValues, increasingFactor, decreasingFactor, maxUpdateValue, minUpdateValue, nElements); } template __global__ void __cuda_rpropUpdate<double>(double *currentValues, double *newGradients, double *oldGradients, double *updateValues, double increasingFactor, double decreasingFactor, double maxUpdateValue, double minUpdateValue, unsigned int nElements); template __global__ void __cuda_rpropUpdate<float>(float *currentValues, float *newGradients, float *oldGradients, float *updateValues, float increasingFactor, float decreasingFactor, float maxUpdateValue, float minUpdateValue, unsigned int nElements); template void _cuda_rpropUpdate<double>(double *currentValues, double *newGradients, double *oldGradients, double *updateValues, double increasingFactor, double decreasingFactor, double maxUpdateValue, double minUpdateValue, unsigned int nRows, unsigned int nColumns); template void _cuda_rpropUpdate<float>(float *currentValues, float *newGradients, float *oldGradients, float *updateValues, float increasingFactor, float decreasingFactor, float maxUpdateValue, float minUpdateValue, unsigned int nRows, unsigned int nColumns); /* * * add constant elementwise * */ template<typename T> __global__ void __cuda_addConstantElementwise(T constant, T *data, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) data[index] = data[index] + constant; } template<typename T> void _cuda_addConstantElementwise(T constant, T *data, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (T) nElements/THREADS_PER_BLOCK); __cuda_addConstantElementwise <<< gridSize , THREADS_PER_BLOCK >>> (constant, data, nElements); } template __global__ void __cuda_addConstantElementwise<double>(double constant, double *data, unsigned int nElements); template void _cuda_addConstantElementwise<double>(double constant, double *data, unsigned int nRows, unsigned int 
nColumns); template __global__ void __cuda_addConstantElementwise<float>(float constant, float *data, unsigned int nElements); template void _cuda_addConstantElementwise<float>(float constant, float *data, unsigned int nRows, unsigned int nColumns); /* * * getMaxOfColumns * */ template<typename T> __global__ void __cuda_getMaxOfColumns(T *vectorDevPtr, const T *matrixDevPtr, unsigned int nRows, unsigned int nColumns){ unsigned int columnIndex = threadIdx.x + blockIdx.x * blockDim.x; if (columnIndex < nColumns){ T result = matrixDevPtr[columnIndex * nRows]; for (unsigned int i = 1; i < nRows; i++){ T val = matrixDevPtr[columnIndex * nRows + i]; result = fmax(result, val); } vectorDevPtr[columnIndex] = result; } } template<typename T> void _cuda_getMaxOfColumns(T *vectorDevPtr, const T *matrixDevPtr, unsigned int nRows, unsigned int nColumns){ // parallelize over columns int gridSize = (int)ceil( (float) nColumns/THREADS_PER_BLOCK); __cuda_getMaxOfColumns <<< gridSize , THREADS_PER_BLOCK >>> (vectorDevPtr, matrixDevPtr, nRows, nColumns); } template __global__ void __cuda_getMaxOfColumns(double *vectorDevPtr, const double *matrixDevPtr, unsigned int nRows, unsigned int nColumns); template void _cuda_getMaxOfColumns(double *vectorDevPtr, const double *matrixDevPtr, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_getMaxOfColumns(float *vectorDevPtr, const float *matrixDevPtr, unsigned int nRows, unsigned int nColumns); template void _cuda_getMaxOfColumns(float *vectorDevPtr, const float *matrixDevPtr, unsigned int nRows, unsigned int nColumns); /* * slightly faster version using tmp array */ template<typename T> __global__ void __cuda_getMaxOfColumnsTmp(const T *matrixDevPtr, unsigned int nRows, unsigned int nColumns, T *tmpDevPtr, unsigned int tmpRows){ unsigned int columnIndex = threadIdx.x + blockIdx.x * blockDim.x; unsigned int columnPart = blockIdx.y; if (columnIndex < nColumns){ unsigned int nRowsDiv = nRows / tmpRows; unsigned int startRow = columnPart * nRowsDiv; if (startRow < nRows){ unsigned int endRow = columnPart == tmpRows - 1 ? 
                nRows : (columnPart + 1) * nRowsDiv;
            T result = matrixDevPtr[columnIndex * nRows];
            for (unsigned int i = startRow; i < endRow; i++){
                // result = max(result, matrix(i, columnIndex))
                T val = matrixDevPtr[columnIndex * nRows + i];
                result = fmax(result, val);
            }
            tmpDevPtr[columnIndex*tmpRows + columnPart] = result;
        }
    }
}

template<typename T>
void _cuda_getMaxOfColumns(T *vectorDevPtr, const T *matrixDevPtr, unsigned int nRows, unsigned int nColumns, T *tmpDevPtr, unsigned int tmpRows){
    int gridDimx = (int)ceil( (float) nColumns / THREADS_PER_BLOCK);
    int gridDimy = tmpRows;
    dim3 gridSize(gridDimx,gridDimy);
    // first compute partial column maxima into the tmp matrix, then reduce over the parts
    __cuda_getMaxOfColumnsTmp <<< gridSize , THREADS_PER_BLOCK >>> (matrixDevPtr, nRows, nColumns, tmpDevPtr, tmpRows);
    _cuda_getMaxOfColumns<T>(vectorDevPtr, tmpDevPtr, tmpRows, nColumns);
}

template __global__ void __cuda_getMaxOfColumnsTmp(const double *matrixDevPtr, unsigned int nRows, unsigned int nColumns, double *tmpDevPtr, unsigned int tmpRows);
template void _cuda_getMaxOfColumns(double *vectorDevPtr, const double *matrixDevPtr, unsigned int nRows, unsigned int nColumns, double *tmpDevPtr, unsigned int tmpRows);
template __global__ void __cuda_getMaxOfColumnsTmp(const float *matrixDevPtr, unsigned int nRows, unsigned int nColumns, float *tmpDevPtr, unsigned int tmpRows);
template void _cuda_getMaxOfColumns(float *vectorDevPtr, const float *matrixDevPtr, unsigned int nRows, unsigned int nColumns, float *tmpDevPtr, unsigned int tmpRows);

/*
 *
 * elementwiseMultiplicationWithSigmoidDerivative
 *
 */
template<typename T>
__global__ void __cuda_elementwiseMultiplicationWithSigmoidDerivative(T *data, T *datab, unsigned int nElements){
    unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index < nElements)
        data[index] = data[index] * (datab[index] * (1 - datab[index]));
}

template<typename T>
void _cuda_elementwiseMultiplicationWithSigmoidDerivative(T *data, T *datab, unsigned int nRows, unsigned int nColumns) {
    unsigned int nElements = nRows * nColumns;
    int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
    __cuda_elementwiseMultiplicationWithSigmoidDerivative <<< gridSize , THREADS_PER_BLOCK >>> (data, datab, nElements);
}

template __global__ void __cuda_elementwiseMultiplicationWithSigmoidDerivative(double *data, double *datab, unsigned int nElements);
template void _cuda_elementwiseMultiplicationWithSigmoidDerivative(double *data, double *datab, unsigned int nRows, unsigned int nColumns);
template __global__ void __cuda_elementwiseMultiplicationWithSigmoidDerivative(float *data, float *datab, unsigned int nElements);
template void _cuda_elementwiseMultiplicationWithSigmoidDerivative(float *data, float *datab, unsigned int nRows, unsigned int nColumns);

/*
 *
 * elementwiseMultiplicationWithTriangleDerivative
 *
 */
template<typename T>
__global__ void __cuda_elementwiseMultiplicationWithTriangleDerivative(T *data, T *datab, unsigned int nElements){
    unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index < nElements) {
        if ((datab[index] < -1.0) || (datab[index] > 1.0) || (datab[index] == 0))
            data[index] = 0;
        else if (datab[index] > 0.0)
            data[index] = -data[index];
    }
}

template<typename T>
void _cuda_elementwiseMultiplicationWithTriangleDerivative(T *data, T *datab, unsigned int nRows, unsigned int nColumns) {
    unsigned int nElements = nRows * nColumns;
    int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
    __cuda_elementwiseMultiplicationWithTriangleDerivative <<< gridSize , THREADS_PER_BLOCK >>> (data, datab, nElements);
}

template __global__ void
__cuda_elementwiseMultiplicationWithTriangleDerivative(double *data, double *datab, unsigned int nElements); template void _cuda_elementwiseMultiplicationWithTriangleDerivative(double *data, double *datab, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_elementwiseMultiplicationWithTriangleDerivative(float *data, float *datab, unsigned int nElements); template void _cuda_elementwiseMultiplicationWithTriangleDerivative(float *data, float *datab, unsigned int nRows, unsigned int nColumns); /* * * elementwiseMultiplicationWithTanhDerivative * */ template<typename T> __global__ void __cuda_elementwiseMultiplicationWithTanhDerivative(T *data, T *datab, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) data[index] = data[index] * (1 - pow(datab[index],2)); } template<typename T> void _cuda_elementwiseMultiplicationWithTanhDerivative(T *data, T *datab, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); __cuda_elementwiseMultiplicationWithTanhDerivative <<< gridSize , THREADS_PER_BLOCK >>> (data, datab, nElements); } template __global__ void __cuda_elementwiseMultiplicationWithTanhDerivative(double *data, double *datab, unsigned int nElements); template void _cuda_elementwiseMultiplicationWithTanhDerivative(double *data, double *datab, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_elementwiseMultiplicationWithTanhDerivative(float *data, float *datab, unsigned int nElements); template void _cuda_elementwiseMultiplicationWithTanhDerivative(float *data, float *datab, unsigned int nRows, unsigned int nColumns); /* * * multiplicationWithSoftmaxDerivative * */ template<typename T> __global__ void __cuda_multiplicationWithSoftmaxDerivative(T *data, T *datab, T *datac, unsigned int nElements, unsigned int nRows){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) data[index] = datab[index] * (data[index] - datac[index/nRows]); } template<typename T> void _cuda_multiplicationWithSoftmaxDerivative(T *data, T *datab, T *datac, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); __cuda_multiplicationWithSoftmaxDerivative <<< gridSize , THREADS_PER_BLOCK >>> (data, datab, datac, nElements, nRows); } template __global__ void __cuda_multiplicationWithSoftmaxDerivative(double *data, double *datab, double *datac, unsigned int nElements, unsigned int nRows); template void _cuda_multiplicationWithSoftmaxDerivative(double *data, double *datab, double *datac, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_multiplicationWithSoftmaxDerivative(float *data, float *datab, float *datac, unsigned int nElements, unsigned int nRows); template void _cuda_multiplicationWithSoftmaxDerivative(float *data, float *datab, float *datac, unsigned int nRows, unsigned int nColumns); /* * elementwiseMultiplicationWithClippedDerivative * */ template <typename T> __global__ void __cuda_elementwiseMultiplicationWithClippedDerivative(T *errOut, T *activations, unsigned int nElements, T thresholdLeft, T thresholdRight){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) { if ((activations[index] <= thresholdLeft) || (activations[index] >= thresholdRight)) errOut[index] = 0; } } template <typename T> void 
_cuda_elementwiseMultiplicationWithClippedDerivative(T *data, T *datab, unsigned int nRows, unsigned int nColumns, T thresholdLeft, T thresholdRight) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); __cuda_elementwiseMultiplicationWithClippedDerivative<T> <<<gridSize, THREADS_PER_BLOCK>>> (data, datab, nElements, thresholdLeft, thresholdRight); } template __global__ void __cuda_elementwiseMultiplicationWithClippedDerivative<float>(float*, float*, unsigned int, float, float); template __global__ void __cuda_elementwiseMultiplicationWithClippedDerivative<double>(double*, double*, unsigned int, double, double); template void _cuda_elementwiseMultiplicationWithClippedDerivative<float>(float*, float*, unsigned int, unsigned int, float, float); template void _cuda_elementwiseMultiplicationWithClippedDerivative<double>(double*, double*, unsigned int, unsigned int, double, double); /* * elementwiseMultiplicationWithSignedPowDerivative * */ template <typename T> __global__ void __cuda_elementwiseMultiplicationWithSignedPowDerivative(T *errOut, T *activations, unsigned int nElements, T p){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) { if (activations[index] == 0) errOut[index] = 0; else if (activations[index] < 0) errOut[index] *= p * pow(-activations[index], p - 1); else errOut[index] *= p * pow(activations[index], p - 1); } } template <typename T> void _cuda_elementwiseMultiplicationWithSignedPowDerivative(T *data, T *datab, unsigned int nRows, unsigned int nColumns, T p) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); __cuda_elementwiseMultiplicationWithSignedPowDerivative<T> <<<gridSize, THREADS_PER_BLOCK>>> (data, datab, nElements, p); } template __global__ void __cuda_elementwiseMultiplicationWithSignedPowDerivative<float>(float*, float*, unsigned int, float); template __global__ void __cuda_elementwiseMultiplicationWithSignedPowDerivative<double>(double*, double*, unsigned int, double); template void _cuda_elementwiseMultiplicationWithSignedPowDerivative<float>(float*, float*, unsigned int, unsigned int, float); template void _cuda_elementwiseMultiplicationWithSignedPowDerivative<double>(double*, double*, unsigned int, unsigned int, double); /* * elementwiseMultiplicationWithLogDerivative * */ template <typename T> __global__ void __cuda_elementwiseMultiplicationWithLogDerivative(T *errOut, T *activations, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) errOut[index] *= exp(-activations[index]); } template <typename T> void _cuda_elementwiseMultiplicationWithLogDerivative(T *data, T *datab, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); __cuda_elementwiseMultiplicationWithLogDerivative<T> <<<gridSize, THREADS_PER_BLOCK>>> (data, datab, nElements); } template __global__ void __cuda_elementwiseMultiplicationWithLogDerivative<float>(float*, float*, unsigned int); template __global__ void __cuda_elementwiseMultiplicationWithLogDerivative<double>(double*, double*, unsigned int); template void _cuda_elementwiseMultiplicationWithLogDerivative<float>(float*, float*, unsigned int, unsigned int); template void _cuda_elementwiseMultiplicationWithLogDerivative<double>(double*, double*, unsigned int, unsigned int); /* * * multiplicationWithL2NormalizationDerivative * */ 
template<typename T> __global__ void __cuda_multiplicationWithL2NormalizationDerivative(T *data, T *datab, T *datac, T *datad, unsigned int nElements, unsigned int nRows){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) data[index] = (data[index] - datab[index] * datac[index/nRows]) / datad[index/nRows]; } template<typename T> void _cuda_multiplicationWithL2NormalizationDerivative(T *data, T *datab, T *datac, T *datad, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); __cuda_multiplicationWithL2NormalizationDerivative <<< gridSize , THREADS_PER_BLOCK >>> (data, datab, datac, datad, nElements, nRows); } template __global__ void __cuda_multiplicationWithL2NormalizationDerivative(double *data, double *datab, double *datac, double *datad, unsigned int nElements, unsigned int nRows); template void _cuda_multiplicationWithL2NormalizationDerivative(double *data, double *datab, double *datac, double *datad, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_multiplicationWithL2NormalizationDerivative(float *data, float *datab, float *datac, float *datad, unsigned int nElements, unsigned int nRows); template void _cuda_multiplicationWithL2NormalizationDerivative(float *data, float *datab, float *datac, float *datad, unsigned int nRows, unsigned int nColumns); /* * * addToAllColumns * */ template<typename T> __global__ void __cuda_addToAllColumns(T *data, T *datab, unsigned int nElements, unsigned int nRows, T alpha){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) data[index] += alpha * datab[index%nRows]; } template<typename T> void _cuda_addToAllColumns(T *data, T *datab, unsigned int nRows, unsigned int nColumns, T alpha) { // TODO implement kernel without % operator (slow on GPU) unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); __cuda_addToAllColumns <<< gridSize , THREADS_PER_BLOCK >>> (data, datab, nElements, nRows, alpha); } template __global__ void __cuda_addToAllColumns<double>(double *data, double *datab, unsigned int nElements, unsigned int nRows, double alpha); template void _cuda_addToAllColumns<double>(double *data, double *datab, unsigned int nRows, unsigned int nColumns, double alpha); template __global__ void __cuda_addToAllColumns<float>(float *data, float *datab, unsigned int nElements, unsigned int nRows, float alpha); template void _cuda_addToAllColumns<float>(float *data, float *datab, unsigned int nRows, unsigned int nColumns, float alpha); /* * addToAllChannels * Adds one element of vector to one channel */ template<typename T> __global__ void __cuda_addToAllChannels(T *mat, T *vec, unsigned int channels, unsigned int nRows, unsigned int nElements, T alpha) { unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < nElements) { unsigned int channelSize = nRows / channels; mat[index] += alpha * vec[(index%nRows)/channelSize]; } } template<typename T> void _cuda_addToAllChannels(T *mat, T *vec, unsigned int channels, unsigned int nRows, unsigned int nColumns, T alpha) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); __cuda_addToAllChannels<<<gridSize, THREADS_PER_BLOCK>>>(mat, vec, channels, nRows, nElements, alpha); } template __global__ void __cuda_addToAllChannels(double *mat, double *vec, unsigned int channels, unsigned int nRows, unsigned int 
nElements, double alpha); template __global__ void __cuda_addToAllChannels(float *mat, float *vec, unsigned int channels, unsigned int nRows, unsigned int nElements, float alpha); template void _cuda_addToAllChannels(double *mat, double *vec, unsigned int channels, unsigned int nRows, unsigned int nColumns, double alpha); template void _cuda_addToAllChannels(float *mat, float *vec, unsigned int channels, unsigned int nRows, unsigned int nColumns, float alpha); /* * * addToAllRows * */ template<typename T> __global__ void __cuda_addToAllRows(T *data, T *datab, unsigned int nElements, unsigned int nRows, T alpha){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) data[index] += alpha * datab[index/nRows]; } template<typename T> void _cuda_addToAllRows(T *data, T *datab, unsigned int nRows, unsigned int nColumns, T alpha) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); __cuda_addToAllRows <<< gridSize , THREADS_PER_BLOCK >>> (data, datab, nElements, nRows, alpha); } template __global__ void __cuda_addToAllRows<double>(double *data, double *datab, unsigned int nElements, unsigned int nRows, double alpha); template void _cuda_addToAllRows<double>(double *data, double *datab, unsigned int nRows, unsigned int nColumns, double alpha); template __global__ void __cuda_addToAllRows<float>(float *data, float *datab, unsigned int nElements, unsigned int nRows, float alpha); template void _cuda_addToAllRows<float>(float *data, float *datab, unsigned int nRows, unsigned int nColumns, float alpha); /* * * multiplyColumnsByScalars * */ template<typename T> __global__ void __cuda_multiplyColumnsByScalars(const T *vectorDevPtr, T *matrixDevPtr, unsigned int nRows, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; unsigned int colIndex = index / nRows; if (index < nElements) matrixDevPtr[index] = matrixDevPtr[index] * vectorDevPtr[colIndex]; } template<typename T> void _cuda_multiplyColumnsByScalars(const T *vectorDevPtr, T *matrixDevPtr, unsigned int nRows, unsigned int nColumns){ // TODO parallelization without mod operator (mod is slow on GPU) unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); __cuda_multiplyColumnsByScalars <<< gridSize , THREADS_PER_BLOCK >>> (vectorDevPtr, matrixDevPtr, nRows, nElements); } template __global__ void __cuda_multiplyColumnsByScalars<double>(const double *vectorDevPtr, double *matrixDevPtr, unsigned int nRows, unsigned int nElements); template void _cuda_multiplyColumnsByScalars<double>(const double *vectorDevPtr, double *matrixDevPtr, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_multiplyColumnsByScalars<float>(const float *vectorDevPtr, float *matrixDevPtr, unsigned int nRows, unsigned int nElements); template void _cuda_multiplyColumnsByScalars<float>(const float *vectorDevPtr, float *matrixDevPtr, unsigned int nRows, unsigned int nColumns); /* * * divideColumnsByScalars * */ template<typename T> __global__ void __cuda_divideColumnsByScalars(const T *vectorDevPtr, T *matrixDevPtr, unsigned int nRows, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; unsigned int colIndex = index / nRows; if (index < nElements) matrixDevPtr[index] = matrixDevPtr[index] / vectorDevPtr[colIndex]; } template<typename T> void _cuda_divideColumnsByScalars(const T *vectorDevPtr, T *matrixDevPtr, unsigned int nRows, unsigned int nColumns){ // TODO 
parallelization without mod operator (mod is slow on GPU) unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); __cuda_divideColumnsByScalars <<< gridSize , THREADS_PER_BLOCK >>> (vectorDevPtr, matrixDevPtr, nRows, nElements); } template __global__ void __cuda_divideColumnsByScalars<double>(const double *vectorDevPtr, double *matrixDevPtr, unsigned int nRows, unsigned int nElements); template void _cuda_divideColumnsByScalars<double>(const double *vectorDevPtr, double *matrixDevPtr, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_divideColumnsByScalars<float>(const float *vectorDevPtr, float *matrixDevPtr, unsigned int nRows, unsigned int nElements); template void _cuda_divideColumnsByScalars<float>(const float *vectorDevPtr, float *matrixDevPtr, unsigned int nRows, unsigned int nColumns); /* * * multiplyRowsByScalars * */ template<typename T> __global__ void __cuda_multiplyRowsByScalars(const T *vectorDevPtr, T *matrixDevPtr, unsigned int nRows, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; unsigned int rowIndex = index % nRows; if (index < nElements) matrixDevPtr[index] = matrixDevPtr[index] * vectorDevPtr[rowIndex]; } template<typename T> void _cuda_multiplyRowsByScalars(const T *vectorDevPtr, T *matrixDevPtr, unsigned int nRows, unsigned int nColumns){ // TODO parallelization without mod operator (mod is slow on GPU) unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); __cuda_multiplyRowsByScalars <<< gridSize , THREADS_PER_BLOCK >>> (vectorDevPtr, matrixDevPtr, nRows, nElements); } template __global__ void __cuda_multiplyRowsByScalars<double>(const double *vectorDevPtr, double *matrixDevPtr, unsigned int nRows,unsigned int nElements); template void _cuda_multiplyRowsByScalars<double>(const double *vectorDevPtr, double *matrixDevPtr, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_multiplyRowsByScalars<float>(const float *vectorDevPtr, float *matrixDevPtr, unsigned int nRows, unsigned int nElements); template void _cuda_multiplyRowsByScalars<float>(const float *vectorDevPtr, float *matrixDevPtr, unsigned int nRows, unsigned int nColumns); /* * * divideRowsByScalars * */ template<typename T> __global__ void __cuda_divideRowsByScalars(const T *vectorDevPtr, T *matrixDevPtr, unsigned int nRows, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; unsigned int rowIndex = index % nRows; if (index < nElements) matrixDevPtr[index] = matrixDevPtr[index] / vectorDevPtr[rowIndex]; } template<typename T> void _cuda_divideRowsByScalars(const T *vectorDevPtr, T *matrixDevPtr, unsigned int nRows, unsigned int nColumns){ // TODO parallelization without mod operator (mod is slow on GPU) unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); __cuda_divideRowsByScalars <<< gridSize , THREADS_PER_BLOCK >>> (vectorDevPtr, matrixDevPtr, nRows, nElements); } template __global__ void __cuda_divideRowsByScalars<double>(const double *vectorDevPtr, double *matrixDevPtr, unsigned int nRows,unsigned int nElements); template void _cuda_divideRowsByScalars<double>(const double *vectorDevPtr, double *matrixDevPtr, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_divideRowsByScalars<float>(const float *vectorDevPtr, float *matrixDevPtr, unsigned int nRows,unsigned int nElements); template void 
_cuda_divideRowsByScalars<float>(const float *vectorDevPtr, float *matrixDevPtr, unsigned int nRows, unsigned int nColumns); /* * * fill * */ template<typename T> __global__ void __cuda_fill(T *data, T value, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) data[index] = value; } template<typename T> void _cuda_fill(T *data, T value, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); __cuda_fill <<< gridSize , THREADS_PER_BLOCK >>> (data, value, nElements); } template __global__ void __cuda_fill<double>(double *data, double value, unsigned int nElements); template void _cuda_fill<double>(double *data, double value, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_fill<float>(float *data, float value, unsigned int nElements); template void _cuda_fill<float>(float *data, float value, unsigned int nRows, unsigned int nColumns); /* * * Average Pooling * */ template<typename T> __global__ void __cuda_avgPool(const T *source, T *result, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride, const T minValue) { unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; unsigned int resultWidth = ceil((float)sourceWidth/stride); unsigned int resultHeight = ceil((float)sourceHeight/stride); unsigned int resultRows = resultWidth * resultHeight * sourceChannels; if(index < (resultRows * sourceColumns)) { int imageNum = index / resultRows; int resultPixelNum = index % resultRows; int channelNum = resultPixelNum / (resultWidth * resultHeight); resultPixelNum %= (resultWidth * resultHeight); int resultPixelX = resultPixelNum / resultHeight; int resultPixelY = resultPixelNum % resultHeight; int sourcePixelX = resultPixelX * stride; int sourcePixelY = resultPixelY * stride; T sum = 0; T num = 0; int index = -1; for(int i=sourcePixelX; (i<(sourcePixelX+poolSize)) && (i<sourceWidth); i++) { for(int j=sourcePixelY; (j<(sourcePixelY + poolSize)) && (j<sourceHeight); j++) { index = imageNum * sourceRows + channelNum * (sourceWidth * sourceHeight) + i * sourceHeight + j; sum += source[index]; num += 1; } } int resultIndex = imageNum * resultRows + channelNum * (resultWidth * resultHeight) + resultPixelX * resultHeight + resultPixelY; result[resultIndex] = sum / (poolSize * poolSize);//num; } } template<typename T> void _cuda_avgPool(const T *source, T *result, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride) { unsigned int nElements = (int)ceil((float)sourceWidth/stride) * (int)ceil((float)sourceHeight/stride) * sourceChannels * sourceColumns; int gridSize = (int)ceil((float) nElements/THREADS_PER_BLOCK); __cuda_avgPool<<<gridSize, THREADS_PER_BLOCK>>>(source, result, sourceRows, sourceColumns, sourceWidth, sourceHeight, sourceChannels, poolSize, stride, std::numeric_limits<T>::min()); } template __global__ void __cuda_avgPool(const double *source, double *result, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride, double 
minValue); template __global__ void __cuda_avgPool(const float *source, float *result, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride, float minValue); template void _cuda_avgPool(const double *source, double *result, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride); template void _cuda_avgPool(const float *source, float *result, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride); /* * * Avg Pooling Backpropogation * */ template<typename T> __global__ void __cuda_backPropogateAvgPool(T *result, const T *errorSignal, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride) { unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; unsigned int errorSignalWidth = ceil((double)sourceWidth/stride); unsigned int errorSignalHeight = ceil((double)sourceHeight/stride); unsigned int errorSignalRows = errorSignalWidth * errorSignalHeight * sourceChannels; if(index < (sourceRows * sourceColumns)) { int imageNum = index / sourceRows; int imagePixelNum = index % sourceRows; int channel = imagePixelNum / (sourceWidth * sourceHeight); imagePixelNum %= (sourceWidth * sourceHeight); int pixelX = imagePixelNum / sourceHeight; int pixelY = imagePixelNum % sourceHeight; int indexInErrorSignal = -1; //calculates start of the first grid containing current Pixel unsigned int gridStartX = (pixelX + 1 - (int)poolSize) < 0 ? 0 : (unsigned int)(ceil((float)(pixelX + 1 - (int)poolSize)/(float)stride) * stride); unsigned int gridStartY = (pixelY + 1 - (int)poolSize) < 0 ? 
0 : (unsigned int)(ceil((float)(pixelY + 1 - (int)poolSize)/(float)stride) * stride); ////////////////////////////////// for(unsigned int gridX=gridStartX; gridX<=pixelX; gridX+=stride) { for(unsigned int gridY=gridStartY; gridY<=pixelY; gridY+=stride) { indexInErrorSignal = imageNum * errorSignalRows + channel * errorSignalHeight * errorSignalWidth + (gridX/stride) * errorSignalHeight + (gridY/stride); result[index] += errorSignal[indexInErrorSignal] / (T)(poolSize * poolSize); } } } } template<typename T> void _cuda_backPropogateAvgPool(T *result, const T *errorSignal, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride) { unsigned int nElements = sourceRows * sourceColumns; int gridSize = (int)ceil((float) nElements/THREADS_PER_BLOCK); __cuda_backPropogateAvgPool<<<gridSize, THREADS_PER_BLOCK>>>(result, errorSignal, sourceRows, sourceColumns, sourceWidth, sourceHeight, sourceChannels, poolSize, stride); } template __global__ void __cuda_backPropogateAvgPool(double *result, const double *errorSignal, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride); template __global__ void __cuda_backPropogateAvgPool(float *result, const float *errorSignal, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride); template void _cuda_backPropogateAvgPool(double *result, const double *errorSignal, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride); template void _cuda_backPropogateAvgPool(float *result, const float *errorSignal, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride); /* * * Max Pooling * */ template<typename T> __global__ void __cuda_maxPool(const T *source, T *result, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride, const T minValue) { unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; unsigned int resultWidth = ceil((float)sourceWidth/stride); unsigned int resultHeight = ceil((float)sourceHeight/stride); unsigned int resultRows = resultWidth * resultHeight * sourceChannels; if(index < (resultRows * sourceColumns)) { int imageNum = index / resultRows; int resultPixelNum = index % resultRows; int channelNum = resultPixelNum / (resultWidth * resultHeight); resultPixelNum %= (resultWidth * resultHeight); int resultPixelX = resultPixelNum / resultHeight; int resultPixelY = resultPixelNum % resultHeight; int sourcePixelX = resultPixelX * stride; int sourcePixelY = resultPixelY * stride; T maxValue = minValue; int index = -1; for(int i=sourcePixelX; (i<(sourcePixelX+poolSize)) && (i<sourceWidth); i++) { for(int j=sourcePixelY; (j<(sourcePixelY + poolSize)) && (j<sourceHeight); j++) { index = 
                    imageNum * sourceRows + channelNum * (sourceWidth * sourceHeight) + i * sourceHeight + j;
                if(source[index] >= maxValue) {
                    maxValue = source[index];
                }
            }
        }
        int resultIndex = imageNum * resultRows + channelNum * (resultWidth * resultHeight) + resultPixelX * resultHeight + resultPixelY;
        result[resultIndex] = maxValue;
    }
}

template<typename T>
void _cuda_maxPool(const T *source, T *result, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride) {
    unsigned int nElements = (int)ceil((float)sourceWidth/stride) * (int)ceil((float)sourceHeight/stride) * sourceChannels * sourceColumns;
    int gridSize = (int)ceil((float) nElements/THREADS_PER_BLOCK);
    // seed the running maximum with the lowest representable value of T;
    // std::numeric_limits<T>::min() is the smallest *positive* value for
    // floating point types and would give wrong results for negative inputs
    __cuda_maxPool<<<gridSize, THREADS_PER_BLOCK>>>(source, result, sourceRows, sourceColumns, sourceWidth, sourceHeight, sourceChannels, poolSize, stride, -std::numeric_limits<T>::max());
}

template __global__ void __cuda_maxPool(const double *source, double *result, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride, double minValue);
template __global__ void __cuda_maxPool(const float *source, float *result, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride, float minValue);
template void _cuda_maxPool(const double *source, double *result, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride);
template void _cuda_maxPool(const float *source, float *result, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride);

/*
 *
 * Max Pooling Backpropogation
 *
 */
template<typename T>
__global__ void __cuda_backPropogateMaxPool(T *result, const T* activationIn, const T* activationOut, const T *errorSignal, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride) {
    unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int errorSignalWidth = ceil((double)sourceWidth/stride);
    unsigned int errorSignalHeight = ceil((double)sourceHeight/stride);
    unsigned int errorSignalRows = errorSignalWidth * errorSignalHeight * sourceChannels;

    if(index < (sourceRows * sourceColumns)) {
        int imageNum = index / sourceRows;
        int imagePixelNum = index % sourceRows;
        int channel = imagePixelNum / (sourceWidth * sourceHeight);
        imagePixelNum %= (sourceWidth * sourceHeight);
        int pixelX = imagePixelNum / sourceHeight;
        int pixelY = imagePixelNum % sourceHeight;
        int indexInErrorSignal = -1;
        int numMaxima = 0;

        //calculates start of the first grid containing current Pixel
        unsigned int gridStartX = (pixelX + 1 - (int)poolSize) < 0 ? 0 :
            (unsigned int)(ceil((float)(pixelX + 1 - (int)poolSize)/(float)stride) * stride);
        unsigned int gridStartY = (pixelY + 1 - (int)poolSize) < 0 ?
0 : (unsigned int)(ceil((float)(pixelY + 1 - (int)poolSize)/(float)stride) * stride); ////////////////////////////////// for(unsigned int gridX=gridStartX; gridX<=pixelX; gridX+=stride) { for(unsigned int gridY=gridStartY; gridY<=pixelY; gridY+=stride) { indexInErrorSignal = imageNum * errorSignalRows + channel * errorSignalHeight * errorSignalWidth + (gridX/stride) * errorSignalHeight + (gridY/stride); //current pixel is not maximum in current window if(activationIn[index] != activationOut[indexInErrorSignal]) break; numMaxima = 0; for(unsigned int i=gridX; (i<(gridX + poolSize)) && i<sourceWidth; i++) { for(unsigned int j=gridY;(j<(gridY+poolSize)) && j<sourceHeight; j++) { int indexInActivationIn = imageNum * sourceRows + channel * sourceHeight * sourceWidth + i * sourceHeight + j; if(activationIn[indexInActivationIn] == activationOut[indexInErrorSignal]) { numMaxima += 1; } } } result[index] += errorSignal[indexInErrorSignal] / (T) numMaxima; } } } } template<typename T> void _cuda_backPropogateMaxPool(T *result, const T* activationIn, const T* activationOut, const T *errorSignal, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride) { unsigned int nElements = sourceRows * sourceColumns; int gridSize = (int)ceil((float) nElements/THREADS_PER_BLOCK); __cuda_backPropogateMaxPool<<<gridSize, THREADS_PER_BLOCK>>>(result, activationIn, activationOut, errorSignal, sourceRows, sourceColumns, sourceWidth, sourceHeight, sourceChannels, poolSize, stride); } template __global__ void __cuda_backPropogateMaxPool(double *result, const double* activationIn, const double* activationOut, const double *errorSignal, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride); template __global__ void __cuda_backPropogateMaxPool(float *result, const float* activationIn, const float* activationOut, const float *errorSignal, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride); template void _cuda_backPropogateMaxPool(double *result, const double* activationIn, const double* activationOut, const double *errorSignal, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride); template void _cuda_backPropogateMaxPool(float *result, const float* activationIn, const float* activationOut, const float *errorSignal, const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride); /* * * ensure minimal value * */ template<typename T> __global__ void __cuda_ensureMinimalValue(T *data, T value, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if ((index < nElements) && (data[index] < value)) data[index] = value; } template<typename T> void _cuda_ensureMinimalValue(T *data, T value, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = 
(int)ceil( (float) nElements/THREADS_PER_BLOCK); __cuda_ensureMinimalValue <<< gridSize , THREADS_PER_BLOCK >>> (data, value, nElements); } template __global__ void __cuda_ensureMinimalValue(double *data, double value, unsigned int nElements); template void _cuda_ensureMinimalValue(double *data, double value, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_ensureMinimalValue(float *data, float value, unsigned int nElements); template void _cuda_ensureMinimalValue(float *data, float value, unsigned int nRows, unsigned int nColumns); /* * * ensure maximal value * */ template<typename T> __global__ void __cuda_ensureMaximalValue(T *data, T value, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if ((index < nElements) && (data[index] > value)) data[index] = value; } template<typename T> void _cuda_ensureMaximalValue(T *data, T value, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); __cuda_ensureMaximalValue <<< gridSize , THREADS_PER_BLOCK >>> (data, value, nElements); } template __global__ void __cuda_ensureMaximalValue(double *data, double value, unsigned int nElements); template void _cuda_ensureMaximalValue(double *data, double value, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_ensureMaximalValue(float *data, float value, unsigned int nElements); template void _cuda_ensureMaximalValue(float *data, float value, unsigned int nRows, unsigned int nColumns); /* * * prepares for convolution * */ template<typename T> __global__ void __cuda_prepareConvolution(T* dest, const T* source, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY) { unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; if(index < destRows * destCols) { int imageNum = index / destRows; int resultIndex = index % destRows; int kernelMiddleX = kernelWidth / 2; int kernelMiddleY = kernelHeight / 2; int heightOfOneDestCh = (int)ceil((float)(sourceHeight - kernelHeight + 1) / (float)strideY); int pixelNum = resultIndex / (kernelHeight * kernelWidth * sourceChannels); int pixelX = (pixelNum / heightOfOneDestCh) * strideX + kernelMiddleX; int pixelY = (pixelNum % heightOfOneDestCh) * strideY + kernelMiddleY; int channelNum = resultIndex % (kernelHeight * kernelWidth * sourceChannels); int neighbNum = channelNum % (kernelHeight * kernelWidth); channelNum = channelNum / (kernelWidth * kernelHeight); int neighX = (neighbNum / kernelHeight) - kernelMiddleX; int neighY = (neighbNum % kernelHeight) - kernelMiddleY; dest[index] = source[imageNum * (sourceChannels * sourceWidth * sourceHeight) + channelNum * (sourceWidth * sourceHeight) + (pixelX + neighX) * sourceHeight + (pixelY + neighY)]; } } template<typename T> void _cuda_prepareConvolution(T* dest, const T* source, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY) { unsigned int nElements = destRows * destCols; int gridSize = (int)ceil((float)nElements/ THREADS_PER_BLOCK); __cuda_prepareConvolution<<<gridSize, THREADS_PER_BLOCK>>>(dest, 
source, sourceWidth, sourceHeight, sourceChannels, kernelWidth, kernelHeight, destRows, destCols, strideX, strideY); } template __global__ void __cuda_prepareConvolution(double* dest, const double* source, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY); template __global__ void __cuda_prepareConvolution(float* dest, const float* source, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY); template void _cuda_prepareConvolution(double* dest, const double* source, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY); template void _cuda_prepareConvolution(float* dest, const float* source, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY); /* * Backpropogation convolution * */ template<typename T> __global__ void __cuda_prepareConvolutionBackProp(T* dest, const T* source, const unsigned int destWidth, const unsigned int destHeight, const unsigned int destChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols) { unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; if(index < destRows * destCols) { dest[index] = 0; int img = index / destRows; int ch = (index % destRows) / (destWidth * destHeight); int pixelNum = (index % destRows) % (destWidth * destHeight); int pixelX = pixelNum / destHeight; int pixelY = pixelNum % destHeight; int gridStartX = (pixelX + 1 - (int)kernelWidth) <= 0 ? 0 : (pixelX + 1 - (int)kernelWidth); int gridStartY = (pixelY + 1 - (int)kernelHeight) <= 0 ? 
            0 : (pixelY + 1 - (int)kernelHeight);

        int sourceHeight = (destHeight - (int)kernelHeight + 1);
        int sizeOfOneChSource = sourceHeight * (destWidth - (int)kernelWidth + 1);
        int neighNum = 0;
        for(int j=gridStartX; (j<=pixelX) && ((j + kernelWidth) <= destWidth); j++) {
            for(int k=gridStartY; (k<=pixelY) && ((k + kernelHeight) <= destHeight) ; k++) {
                // (Cx, Cy) = (j + kernelMiddleX, k + kernelMiddleY) are coordinates of center pixel in grid
                // (Rx, Ry) = (Cx - pixelX, Cy - pixelY) gives coordinates of pixel in reference
                // to center pixel, such that center pixel of grid is mapped to (0,0)
                neighNum = (pixelX - j) * kernelHeight + (pixelY - k);
                // (j * sourceHeight + k) is pixel number of center of grid in source,
                // i.e. result of convolution
                dest[index] += source[img * sizeOfOneChSource * destChannels * kernelWidth * kernelHeight +
                    (j * sourceHeight + k) * destChannels * kernelWidth * kernelHeight + ch * kernelWidth * kernelHeight + neighNum];
            }
        }
    }
}

template<typename T>
void _cuda_prepareConvolutionBackProp(T* dest, const T* source, const unsigned int destWidth, const unsigned int destHeight, const unsigned int destChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols) {
    unsigned int nElements = destRows * destCols;
    int gridSize = (int)ceil((float)nElements/ THREADS_PER_BLOCK);

    __cuda_prepareConvolutionBackProp<<<gridSize, THREADS_PER_BLOCK>>>(dest, source, destWidth, destHeight, destChannels, kernelWidth, kernelHeight, destRows, destCols);
}

template void _cuda_prepareConvolutionBackProp(double* dest, const double* source, const unsigned int destWidth, const unsigned int destHeight, const unsigned int destChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols);
template void _cuda_prepareConvolutionBackProp(float* dest, const float* source, const unsigned int destWidth, const unsigned int destHeight, const unsigned int destChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols);
template __global__ void __cuda_prepareConvolutionBackProp(double* dest, const double* source, const unsigned int destWidth, const unsigned int destHeight, const unsigned int destChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols);
template __global__ void __cuda_prepareConvolutionBackProp(float* dest, const float* source, const unsigned int destWidth, const unsigned int destHeight, const unsigned int destChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols);

/*
 * prepare for convolution such that the image size stays the same after convolution
 *
 */
template<typename T>
__global__ void __cuda_prepareConvolutionSame(T* dest, const T* source, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY) {
    unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
    if(index < destRows * destCols) {
        int destWidth = (int)ceil((float)sourceWidth / (float)strideX);
        int destHeight = (int)ceil((float)sourceHeight / (float)strideY);
        int imageNum = index / destRows;
        int resultIndex = index % destRows;
        int kernelMiddleX =
kernelWidth / 2; int kernelMiddleY = kernelHeight / 2; int pixelNum = resultIndex / (kernelHeight * kernelWidth * sourceChannels); int pixelX = (pixelNum / destHeight) * strideX; int pixelY = (pixelNum % destHeight) * strideY; int channelNum = resultIndex % (kernelHeight * kernelWidth * sourceChannels); int neighbNum = channelNum % (kernelHeight * kernelWidth); channelNum = channelNum / (kernelWidth * kernelHeight); int neighX = (neighbNum / kernelHeight) - kernelMiddleX; int neighY = (neighbNum % kernelHeight) - kernelMiddleY; dest[index] = ( (pixelX + neighX) < 0 || (pixelY + neighY) < 0 || (pixelX + neighX) >= sourceWidth || (pixelY + neighY) >= sourceHeight) ? 0 : source[imageNum * (sourceChannels * sourceWidth * sourceHeight) + channelNum * (sourceWidth * sourceHeight) + (pixelX + neighX) * sourceHeight + (pixelY + neighY)]; } } template<typename T> void _cuda_prepareConvolutionSame(T* dest, const T* source, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY) { unsigned int nElements = destRows * destCols; int gridSize = (int)ceil((float)nElements/ THREADS_PER_BLOCK); __cuda_prepareConvolutionSame<<<gridSize, THREADS_PER_BLOCK>>>(dest, source, sourceWidth, sourceHeight, sourceChannels, kernelWidth, kernelHeight, destRows, destCols, strideX, strideY); } template __global__ void __cuda_prepareConvolutionSame(double* dest, const double* source, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY); template __global__ void __cuda_prepareConvolutionSame(float* dest, const float* source, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY); template void _cuda_prepareConvolutionSame(double* dest, const double* source, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY); template void _cuda_prepareConvolutionSame(float* dest, const float* source, const unsigned int sourceWidth, const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY); template<typename T> __global__ void __cuda_prepareConvolutionSameBackProp(T* dest, const T* source, const unsigned int destWidth, const unsigned int destHeight, const unsigned int destChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY) { unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; if(index < (destRows * destCols)) { dest[index] = 0; int img = index / destRows; // destRows = destWidth * destHeight * destChannels int ch = (index % 
destRows) / (destWidth * destHeight); int pixelNum = (index % destRows) % (destWidth * destHeight); int pixelX = pixelNum / destHeight; int pixelY = pixelNum % destHeight; int kernelMiddleX = (int)kernelWidth / 2; int kernelMiddleY = (int)kernelHeight / 2; int gridStartX = (pixelX + 1 - (int)kernelWidth) <= (-1 * kernelMiddleX) ? (-1 * kernelMiddleX) : (pixelX + 1 - (int)kernelWidth); int gridStartY = (pixelY + 1 - (int)kernelHeight) <= (-1 * kernelMiddleY) ? (-1 * kernelMiddleY) : (pixelY + 1 - (int)kernelHeight); for(int gridX=gridStartX; (gridX <= pixelX) && ((gridX + kernelMiddleX) < destWidth) ; gridX++) { if (((gridX + kernelMiddleX) % strideX) == 0) { for(int gridY=gridStartY; (gridY <= pixelY) && ((gridY + kernelMiddleY) < destHeight); gridY++) { if (((gridY + kernelMiddleY) % strideY) == 0) { int neighNum = (pixelX - gridX) * kernelHeight + (pixelY - gridY); int centerPixel = (((gridX + kernelMiddleX) / strideX) * destHeight / strideY) + (gridY + kernelMiddleY) / strideY; dest[index] += source[img * destChannels * (destWidth / strideX) * (destHeight / strideY) * kernelWidth * kernelHeight + centerPixel * destChannels * kernelWidth * kernelHeight + ch * kernelWidth * kernelHeight + neighNum]; } } } } } } template<typename T> void _cuda_prepareConvolutionSameBackProp(T* dest, const T* source, const unsigned int destWidth, const unsigned int destHeight, const unsigned int destChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY) { unsigned int nElements = destRows * destCols; int gridSize = (int)ceil((float)nElements/ THREADS_PER_BLOCK); __cuda_prepareConvolutionSameBackProp<<<gridSize, THREADS_PER_BLOCK>>>(dest, source, destWidth, destHeight, destChannels, kernelWidth, kernelHeight, destRows, destCols, strideX, strideY); } template void _cuda_prepareConvolutionSameBackProp(double* dest, const double* source, const unsigned int destWidth, const unsigned int destHeight, const unsigned int destChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY); template void _cuda_prepareConvolutionSameBackProp(float* dest, const float* source, const unsigned int destWidth, const unsigned int destHeight, const unsigned int destChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY); template __global__ void __cuda_prepareConvolutionSameBackProp(double* dest, const double* source, const unsigned int destWidth, const unsigned int destHeight, const unsigned int destChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY); template __global__ void __cuda_prepareConvolutionSameBackProp(float* dest, const float* source, const unsigned int destWidth, const unsigned int destHeight, const unsigned int destChannels, const unsigned int kernelWidth, const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY); /* * rearrange * * helper for convolution */ template<typename T> __global__ void __cuda_rearrange(T *dest, const T *source, const unsigned int sourceRows, const unsigned int destRows, const 
unsigned int destColumns, const unsigned int destNumPixels) { unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; if(index < (destRows * destColumns)) { unsigned int img = index / (sourceRows * destNumPixels); unsigned int ch = (index % (sourceRows * destNumPixels)) / destNumPixels; unsigned int pix = (index % (sourceRows * destNumPixels)) % destNumPixels; dest[index] = source[sourceRows * (img * destNumPixels + pix) + ch]; } } template<typename T> void _cuda_rearrange(T *dest, const T *source, const unsigned int sourceRows, const unsigned int destRows, const unsigned int destColumns, const unsigned int destNumPixels) { unsigned int nElements = destColumns * destRows; int gridSize = (int)ceil((float)nElements/ THREADS_PER_BLOCK); __cuda_rearrange<<<gridSize, THREADS_PER_BLOCK>>>(dest, source, sourceRows, destRows, destColumns, destNumPixels); } template __global__ void __cuda_rearrange(double *dest, const double *source, const unsigned int sourceRows, const unsigned int destRows, const unsigned int destColumns, const unsigned int destNumPixels); template __global__ void __cuda_rearrange(float *dest, const float *source, const unsigned int sourceRows, const unsigned int destRows, const unsigned int destColumns, const unsigned int destNumPixels); template void _cuda_rearrange(double *dest, const double *source, const unsigned int sourceRows, const unsigned int destRows, const unsigned int destColumns, const unsigned int destNumPixels); template void _cuda_rearrange(float *dest, const float *source, const unsigned int sourceRows, const unsigned int destRows, const unsigned int destColumns, const unsigned int destNumPixels); /* * * Rearrange back propagation * */ template<typename T> __global__ void __cuda_rearrangeBackProp(T *dest, const T *source, const unsigned int sourceColumns, const unsigned int destRows, const unsigned int destColumns, const unsigned int destNumPixels) { unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; if(index < (destRows * destColumns)) { unsigned int img = index / (destNumPixels * destRows); unsigned int pix = (index % (destNumPixels * destRows)) / destRows; unsigned int ch = (index % (destNumPixels * destRows)) % destRows; dest[index] = source[img*(destRows * destNumPixels) + ch * destNumPixels + pix]; } } template<typename T> void _cuda_rearrangeBackProp(T *dest, const T *source, const unsigned int sourceColumns, const unsigned int destRows, const unsigned int destColumns, const unsigned int destNumPixels) { unsigned int nElements = destRows * destColumns; int gridSize = (int)ceil((float)nElements/THREADS_PER_BLOCK); __cuda_rearrangeBackProp<<<gridSize, THREADS_PER_BLOCK>>>(dest, source, sourceColumns, destRows, destColumns, destNumPixels); } template __global__ void __cuda_rearrangeBackProp(double *dest, const double *source, const unsigned int sourceColumns, const unsigned int destRows, const unsigned int destColumns, const unsigned int destNumPixels); template __global__ void __cuda_rearrangeBackProp(float *dest, const float *source, const unsigned int sourceColumns, const unsigned int destRows, const unsigned int destColumns, const unsigned int destNumPixels); template void _cuda_rearrangeBackProp(double *dest, const double *source, const unsigned int sourceColumns, const unsigned int destRows, const unsigned int destColumns, const unsigned int destNumPixels); template void _cuda_rearrangeBackProp(float *dest, const float *source, const unsigned int sourceColumns, const unsigned int destRows, const unsigned int destColumns, const unsigned int 
destNumPixels); /* * * argMax * * */ template<typename T> __global__ void __cuda_argMax(T *matrixPtr, unsigned int nRows, unsigned int nColumns, unsigned int *resultDevPtr){ unsigned int column= threadIdx.x + blockIdx.x * blockDim.x; if (column < nColumns){ int beginCol = column * nRows; T maxVal = matrixPtr[beginCol]; resultDevPtr[column] = 0; for (int i = 1; i < nRows; i++){ T val = matrixPtr[beginCol + i]; if (val > maxVal){ maxVal = val; resultDevPtr[column] = i; } } } } template<typename T> void _cuda_argMax(T *matrixPtr, unsigned int nRows, unsigned int nColumns, unsigned int *resultDevPtr) { // parallelization over columns only int gridSize = (int)ceil( (float) nColumns/THREADS_PER_BLOCK); __cuda_argMax <<< gridSize, THREADS_PER_BLOCK>>> (matrixPtr, nRows, nColumns, resultDevPtr); } template __global__ void __cuda_argMax<double>(double *matrixPtr, unsigned int nRows, unsigned int nColumns, unsigned int *resultDevPtr); template void _cuda_argMax<double>(double *matrixPtr, unsigned int nRows, unsigned int nColumns, unsigned int *resultDevPtr); template __global__ void __cuda_argMax<float>(float *matrixPtr, unsigned int nRows, unsigned int nColumns, unsigned int *resultDevPtr); template void _cuda_argMax<float>(float *matrixPtr, unsigned int nRows, unsigned int nColumns, unsigned int *resultDevPtr); /* * * max * set max per column to 1.0, all other to 0.0 * */ template<typename T> __global__ void __cuda_max(T *devResult, unsigned int nRows, unsigned int nColumns){ unsigned int column = threadIdx.x + blockIdx.x * blockDim.x; if (column < nColumns) { unsigned int argMax = 0; T max = devResult[column * nRows]; for (int i = 0; i < nRows; i++) { if (devResult[column * nRows + i] > max) { max = devResult[column * nRows + i]; argMax = i; } devResult[column * nRows + i] = 0.0; } devResult[column * nRows + argMax] = 1.0; } } template<typename T> void _cuda_max(T *devResult, unsigned int nRows, unsigned int nColumns) { int gridSize = (int)ceil( (float) nColumns/THREADS_PER_BLOCK); __cuda_max <<< gridSize, THREADS_PER_BLOCK>>> (devResult, nRows, nColumns); } template __global__ void __cuda_max<double>(double *devResult, unsigned int nRows, unsigned int nColumns); template void _cuda_max<double>(double *devResult, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_max<float>(float *devResult, unsigned int nRows, unsigned int nColumns); template void _cuda_max<float>(float *devResult, unsigned int nRows, unsigned int nColumns); /* * * max * * */ template<typename T> __global__ void __cuda_max(T *devResult, const T *devA, const T *devB, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) { if (devA[index] < devB[index]) devResult[index] = devB[index]; else devResult[index] = devA[index]; } } template<typename T> void _cuda_max(T *devResult, const T *devA, const T *devB, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); __cuda_max <<< gridSize, THREADS_PER_BLOCK>>> (devResult, devA, devB, nElements); } template __global__ void __cuda_max<double>(double *devResult, const double *devA, const double *devB, unsigned int nElements); template void _cuda_max<double>(double *devResult, const double *devA, const double *devB, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_max<float>(float *devResult, const float *devA, const float *devB, unsigned int nElements); template void _cuda_max<float>(float 
*devResult, const float *devA, const float *devB, unsigned int nRows, unsigned int nColumns); /* * * elementwiseMultiplicationWithKroneckerDelta * * */ template<typename T> __global__ void __cuda_elementwiseMultiplicationWithKroneckerDelta(T *devResult, const T *devA, const T *devB, unsigned int nElements){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nElements) { if (devA[index] != devB[index]) devResult[index] = 0; } } template<typename T> void _cuda_elementwiseMultiplicationWithKroneckerDelta(T *devResult, const T *devA, const T *devB, unsigned int nRows, unsigned int nColumns) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); __cuda_elementwiseMultiplicationWithKroneckerDelta <<< gridSize, THREADS_PER_BLOCK>>> (devResult, devA, devB, nElements); } template __global__ void __cuda_elementwiseMultiplicationWithKroneckerDelta<double>(double *devResult, const double *devA, const double *devB, unsigned int nElements); template void _cuda_elementwiseMultiplicationWithKroneckerDelta<double>(double *devResult, const double *devA, const double *devB, unsigned int nRows, unsigned int nColumns); template __global__ void __cuda_elementwiseMultiplicationWithKroneckerDelta<float>(float *devResult, const float *devA, const float *devB, unsigned int nElements); template void _cuda_elementwiseMultiplicationWithKroneckerDelta<float>(float *devResult, const float *devA, const float *devB, unsigned int nRows, unsigned int nColumns); /* * * nClassificationErrors * * */ template<typename T> __global__ void __cuda_nClassificationErrors(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, unsigned int *resultDevPtr){ unsigned int column= threadIdx.x + blockIdx.x * blockDim.x; if (column < nColumns){ int beginCol = column * nRows; T maxVal = matrixPtr[beginCol]; uint argmax = 0; for (int i = 1; i < nRows; i++){ T val = matrixPtr[beginCol + i]; if (val > maxVal){ maxVal = val; argmax = i; } } if (targets[nRows * column + argmax] != 1.0){ atomicAdd(resultDevPtr, 1); } } } template<typename T> void _cuda_nClassificationErrors(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, unsigned int *resultDevPtr) { // parallelization over columns only int gridSize = (int)ceil( (float) nColumns/THREADS_PER_BLOCK); unsigned int result = 0; cudaMemcpy(resultDevPtr, &result, sizeof(unsigned int), cudaMemcpyHostToDevice); __cuda_nClassificationErrors <<< gridSize, THREADS_PER_BLOCK>>> (matrixPtr, nRows, nColumns, targets, resultDevPtr); } template __global__ void __cuda_nClassificationErrors<double>(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, unsigned int *resultDevPtr); template void _cuda_nClassificationErrors<double>(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, unsigned int *resultDevPtr); template __global__ void __cuda_nClassificationErrors<float>(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, unsigned int *resultDevPtr); template void _cuda_nClassificationErrors<float>(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, unsigned int *resultDevPtr); // crossEntropyObjectiveFunction template<typename T> __global__ void __cuda_crossEntropyObjectiveFunction(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, T *objFctn){ unsigned int column= threadIdx.x + blockIdx.x * blockDim.x; if (column < nColumns){ objFctn[column] = 0; for (int i = 0; i < nRows; i++){ if 
(targets[nRows * column + i] == 1.0) objFctn[column] -= log(matrixPtr[nRows * column + i]); } } } template<typename T> void _cuda_crossEntropyObjectiveFunction(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T* targets, T *objFctn) { // parallelization over columns only int gridSize = (int)ceil( (float) nColumns/THREADS_PER_BLOCK); __cuda_crossEntropyObjectiveFunction <<< gridSize , THREADS_PER_BLOCK >>> (matrixPtr, nRows, nColumns, targets, objFctn); } template __global__ void __cuda_crossEntropyObjectiveFunction<double>(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, double *objFctn); template void _cuda_crossEntropyObjectiveFunction<double>(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, double *objFctn); template __global__ void __cuda_crossEntropyObjectiveFunction<float>(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, float *objFctn); template void _cuda_crossEntropyObjectiveFunction<float>(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, float *objFctn); // weightedCrossEntropyObjectiveFunction template<typename T> __global__ void __cuda_weightedCrossEntropyObjectiveFunction(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, T *objFctn, T *weights){ unsigned int column= threadIdx.x + blockIdx.x * blockDim.x; if (column < nColumns){ objFctn[column] = 0; for (int i = 0; i < nRows; i++){ if (targets[nRows * column + i] == 1.0) objFctn[column] -= log(matrixPtr[nRows * column + i]) * weights[column]; } } } template<typename T> void _cuda_weightedCrossEntropyObjectiveFunction(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, T *objFctn, T *weights) { // parallelization over columns only int gridSize = (int)ceil( (float) nColumns/THREADS_PER_BLOCK); __cuda_weightedCrossEntropyObjectiveFunction <<< gridSize , THREADS_PER_BLOCK >>> (matrixPtr, nRows, nColumns, targets, objFctn, weights); } template __global__ void __cuda_weightedCrossEntropyObjectiveFunction<double>(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, double *objFctn, double *weights); template void _cuda_weightedCrossEntropyObjectiveFunction<double>(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, double *objFctn, double *weights); template __global__ void __cuda_weightedCrossEntropyObjectiveFunction<float>(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, float *objFctn, float *weights); template void _cuda_weightedCrossEntropyObjectiveFunction<float>(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, float *objFctn, float *weights); // squaredErrorObjectiveFunction template<typename T> __global__ void __cuda_squaredErrorObjectiveFunction(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, T *objFctn){ unsigned int column = threadIdx.x + blockIdx.x * blockDim.x; if (column < nColumns){ objFctn[column] = 0.0f; for (int row = 0; row < nRows; row++){ unsigned int position = column * nRows + row; objFctn[column] += (matrixPtr[position] - targets[position]) * (matrixPtr[position] - targets[position]); } } } template<typename T> void _cuda_squaredErrorObjectiveFunction(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, T *objFctn) { int gridSize = (int)ceil( (float) nColumns/THREADS_PER_BLOCK); __cuda_squaredErrorObjectiveFunction <<< gridSize , THREADS_PER_BLOCK >>> (matrixPtr, nRows, nColumns, targets, objFctn); } template 
__global__ void __cuda_squaredErrorObjectiveFunction(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, double *objFctn); template void _cuda_squaredErrorObjectiveFunction(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, double *objFctn); template __global__ void __cuda_squaredErrorObjectiveFunction(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, float *objFctn); template void _cuda_squaredErrorObjectiveFunction(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, float *objFctn); // weightedSquaredErrorObjectiveFunction template<typename T> __global__ void __cuda_weightedSquaredErrorObjectiveFunction(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, T *objFctn, T *weights){ unsigned int column = threadIdx.x + blockIdx.x * blockDim.x; if (column < nColumns){ objFctn[column] = 0.0f; for (int row = 0; row < nRows; row++){ unsigned int position = column * nRows + row; objFctn[column] += (matrixPtr[position] - targets[position]) * (matrixPtr[position] - targets[position]); } objFctn[column] *= weights[column]; } } template<typename T> void _cuda_weightedSquaredErrorObjectiveFunction(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, T *objFctn, T *weights) { int gridSize = (int)ceil( (float) nColumns/THREADS_PER_BLOCK); __cuda_weightedSquaredErrorObjectiveFunction <<< gridSize , THREADS_PER_BLOCK >>> (matrixPtr, nRows, nColumns, targets, objFctn, weights); } template __global__ void __cuda_weightedSquaredErrorObjectiveFunction(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, double *objFctn, double *weights); template void _cuda_weightedSquaredErrorObjectiveFunction(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, double *objFctn, double *weights); template __global__ void __cuda_weightedSquaredErrorObjectiveFunction(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, float *objFctn, float *weights); template void _cuda_weightedSquaredErrorObjectiveFunction(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, float *objFctn, float *weights); // smoothedL1ObjectiveFunction template<typename T> __global__ void __cuda_smoothedL1ObjectiveFunction(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, T *objFctn){ unsigned int column = threadIdx.x + blockIdx.x * blockDim.x; if (column < nColumns){ objFctn[column] = 0.0f; for (int row = 0; row < nRows; row++){ unsigned int position = column * nRows + row; if ((matrixPtr[position] - targets[position]) < -1.0) objFctn[column] += (targets[position] - matrixPtr[position]) - 0.5; else if ((matrixPtr[position] - targets[position]) > 1.0) objFctn[column] += (matrixPtr[position] - targets[position]) - 0.5; else objFctn[column] += 0.5 * (matrixPtr[position] - targets[position]) * (matrixPtr[position] - targets[position]); } } } template<typename T> void _cuda_smoothedL1ObjectiveFunction(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, T *objFctn) { int gridSize = (int)ceil( (float) nColumns/THREADS_PER_BLOCK); __cuda_smoothedL1ObjectiveFunction <<< gridSize , THREADS_PER_BLOCK >>> (matrixPtr, nRows, nColumns, targets, objFctn); } template __global__ void __cuda_smoothedL1ObjectiveFunction(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, double *objFctn); template void _cuda_smoothedL1ObjectiveFunction(double *matrixPtr, unsigned int nRows, 
unsigned int nColumns, double *targets, double *objFctn); template __global__ void __cuda_smoothedL1ObjectiveFunction(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, float *objFctn); template void _cuda_smoothedL1ObjectiveFunction(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, float *objFctn); // weightedSmoothedL1ObjectiveFunction template<typename T> __global__ void __cuda_weightedSmoothedL1ObjectiveFunction(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, T* weights, T *objFctn){ unsigned int column = threadIdx.x + blockIdx.x * blockDim.x; if (column < nColumns){ objFctn[column] = 0.0f; for (int row = 0; row < nRows; row++){ unsigned int position = column * nRows + row; if ((matrixPtr[position] - targets[position]) < -1.0) objFctn[column] += (targets[position] - matrixPtr[position]) - 0.5; else if ((matrixPtr[position] - targets[position]) > 1.0) objFctn[column] += (matrixPtr[position] - targets[position]) - 0.5; else objFctn[column] += 0.5 * (matrixPtr[position] - targets[position]) * (matrixPtr[position] - targets[position]); } objFctn[column] *= weights[column]; } } template<typename T> void _cuda_weightedSmoothedL1ObjectiveFunction(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, T* weights, T *objFctn) { int gridSize = (int)ceil( (float) nColumns/THREADS_PER_BLOCK); __cuda_weightedSmoothedL1ObjectiveFunction <<< gridSize , THREADS_PER_BLOCK >>> (matrixPtr, nRows, nColumns, targets, weights, objFctn); } template __global__ void __cuda_weightedSmoothedL1ObjectiveFunction(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, double *objFctn, double *weights); template void _cuda_weightedSmoothedL1ObjectiveFunction(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, double *objFctn, double *weights); template __global__ void __cuda_weightedSmoothedL1ObjectiveFunction(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, float *objFctn, float *weights); template void _cuda_weightedSmoothedL1ObjectiveFunction(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, float *objFctn, float *weights); /* * appendSecondOrderFeatures */ template<typename T> __global__ void __cuda_appendSecondOrderFeatures(const T *X, unsigned int nRowsX, unsigned int nColumnsX, T *Y, unsigned int nRowsY, unsigned int offset){ unsigned int column = threadIdx.x + blockIdx.x * blockDim.x; if (column < nColumnsX){ unsigned int pos = offset; for (unsigned int i = 0; i < nRowsX; ++ i) { for (unsigned int j = i; j < nRowsX; ++ j) { Y[column * nRowsY + pos] = X[column * nRowsX + i] * X[column * nRowsX + j]; pos++; } } } } template<typename T> void _cuda_appendSecondOrderFeatures(const T *X, unsigned int nRowsX, unsigned int nColumnsX, T *Y, unsigned int nRowsY, unsigned int offset){ int gridSize = (int)ceil( (float) nColumnsX/THREADS_PER_BLOCK); __cuda_appendSecondOrderFeatures <<< gridSize , THREADS_PER_BLOCK >>> (X, nRowsX, nColumnsX, Y, nRowsY, offset); } template __global__ void __cuda_appendSecondOrderFeatures(const double *X, unsigned int nRowsX, unsigned int nColumnsX, double *Y, unsigned int nRowsY, unsigned int offset); template void _cuda_appendSecondOrderFeatures(const double *X, unsigned int nRowsX, unsigned int nColumnsX, double *Y, unsigned int nRowsY, unsigned int offset); template __global__ void __cuda_appendSecondOrderFeatures(const float *X, unsigned int nRowsX, unsigned int nColumnsX, float *Y, unsigned int 
nRowsY, unsigned int offset); template void _cuda_appendSecondOrderFeatures(const float *X, unsigned int nRowsX, unsigned int nColumnsX, float *Y, unsigned int nRowsY, unsigned int offset); /* * appendDiagonalSecondOrderFeatures */ template<typename T> __global__ void __cuda_appendDiagonalSecondOrderFeatures(const T *X, unsigned int nRowsX, unsigned int nColumnsX, T *Y, unsigned int nRowsY, unsigned int offset){ unsigned int column = threadIdx.x + blockIdx.x * blockDim.x; if (column < nColumnsX){ unsigned int pos = offset; for (unsigned int i = 0; i < nRowsX; ++ i) { Y[column * nRowsY + pos] = X[column * nRowsX + i] * X[column * nRowsX + i]; pos++; } } } template<typename T> void _cuda_appendDiagonalSecondOrderFeatures(const T *X, unsigned int nRowsX, unsigned int nColumnsX, T *Y, unsigned int nRowsY, unsigned int offset){ int gridSize = (int)ceil( (float) nColumnsX/THREADS_PER_BLOCK); __cuda_appendDiagonalSecondOrderFeatures <<< gridSize , THREADS_PER_BLOCK >>> (X, nRowsX, nColumnsX, Y, nRowsY, offset); } template __global__ void __cuda_appendDiagonalSecondOrderFeatures(const double *X, unsigned int nRowsX, unsigned int nColumnsX, double *Y, unsigned int nRowsY, unsigned int offset); template void _cuda_appendDiagonalSecondOrderFeatures(const double *X, unsigned int nRowsX, unsigned int nColumnsX, double *Y, unsigned int nRowsY, unsigned int offset); template __global__ void __cuda_appendDiagonalSecondOrderFeatures(const float *X, unsigned int nRowsX, unsigned int nColumnsX, float *Y, unsigned int nRowsY, unsigned int offset); template void _cuda_appendDiagonalSecondOrderFeatures(const float *X, unsigned int nRowsX, unsigned int nColumnsX, float *Y, unsigned int nRowsY, unsigned int offset); // appendThirdOrderFeatures template<typename T> __global__ void __cuda_appendThirdOrderFeatures(const T *X, unsigned int nRowsX, unsigned int nColumnsX, T *Y, unsigned int nRowsY, unsigned int offset){ unsigned int column = threadIdx.x + blockIdx.x * blockDim.x; if (column < nColumnsX){ unsigned int pos = offset; for (unsigned int i = 0; i < nRowsX; ++ i) { for (unsigned int j = i; j < nRowsX; ++ j) { for (unsigned int k = j; k < nRowsX; ++ k) { Y[column * nRowsY + pos] = X[column * nRowsX + i] * X[column * nRowsX + j] * X[column * nRowsX + k]; pos++; } } } } } template<typename T> void _cuda_appendThirdOrderFeatures(const T *X, unsigned int nRowsX, unsigned int nColumnsX, T *Y, unsigned int nRowsY, unsigned int offset){ int gridSize = (int)ceil( (float) nColumnsX/THREADS_PER_BLOCK); __cuda_appendThirdOrderFeatures <<< gridSize , THREADS_PER_BLOCK >>> (X, nRowsX, nColumnsX, Y, nRowsY, offset); } template __global__ void __cuda_appendThirdOrderFeatures(const double *X, unsigned int nRowsX, unsigned int nColumnsX, double *Y, unsigned int nRowsY, unsigned int offset); template void _cuda_appendThirdOrderFeatures(const double *X, unsigned int nRowsX, unsigned int nColumnsX, double *Y, unsigned int nRowsY, unsigned int offset); template __global__ void __cuda_appendThirdOrderFeatures(const float *X, unsigned int nRowsX, unsigned int nColumnsX, float *Y, unsigned int nRowsY, unsigned int offset); template void _cuda_appendThirdOrderFeatures(const float *X, unsigned int nRowsX, unsigned int nColumnsX, float *Y, unsigned int nRowsY, unsigned int offset); // appendDiagonalThirdOrderFeatures template<typename T> __global__ void __cuda_appendDiagonalThirdOrderFeatures(const T *X, unsigned int nRowsX, unsigned int nColumnsX, T *Y, unsigned int nRowsY, unsigned int offset){ unsigned int column = threadIdx.x + 
blockIdx.x * blockDim.x; if (column < nColumnsX){ unsigned int pos = offset; for (unsigned int i = 0; i < nRowsX; ++ i) { Y[column * nRowsY + pos] = X[column * nRowsX + i] * X[column * nRowsX + i] * X[column * nRowsX + i]; pos++; } } } template<typename T> void _cuda_appendDiagonalThirdOrderFeatures(const T *X, unsigned int nRowsX, unsigned int nColumnsX, T *Y, unsigned int nRowsY, unsigned int offset){ int gridSize = (int)ceil( (float) nColumnsX/THREADS_PER_BLOCK); __cuda_appendDiagonalThirdOrderFeatures <<< gridSize , THREADS_PER_BLOCK >>> (X, nRowsX, nColumnsX, Y, nRowsY, offset); } template __global__ void __cuda_appendDiagonalThirdOrderFeatures(const double *X, unsigned int nRowsX, unsigned int nColumnsX, double *Y, unsigned int nRowsY, unsigned int offset); template void _cuda_appendDiagonalThirdOrderFeatures(const double *X, unsigned int nRowsX, unsigned int nColumnsX, double *Y, unsigned int nRowsY, unsigned int offset); template __global__ void __cuda_appendDiagonalThirdOrderFeatures(const float *X, unsigned int nRowsX, unsigned int nColumnsX, float *Y, unsigned int nRowsY, unsigned int offset); template void _cuda_appendDiagonalThirdOrderFeatures(const float *X, unsigned int nRowsX, unsigned int nColumnsX, float *Y, unsigned int nRowsY, unsigned int offset); /* * * gaussianMixturePosteriors * computes unnormalized, unexponentiated Gaussian mixture posteriors * -> p(c|x) can be obtained with application of softmax on the result of this function * */ template<typename T> __global__ void __cuda_gaussianMixturePosteriors(T *P, const T *X, const T *means, const T *variances, const T *weights, unsigned int nFeatures, unsigned int featureDim, unsigned int nMixtures){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nFeatures * nMixtures) { unsigned int k = index % nMixtures; unsigned int n = index / nMixtures; T expn = 0; T det = 0; for (unsigned int d = 0; d < featureDim; d++) { expn += (X[n * featureDim + d] - means[d * nMixtures + k]) * (X[n * featureDim + d] - means[d * nMixtures + k]) / variances[d * nMixtures + k]; det += log(variances[d * nMixtures + k]); } P[index] = log(weights[k]) - 0.5 * expn - 0.5 * log(2 * CUDART_PI) * featureDim - 0.5 * det; } } template<typename T> void _cuda_gaussianMixturePosteriors(T *P, const T *X, const T *means, const T *variances, const T *weights, unsigned int nFeatures, unsigned int featureDim, unsigned int nMixtures) { unsigned int nElements = nFeatures * nMixtures; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); __cuda_gaussianMixturePosteriors <<< gridSize , THREADS_PER_BLOCK >>> (P, X, means, variances, weights, nFeatures, featureDim, nMixtures); } template __global__ void __cuda_gaussianMixturePosteriors(double *P, const double *X, const double *means, const double *variances, const double *weights, unsigned int nFeatures, unsigned int featureDim, unsigned int nMixtures); template void _cuda_gaussianMixturePosteriors(double *P, const double *X, const double *means, const double *variances, const double *weights, unsigned int nFeatures, unsigned int featureDim, unsigned int nMixtures); template __global__ void __cuda_gaussianMixturePosteriors(float *P, const float *X, const float *means, const float *variances, const float *weights, unsigned int nFeatures, unsigned int featureDim, unsigned int nMixtures); template void _cuda_gaussianMixturePosteriors(float *P, const float *X, const float *means, const float *variances, const float *weights, unsigned int nFeatures, unsigned int featureDim, unsigned int 
nMixtures); /* * * fisher encoding * */ template<typename T> __global__ void __cuda_fisherEncoding(T *F, const T *X, const T *means, const T *variances, const T *weights, const T* gamma, unsigned int nFeatures, unsigned int featureDim, unsigned int nMixtures){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nFeatures * nMixtures * featureDim) { unsigned int n = index / (nMixtures * featureDim); unsigned int k = (index % (nMixtures * featureDim)) / featureDim; unsigned int d = (index % (nMixtures * featureDim)) % featureDim; // first order component F[d + k * featureDim + n * featureDim * nMixtures * 2] = gamma[k + n * nMixtures] * (X[d + n * featureDim] - means[k + d * nMixtures]) / sqrt(variances[k + d * nMixtures] * weights[k]); // second order component F[d + (k + nMixtures) * featureDim + n * featureDim * nMixtures * 2] = gamma[k + n * nMixtures] * ( (X[d + n * featureDim] - means[k + d * nMixtures]) * (X[d + n * featureDim] - means[k + d * nMixtures]) / variances[k + d * nMixtures] - 1.0 ) / sqrt(2 * weights[k]); } } template<typename T> void _cuda_fisherEncoding(T *F, const T *X, const T *means, const T *variances, const T *weights, const T *gamma, unsigned int nFeatures, unsigned int featureDim, unsigned int nMixtures) { unsigned int nElements = nFeatures * nMixtures * featureDim; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); __cuda_fisherEncoding <<< gridSize , THREADS_PER_BLOCK >>> (F, X, means, variances, weights, gamma, nFeatures, featureDim, nMixtures); } template __global__ void __cuda_fisherEncoding(double *F, const double *X, const double *means, const double *variances, const double *weights, const double *gamma, unsigned int nFeatures, unsigned int featureDim, unsigned int nMixtures); template void _cuda_fisherEncoding(double *F, const double *X, const double *means, const double *variances, const double *weights, const double *gamma, unsigned int nFeatures, unsigned int featureDim, unsigned int nMixtures); template __global__ void __cuda_fisherEncoding(float *F, const float *X, const float *means, const float *variances, const float *weights, const float *gamma, unsigned int nFeatures, unsigned int featureDim, unsigned int nMixtures); template void _cuda_fisherEncoding(float *F, const float *X, const float *means, const float *variances, const float *weights, const float *gamma, unsigned int nFeatures, unsigned int featureDim, unsigned int nMixtures); /* * * dropout * */ template<typename T> __global__ void __cuda_dropout(T *data, const T *mask, unsigned int nElements, T dropoutProbability){ unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if ((index < nElements) && (mask[index] < dropoutProbability)) data[index] = 0.0; } template<typename T> void _cuda_dropout(T *data, const T *mask, unsigned int nRows, unsigned int nColumns, T dropoutProbability) { unsigned int nElements = nRows * nColumns; int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK); __cuda_dropout <<< gridSize , THREADS_PER_BLOCK >>> (data, mask, nElements, dropoutProbability); } template __global__ void __cuda_dropout(double *data, const double *mask, unsigned int nElements, double dropoutProbability); template void _cuda_dropout(double *data, const double *mask, unsigned int nRows, unsigned int nColumns, double dropoutProbability); template __global__ void __cuda_dropout(float *data, const float *mask, unsigned int nElements, float dropoutProbability); template void _cuda_dropout(float *data, const float *mask, unsigned int nRows, unsigned int 
nColumns, float dropoutProbability);
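A minimal host-side sketch of how one of the wrappers above might be driven. Everything in it is illustrative: the buffer names and sizes are hypothetical, and the _cuda_argMax declaration is assumed to be visible through the header that accompanies this kernel file.

// Hypothetical host-side driver for _cuda_argMax (illustration only, not part of the file above).
#include <cuda_runtime.h>
#include <vector>

void argMaxExample() {
    const unsigned int nRows = 10, nColumns = 256;            // hypothetical sizes
    float *dScores = 0;
    unsigned int *dArgMax = 0;
    cudaMalloc((void **)&dScores, sizeof(float) * nRows * nColumns);
    cudaMalloc((void **)&dArgMax, sizeof(unsigned int) * nColumns);
    // ... copy a column-major nRows x nColumns score matrix into dScores ...
    _cuda_argMax<float>(dScores, nRows, nColumns, dArgMax);   // one argmax row index per column
    std::vector<unsigned int> hArgMax(nColumns);
    cudaMemcpy(hArgMax.data(), dArgMax, sizeof(unsigned int) * nColumns,
               cudaMemcpyDeviceToHost);                       // implicit sync on the default stream
    cudaFree(dScores);
    cudaFree(dArgMax);
}

The other column-wise wrappers in this file (max, the objective functions, the feature-append kernels) follow the same calling pattern, launching one thread per column with THREADS_PER_BLOCK threads per block.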
80ed54063da4bcbb4cfd410ed2c5e35765da59a9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <gtest/gtest.h> #include <cuda_utils.cuh> #include <distance/fused_l2_nn.cuh> #include <linalg/norm.cuh> #include <random/rng.cuh> #include "test_utils.h" namespace MLCommon { namespace Distance { template <typename DataT, bool Sqrt, typename ReduceOpT, int NWARPS> __global__ void naiveKernel(hipcub::KeyValuePair<int, DataT> *min, DataT *x, DataT *y, int m, int n, int k, int *workspace, DataT maxVal) { int midx = threadIdx.y + blockIdx.y * blockDim.y; int nidx = threadIdx.x + blockIdx.x * blockDim.x; DataT acc = DataT(0); for (int i = 0; i < k; ++i) { int xidx = i + midx * k; int yidx = i + nidx * k; auto diff = midx >= m || nidx >= n ? DataT(0) : x[xidx] - y[yidx]; acc += diff * diff; } if (Sqrt) { acc = raft::mySqrt(acc); } ReduceOpT redOp; typedef hipcub::WarpReduce<hipcub::KeyValuePair<int, DataT>> WarpReduce; __shared__ typename WarpReduce::TempStorage temp[NWARPS]; int warpId = threadIdx.x / raft::WarpSize; hipcub::KeyValuePair<int, DataT> tmp; tmp.key = nidx; tmp.value = midx >= m || nidx >= n ? maxVal : acc; tmp = WarpReduce(temp[warpId]).Reduce(tmp, KVPMinReduce<int, DataT>()); if (threadIdx.x % raft::WarpSize == 0 && midx < m) { while (atomicCAS(workspace + midx, 0, 1) == 1) ; __threadfence(); redOp(min + midx, tmp); __threadfence(); atomicCAS(workspace + midx, 1, 0); } } template <typename DataT, bool Sqrt> void naive(hipcub::KeyValuePair<int, DataT> *min, DataT *x, DataT *y, int m, int n, int k, int *workspace, hipStream_t stream) { static const dim3 TPB(32, 16, 1); dim3 nblks(raft::ceildiv(n, (int)TPB.x), raft::ceildiv(m, (int)TPB.y), 1); CUDA_CHECK(hipMemsetAsync(workspace, 0, sizeof(int) * m, stream)); auto blks = raft::ceildiv(m, 256); MinAndDistanceReduceOp<int, DataT> op; hipLaunchKernelGGL(( initKernel<DataT, hipcub::KeyValuePair<int, DataT>, int>) , dim3(blks), dim3(256), 0, stream, min, m, std::numeric_limits<DataT>::max(), op); CUDA_CHECK(hipGetLastError()); hipLaunchKernelGGL(( naiveKernel<DataT, Sqrt, MinAndDistanceReduceOp<int, DataT>, 16>) , dim3(nblks), dim3(TPB), 0, stream, min, x, y, m, n, k, workspace, std::numeric_limits<DataT>::max()); CUDA_CHECK(hipGetLastError()); } template <typename DataT> struct Inputs { DataT tolerance; int m, n, k; unsigned long long int seed; }; template <typename DataT, bool Sqrt> class FusedL2NNTest : public ::testing::TestWithParam<Inputs<DataT>> { public: void SetUp() override { params = ::testing::TestWithParam<Inputs<DataT>>::GetParam(); raft::random::Rng r(params.seed); int m = params.m; int n = params.n; int k = params.k; CUDA_CHECK(hipStreamCreate(&stream)); raft::allocate(x, m * k); raft::allocate(y, n * k); raft::allocate(xn, m); raft::allocate(yn, n); raft::allocate(workspace, sizeof(int) * m); raft::allocate(min, m); raft::allocate(min_ref, m); r.uniform(x, m * k, DataT(-1.0), DataT(1.0), stream); r.uniform(y, 
n * k, DataT(-1.0), DataT(1.0), stream); generateGoldenResult(); LinAlg::rowNorm(xn, x, k, m, LinAlg::L2Norm, true, stream); LinAlg::rowNorm(yn, y, k, n, LinAlg::L2Norm, true, stream); } void TearDown() override { CUDA_CHECK(hipStreamSynchronize(stream)); CUDA_CHECK(hipStreamDestroy(stream)); CUDA_CHECK(hipFree(x)); CUDA_CHECK(hipFree(y)); CUDA_CHECK(hipFree(xn)); CUDA_CHECK(hipFree(yn)); CUDA_CHECK(hipFree(workspace)); CUDA_CHECK(hipFree(min_ref)); CUDA_CHECK(hipFree(min)); } protected: Inputs<DataT> params; DataT *x, *y, *xn, *yn; char *workspace; hipcub::KeyValuePair<int, DataT> *min, *min_ref; hipStream_t stream; virtual void generateGoldenResult() { int m = params.m; int n = params.n; int k = params.k; naive<DataT, Sqrt>(min_ref, x, y, m, n, k, (int *)workspace, stream); } void runTest(hipcub::KeyValuePair<int, DataT> *out) { int m = params.m; int n = params.n; int k = params.k; MinAndDistanceReduceOp<int, DataT> redOp; fusedL2NN<DataT, hipcub::KeyValuePair<int, DataT>, int>( out, x, y, xn, yn, m, n, k, (void *)workspace, redOp, Sqrt, true, stream); CUDA_CHECK(hipStreamSynchronize(stream)); } }; template <typename T> struct CompareApproxAbsKVP { typedef typename hipcub::KeyValuePair<int, T> KVP; CompareApproxAbsKVP(T eps_) : eps(eps_) {} bool operator()(const KVP &a, const KVP &b) const { if (a.key != b.key) return false; T diff = raft::abs(raft::abs(a.value) - raft::abs(b.value)); T m = ::max(raft::abs(a.value), raft::abs(b.value)); T ratio = m >= eps ? diff / m : diff; return (ratio <= eps); } private: T eps; }; template <typename T> struct CompareExactKVP { typedef typename hipcub::KeyValuePair<int, T> KVP; bool operator()(const KVP &a, const KVP &b) const { if (a.key != b.key) return false; if (a.value != b.value) return false; return true; } }; template <typename K, typename V, typename L> ::testing::AssertionResult devArrMatch(const hipcub::KeyValuePair<K, V> *expected, const hipcub::KeyValuePair<K, V> *actual, size_t size, L eq_compare, hipStream_t stream = 0) { typedef typename hipcub::KeyValuePair<K, V> KVP; std::shared_ptr<KVP> exp_h(new KVP[size]); std::shared_ptr<KVP> act_h(new KVP[size]); raft::update_host<KVP>(exp_h.get(), expected, size, stream); raft::update_host<KVP>(act_h.get(), actual, size, stream); CUDA_CHECK(hipStreamSynchronize(stream)); for (size_t i(0); i < size; ++i) { auto exp = exp_h.get()[i]; auto act = act_h.get()[i]; if (!eq_compare(exp, act)) { return ::testing::AssertionFailure() << "actual=" << act.key << "," << act.value << " != expected=" << exp.key << "," << exp.value << " @" << i; } } return ::testing::AssertionSuccess(); } const std::vector<Inputs<float>> inputsf = { {0.001f, 32, 32, 32, 1234ULL}, {0.001f, 32, 64, 32, 1234ULL}, {0.001f, 64, 32, 32, 1234ULL}, {0.001f, 64, 64, 32, 1234ULL}, {0.001f, 128, 32, 32, 1234ULL}, {0.001f, 128, 64, 32, 1234ULL}, {0.001f, 128, 128, 64, 1234ULL}, {0.001f, 64, 128, 128, 1234ULL}, {0.001f, 32, 32, 34, 1234ULL}, {0.001f, 32, 64, 34, 1234ULL}, {0.001f, 64, 32, 34, 1234ULL}, {0.001f, 64, 64, 34, 1234ULL}, {0.001f, 128, 32, 34, 1234ULL}, {0.001f, 128, 64, 34, 1234ULL}, {0.001f, 128, 128, 66, 1234ULL}, {0.001f, 64, 128, 130, 1234ULL}, {0.001f, 32, 32, 33, 1234ULL}, {0.001f, 32, 64, 33, 1234ULL}, {0.001f, 64, 32, 33, 1234ULL}, {0.001f, 64, 64, 33, 1234ULL}, {0.001f, 128, 32, 33, 1234ULL}, {0.001f, 128, 64, 33, 1234ULL}, {0.001f, 128, 128, 65, 1234ULL}, {0.001f, 64, 128, 129, 1234ULL}, {0.006f, 1805, 134, 2, 1234ULL}, }; typedef FusedL2NNTest<float, false> FusedL2NNTestF_Sq; TEST_P(FusedL2NNTestF_Sq, Result) { 
runTest(min); ASSERT_TRUE(devArrMatch(min_ref, min, params.m, CompareApproxAbsKVP<float>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(FusedL2NNTests, FusedL2NNTestF_Sq, ::testing::ValuesIn(inputsf)); typedef FusedL2NNTest<float, true> FusedL2NNTestF_Sqrt; TEST_P(FusedL2NNTestF_Sqrt, Result) { runTest(min); ASSERT_TRUE(devArrMatch(min_ref, min, params.m, CompareApproxAbsKVP<float>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(FusedL2NNTests, FusedL2NNTestF_Sqrt, ::testing::ValuesIn(inputsf)); const std::vector<Inputs<double>> inputsd = { {0.00001, 32, 32, 32, 1234ULL}, {0.00001, 32, 64, 32, 1234ULL}, {0.00001, 64, 32, 32, 1234ULL}, {0.00001, 64, 64, 32, 1234ULL}, {0.00001, 128, 32, 32, 1234ULL}, {0.00001, 128, 64, 32, 1234ULL}, {0.00001, 128, 128, 64, 1234ULL}, {0.00001, 64, 128, 128, 1234ULL}, {0.00001, 32, 32, 34, 1234ULL}, {0.00001, 32, 64, 34, 1234ULL}, {0.00001, 64, 32, 34, 1234ULL}, {0.00001, 64, 64, 34, 1234ULL}, {0.00001, 128, 32, 34, 1234ULL}, {0.00001, 128, 64, 34, 1234ULL}, {0.00001, 128, 128, 66, 1234ULL}, {0.00001, 64, 128, 130, 1234ULL}, {0.00001, 32, 32, 33, 1234ULL}, {0.00001, 32, 64, 33, 1234ULL}, {0.00001, 64, 32, 33, 1234ULL}, {0.00001, 64, 64, 33, 1234ULL}, {0.00001, 128, 32, 33, 1234ULL}, {0.00001, 128, 64, 33, 1234ULL}, {0.00001, 128, 128, 65, 1234ULL}, {0.00001, 64, 128, 129, 1234ULL}, {0.00001, 1805, 134, 2, 1234ULL}, }; typedef FusedL2NNTest<double, false> FusedL2NNTestD_Sq; TEST_P(FusedL2NNTestD_Sq, Result) { runTest(min); ASSERT_TRUE(devArrMatch(min_ref, min, params.m, CompareApproxAbsKVP<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(FusedL2NNTests, FusedL2NNTestD_Sq, ::testing::ValuesIn(inputsd)); typedef FusedL2NNTest<double, true> FusedL2NNTestD_Sqrt; TEST_P(FusedL2NNTestD_Sqrt, Result) { runTest(min); ASSERT_TRUE(devArrMatch(min_ref, min, params.m, CompareApproxAbsKVP<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(FusedL2NNTests, FusedL2NNTestD_Sqrt, ::testing::ValuesIn(inputsd)); /// This is to test output determinism of the prim template <typename DataT, bool Sqrt> class FusedL2NNDetTest : public FusedL2NNTest<DataT, Sqrt> { void SetUp() override { FusedL2NNTest<DataT, Sqrt>::SetUp(); int m = this->params.m; raft::allocate(min1, m); } void TearDown() override { FusedL2NNTest<DataT, Sqrt>::TearDown(); CUDA_CHECK(hipFree(min1)); } protected: hipcub::KeyValuePair<int, DataT> *min1; static const int NumRepeats = 100; void generateGoldenResult() override {} }; typedef FusedL2NNDetTest<float, false> FusedL2NNDetTestF_Sq; TEST_P(FusedL2NNDetTestF_Sq, Result) { runTest(min); // assumed to be golden for (int i = 0; i < NumRepeats; ++i) { runTest(min1); ASSERT_TRUE(devArrMatch(min, min1, params.m, CompareExactKVP<float>())); } } INSTANTIATE_TEST_CASE_P(FusedL2NNDetTests, FusedL2NNDetTestF_Sq, ::testing::ValuesIn(inputsf)); typedef FusedL2NNDetTest<float, true> FusedL2NNDetTestF_Sqrt; TEST_P(FusedL2NNDetTestF_Sqrt, Result) { runTest(min); // assumed to be golden for (int i = 0; i < NumRepeats; ++i) { runTest(min1); ASSERT_TRUE(devArrMatch(min, min1, params.m, CompareExactKVP<float>())); } } INSTANTIATE_TEST_CASE_P(FusedL2NNDetTests, FusedL2NNDetTestF_Sqrt, ::testing::ValuesIn(inputsf)); typedef FusedL2NNDetTest<double, false> FusedL2NNDetTestD_Sq; TEST_P(FusedL2NNDetTestD_Sq, Result) { runTest(min); // assumed to be golden for (int i = 0; i < NumRepeats; ++i) { runTest(min1); ASSERT_TRUE(devArrMatch(min, min1, params.m, CompareExactKVP<double>())); } } INSTANTIATE_TEST_CASE_P(FusedL2NNDetTests, FusedL2NNDetTestD_Sq, ::testing::ValuesIn(inputsd)); 
typedef FusedL2NNDetTest<double, true> FusedL2NNDetTestD_Sqrt; TEST_P(FusedL2NNDetTestD_Sqrt, Result) { runTest(min); // assumed to be golden for (int i = 0; i < NumRepeats; ++i) { runTest(min1); ASSERT_TRUE(devArrMatch(min, min1, params.m, CompareExactKVP<double>())); } } INSTANTIATE_TEST_CASE_P(FusedL2NNDetTests, FusedL2NNDetTestD_Sqrt, ::testing::ValuesIn(inputsd)); } // end namespace Distance } // end namespace MLCommon
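The .hip file above and the .cu file below are the same test before and after hipification, so the pair differs mostly mechanically: cuda*/cub:: calls become hip*/hipcub:: calls, and triple-chevron kernel launches become hipLaunchKernelGGL calls. A schematic sketch of that launch rewrite on a toy kernel (the kernel and names here are illustrative, not taken from either file):

#include <hip/hip_runtime.h>

__global__ void toyFill(int *out, int value) {        // toy kernel, for illustration only
    out[threadIdx.x + blockIdx.x * blockDim.x] = value;
}

void launchToy(int *dOut, hipStream_t stream) {
    // CUDA source form:  toyFill<<<dim3(4), dim3(256), 0, stream>>>(dOut, 42);
    // hipify rewrite: the launch macro takes grid, block, dynamic shared memory and stream,
    // followed by the kernel arguments.
    hipLaunchKernelGGL(toyFill, dim3(4), dim3(256), 0, stream, dOut, 42);
}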
80ed54063da4bcbb4cfd410ed2c5e35765da59a9.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <gtest/gtest.h> #include <cuda_utils.cuh> #include <distance/fused_l2_nn.cuh> #include <linalg/norm.cuh> #include <random/rng.cuh> #include "test_utils.h" namespace MLCommon { namespace Distance { template <typename DataT, bool Sqrt, typename ReduceOpT, int NWARPS> __global__ void naiveKernel(cub::KeyValuePair<int, DataT> *min, DataT *x, DataT *y, int m, int n, int k, int *workspace, DataT maxVal) { int midx = threadIdx.y + blockIdx.y * blockDim.y; int nidx = threadIdx.x + blockIdx.x * blockDim.x; DataT acc = DataT(0); for (int i = 0; i < k; ++i) { int xidx = i + midx * k; int yidx = i + nidx * k; auto diff = midx >= m || nidx >= n ? DataT(0) : x[xidx] - y[yidx]; acc += diff * diff; } if (Sqrt) { acc = raft::mySqrt(acc); } ReduceOpT redOp; typedef cub::WarpReduce<cub::KeyValuePair<int, DataT>> WarpReduce; __shared__ typename WarpReduce::TempStorage temp[NWARPS]; int warpId = threadIdx.x / raft::WarpSize; cub::KeyValuePair<int, DataT> tmp; tmp.key = nidx; tmp.value = midx >= m || nidx >= n ? maxVal : acc; tmp = WarpReduce(temp[warpId]).Reduce(tmp, KVPMinReduce<int, DataT>()); if (threadIdx.x % raft::WarpSize == 0 && midx < m) { while (atomicCAS(workspace + midx, 0, 1) == 1) ; __threadfence(); redOp(min + midx, tmp); __threadfence(); atomicCAS(workspace + midx, 1, 0); } } template <typename DataT, bool Sqrt> void naive(cub::KeyValuePair<int, DataT> *min, DataT *x, DataT *y, int m, int n, int k, int *workspace, cudaStream_t stream) { static const dim3 TPB(32, 16, 1); dim3 nblks(raft::ceildiv(n, (int)TPB.x), raft::ceildiv(m, (int)TPB.y), 1); CUDA_CHECK(cudaMemsetAsync(workspace, 0, sizeof(int) * m, stream)); auto blks = raft::ceildiv(m, 256); MinAndDistanceReduceOp<int, DataT> op; initKernel<DataT, cub::KeyValuePair<int, DataT>, int> <<<blks, 256, 0, stream>>>(min, m, std::numeric_limits<DataT>::max(), op); CUDA_CHECK(cudaGetLastError()); naiveKernel<DataT, Sqrt, MinAndDistanceReduceOp<int, DataT>, 16> <<<nblks, TPB, 0, stream>>>(min, x, y, m, n, k, workspace, std::numeric_limits<DataT>::max()); CUDA_CHECK(cudaGetLastError()); } template <typename DataT> struct Inputs { DataT tolerance; int m, n, k; unsigned long long int seed; }; template <typename DataT, bool Sqrt> class FusedL2NNTest : public ::testing::TestWithParam<Inputs<DataT>> { public: void SetUp() override { params = ::testing::TestWithParam<Inputs<DataT>>::GetParam(); raft::random::Rng r(params.seed); int m = params.m; int n = params.n; int k = params.k; CUDA_CHECK(cudaStreamCreate(&stream)); raft::allocate(x, m * k); raft::allocate(y, n * k); raft::allocate(xn, m); raft::allocate(yn, n); raft::allocate(workspace, sizeof(int) * m); raft::allocate(min, m); raft::allocate(min_ref, m); r.uniform(x, m * k, DataT(-1.0), DataT(1.0), stream); r.uniform(y, n * k, DataT(-1.0), DataT(1.0), stream); generateGoldenResult(); LinAlg::rowNorm(xn, x, k, m, LinAlg::L2Norm, true, stream); LinAlg::rowNorm(yn, y, k, n, 
LinAlg::L2Norm, true, stream); } void TearDown() override { CUDA_CHECK(cudaStreamSynchronize(stream)); CUDA_CHECK(cudaStreamDestroy(stream)); CUDA_CHECK(cudaFree(x)); CUDA_CHECK(cudaFree(y)); CUDA_CHECK(cudaFree(xn)); CUDA_CHECK(cudaFree(yn)); CUDA_CHECK(cudaFree(workspace)); CUDA_CHECK(cudaFree(min_ref)); CUDA_CHECK(cudaFree(min)); } protected: Inputs<DataT> params; DataT *x, *y, *xn, *yn; char *workspace; cub::KeyValuePair<int, DataT> *min, *min_ref; cudaStream_t stream; virtual void generateGoldenResult() { int m = params.m; int n = params.n; int k = params.k; naive<DataT, Sqrt>(min_ref, x, y, m, n, k, (int *)workspace, stream); } void runTest(cub::KeyValuePair<int, DataT> *out) { int m = params.m; int n = params.n; int k = params.k; MinAndDistanceReduceOp<int, DataT> redOp; fusedL2NN<DataT, cub::KeyValuePair<int, DataT>, int>( out, x, y, xn, yn, m, n, k, (void *)workspace, redOp, Sqrt, true, stream); CUDA_CHECK(cudaStreamSynchronize(stream)); } }; template <typename T> struct CompareApproxAbsKVP { typedef typename cub::KeyValuePair<int, T> KVP; CompareApproxAbsKVP(T eps_) : eps(eps_) {} bool operator()(const KVP &a, const KVP &b) const { if (a.key != b.key) return false; T diff = raft::abs(raft::abs(a.value) - raft::abs(b.value)); T m = std::max(raft::abs(a.value), raft::abs(b.value)); T ratio = m >= eps ? diff / m : diff; return (ratio <= eps); } private: T eps; }; template <typename T> struct CompareExactKVP { typedef typename cub::KeyValuePair<int, T> KVP; bool operator()(const KVP &a, const KVP &b) const { if (a.key != b.key) return false; if (a.value != b.value) return false; return true; } }; template <typename K, typename V, typename L> ::testing::AssertionResult devArrMatch(const cub::KeyValuePair<K, V> *expected, const cub::KeyValuePair<K, V> *actual, size_t size, L eq_compare, cudaStream_t stream = 0) { typedef typename cub::KeyValuePair<K, V> KVP; std::shared_ptr<KVP> exp_h(new KVP[size]); std::shared_ptr<KVP> act_h(new KVP[size]); raft::update_host<KVP>(exp_h.get(), expected, size, stream); raft::update_host<KVP>(act_h.get(), actual, size, stream); CUDA_CHECK(cudaStreamSynchronize(stream)); for (size_t i(0); i < size; ++i) { auto exp = exp_h.get()[i]; auto act = act_h.get()[i]; if (!eq_compare(exp, act)) { return ::testing::AssertionFailure() << "actual=" << act.key << "," << act.value << " != expected=" << exp.key << "," << exp.value << " @" << i; } } return ::testing::AssertionSuccess(); } const std::vector<Inputs<float>> inputsf = { {0.001f, 32, 32, 32, 1234ULL}, {0.001f, 32, 64, 32, 1234ULL}, {0.001f, 64, 32, 32, 1234ULL}, {0.001f, 64, 64, 32, 1234ULL}, {0.001f, 128, 32, 32, 1234ULL}, {0.001f, 128, 64, 32, 1234ULL}, {0.001f, 128, 128, 64, 1234ULL}, {0.001f, 64, 128, 128, 1234ULL}, {0.001f, 32, 32, 34, 1234ULL}, {0.001f, 32, 64, 34, 1234ULL}, {0.001f, 64, 32, 34, 1234ULL}, {0.001f, 64, 64, 34, 1234ULL}, {0.001f, 128, 32, 34, 1234ULL}, {0.001f, 128, 64, 34, 1234ULL}, {0.001f, 128, 128, 66, 1234ULL}, {0.001f, 64, 128, 130, 1234ULL}, {0.001f, 32, 32, 33, 1234ULL}, {0.001f, 32, 64, 33, 1234ULL}, {0.001f, 64, 32, 33, 1234ULL}, {0.001f, 64, 64, 33, 1234ULL}, {0.001f, 128, 32, 33, 1234ULL}, {0.001f, 128, 64, 33, 1234ULL}, {0.001f, 128, 128, 65, 1234ULL}, {0.001f, 64, 128, 129, 1234ULL}, {0.006f, 1805, 134, 2, 1234ULL}, }; typedef FusedL2NNTest<float, false> FusedL2NNTestF_Sq; TEST_P(FusedL2NNTestF_Sq, Result) { runTest(min); ASSERT_TRUE(devArrMatch(min_ref, min, params.m, CompareApproxAbsKVP<float>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(FusedL2NNTests, 
FusedL2NNTestF_Sq, ::testing::ValuesIn(inputsf)); typedef FusedL2NNTest<float, true> FusedL2NNTestF_Sqrt; TEST_P(FusedL2NNTestF_Sqrt, Result) { runTest(min); ASSERT_TRUE(devArrMatch(min_ref, min, params.m, CompareApproxAbsKVP<float>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(FusedL2NNTests, FusedL2NNTestF_Sqrt, ::testing::ValuesIn(inputsf)); const std::vector<Inputs<double>> inputsd = { {0.00001, 32, 32, 32, 1234ULL}, {0.00001, 32, 64, 32, 1234ULL}, {0.00001, 64, 32, 32, 1234ULL}, {0.00001, 64, 64, 32, 1234ULL}, {0.00001, 128, 32, 32, 1234ULL}, {0.00001, 128, 64, 32, 1234ULL}, {0.00001, 128, 128, 64, 1234ULL}, {0.00001, 64, 128, 128, 1234ULL}, {0.00001, 32, 32, 34, 1234ULL}, {0.00001, 32, 64, 34, 1234ULL}, {0.00001, 64, 32, 34, 1234ULL}, {0.00001, 64, 64, 34, 1234ULL}, {0.00001, 128, 32, 34, 1234ULL}, {0.00001, 128, 64, 34, 1234ULL}, {0.00001, 128, 128, 66, 1234ULL}, {0.00001, 64, 128, 130, 1234ULL}, {0.00001, 32, 32, 33, 1234ULL}, {0.00001, 32, 64, 33, 1234ULL}, {0.00001, 64, 32, 33, 1234ULL}, {0.00001, 64, 64, 33, 1234ULL}, {0.00001, 128, 32, 33, 1234ULL}, {0.00001, 128, 64, 33, 1234ULL}, {0.00001, 128, 128, 65, 1234ULL}, {0.00001, 64, 128, 129, 1234ULL}, {0.00001, 1805, 134, 2, 1234ULL}, }; typedef FusedL2NNTest<double, false> FusedL2NNTestD_Sq; TEST_P(FusedL2NNTestD_Sq, Result) { runTest(min); ASSERT_TRUE(devArrMatch(min_ref, min, params.m, CompareApproxAbsKVP<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(FusedL2NNTests, FusedL2NNTestD_Sq, ::testing::ValuesIn(inputsd)); typedef FusedL2NNTest<double, true> FusedL2NNTestD_Sqrt; TEST_P(FusedL2NNTestD_Sqrt, Result) { runTest(min); ASSERT_TRUE(devArrMatch(min_ref, min, params.m, CompareApproxAbsKVP<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(FusedL2NNTests, FusedL2NNTestD_Sqrt, ::testing::ValuesIn(inputsd)); /// This is to test output determinism of the prim template <typename DataT, bool Sqrt> class FusedL2NNDetTest : public FusedL2NNTest<DataT, Sqrt> { void SetUp() override { FusedL2NNTest<DataT, Sqrt>::SetUp(); int m = this->params.m; raft::allocate(min1, m); } void TearDown() override { FusedL2NNTest<DataT, Sqrt>::TearDown(); CUDA_CHECK(cudaFree(min1)); } protected: cub::KeyValuePair<int, DataT> *min1; static const int NumRepeats = 100; void generateGoldenResult() override {} }; typedef FusedL2NNDetTest<float, false> FusedL2NNDetTestF_Sq; TEST_P(FusedL2NNDetTestF_Sq, Result) { runTest(min); // assumed to be golden for (int i = 0; i < NumRepeats; ++i) { runTest(min1); ASSERT_TRUE(devArrMatch(min, min1, params.m, CompareExactKVP<float>())); } } INSTANTIATE_TEST_CASE_P(FusedL2NNDetTests, FusedL2NNDetTestF_Sq, ::testing::ValuesIn(inputsf)); typedef FusedL2NNDetTest<float, true> FusedL2NNDetTestF_Sqrt; TEST_P(FusedL2NNDetTestF_Sqrt, Result) { runTest(min); // assumed to be golden for (int i = 0; i < NumRepeats; ++i) { runTest(min1); ASSERT_TRUE(devArrMatch(min, min1, params.m, CompareExactKVP<float>())); } } INSTANTIATE_TEST_CASE_P(FusedL2NNDetTests, FusedL2NNDetTestF_Sqrt, ::testing::ValuesIn(inputsf)); typedef FusedL2NNDetTest<double, false> FusedL2NNDetTestD_Sq; TEST_P(FusedL2NNDetTestD_Sq, Result) { runTest(min); // assumed to be golden for (int i = 0; i < NumRepeats; ++i) { runTest(min1); ASSERT_TRUE(devArrMatch(min, min1, params.m, CompareExactKVP<double>())); } } INSTANTIATE_TEST_CASE_P(FusedL2NNDetTests, FusedL2NNDetTestD_Sq, ::testing::ValuesIn(inputsd)); typedef FusedL2NNDetTest<double, true> FusedL2NNDetTestD_Sqrt; TEST_P(FusedL2NNDetTestD_Sqrt, Result) { runTest(min); // assumed to be golden for (int i = 
0; i < NumRepeats; ++i) { runTest(min1); ASSERT_TRUE(devArrMatch(min, min1, params.m, CompareExactKVP<double>())); } } INSTANTIATE_TEST_CASE_P(FusedL2NNDetTests, FusedL2NNDetTestD_Sqrt, ::testing::ValuesIn(inputsd)); } // end namespace Distance } // end namespace MLCommon
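The key-value comparator in the test file above accepts a pair when the keys match exactly and the values agree to within a relative tolerance, falling back to an absolute tolerance when both magnitudes are very small. A host-only restatement of that rule, as a minimal reference sketch (editorial illustration, not part of the test file):

#include <algorithm>
#include <cmath>

// Sketch of the comparison rule used by CompareApproxAbsKVP above:
// relative error of the absolute values, or absolute error when the
// larger magnitude falls below eps.
bool approxAbsEqual(float a, float b, float eps) {
    float diff = std::abs(std::abs(a) - std::abs(b));
    float m = std::max(std::abs(a), std::abs(b));
    float ratio = (m >= eps) ? diff / m : diff;
    return ratio <= eps;
}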
ce74c56e053cda13e2cfc483a3cb4bdc5e626910.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // pbrt is Copyright(c) 1998-2020 Matt Pharr, Wenzel Jakob, and Greg Humphreys. // The pbrt source code is licensed under the Apache License, Version 2.0. // SPDX: Apache-2.0 #include <pbrt/pbrt.h> #include <pbrt/gpu/accel.h> #include <pbrt/gpu/optix.h> #include <pbrt/interaction.h> #include <pbrt/materials.h> #include <pbrt/media.h> #include <pbrt/shapes.h> #include <pbrt/textures.h> #include <pbrt/util/float.h> #include <pbrt/util/rng.h> #include <pbrt/util/transform.h> #include <pbrt/util/vecmath.h> // Make various functions visible to OptiX, which doesn't get to link // shader code with the CUDA code in the main executable... #include <pbrt/util/color.cpp> #include <pbrt/util/colorspace.cpp> #include <pbrt/util/noise.cpp> #include <pbrt/util/spectrum.cpp> #include <pbrt/util/transform.cpp> #include <optix_device.h> #include <utility> using namespace pbrt; extern "C" { extern __constant__ pbrt::RayIntersectParameters params; } /////////////////////////////////////////////////////////////////////////// // Utility functions // Payload management __device__ inline uint32_t packPointer0(void *ptr) { uint64_t uptr = reinterpret_cast<uint64_t>(ptr); return uptr >> 32; } __device__ inline uint32_t packPointer1(void *ptr) { uint64_t uptr = reinterpret_cast<uint64_t>(ptr); return uint32_t(uptr); } template <typename T> static __forceinline__ __device__ T *getPayload() { uint32_t p0 = optixGetPayload_0(), p1 = optixGetPayload_1(); const uint64_t uptr = (uint64_t(p0) << 32) | p1; return reinterpret_cast<T *>(uptr); } template <typename... Args> __device__ inline void Trace(OptixTraversableHandle traversable, Ray ray, Float tMin, Float tMax, OptixRayFlags flags, Args &&... payload) { optixTrace(traversable, make_float3(ray.o.x, ray.o.y, ray.o.z), make_float3(ray.d.x, ray.d.y, ray.d.z), tMin, tMax, ray.time, OptixVisibilityMask(255), flags, 0, /* ray type */ 1, /* number of ray types */ 0, /* missSBTIndex */ std::forward<Args>(payload)...); } /////////////////////////////////////////////////////////////////////////// // Closest hit struct ClosestHitContext { PBRT_GPU ClosestHitContext(MediumHandle rayMedium, bool shadowRay) : rayMedium(rayMedium), shadowRay(shadowRay) {} MediumHandle rayMedium; bool shadowRay; // out Point3fi piHit; Normal3f nHit; MaterialHandle material; MediumInterface mediumInterface; PBRT_GPU Ray SpawnRayTo(const Point3f &p) const { Interaction intr(piHit, nHit); intr.mediumInterface = &mediumInterface; return intr.SpawnRayTo(p); } }; extern "C" __global__ void __raygen__findClosest() { int rayIndex(optixGetLaunchIndex().x); if (rayIndex >= params.rayQueue->Size()) return; RayWorkItem r = (*params.rayQueue)[rayIndex]; Ray ray = r.ray; Float tMax = 1e30f; ClosestHitContext ctx(ray.medium, false); uint32_t p0 = packPointer0(&ctx), p1 = packPointer1(&ctx); PBRT_DBG("ray o %f %f %f dir %f %f %f tmax %f\n", ray.o.x, ray.o.y, ray.o.z, ray.d.x, ray.d.y, ray.d.z, tMax); uint32_t missed = 0; Trace(params.traversable, ray, 0.f /* tMin */, tMax, OPTIX_RAY_FLAG_NONE, p0, p1, missed); if (missed) { if (ray.medium) { PBRT_DBG("Adding miss ray to mediumSampleQueue. 
" "ray %f %f %f d %f %f %f T_hat %f %f %f %f\n", r.ray.o.x, r.ray.o.y, r.ray.o.z, r.ray.d.x, r.ray.d.y, r.ray.d.z, r.T_hat[0], r.T_hat[1], r.T_hat[2], r.T_hat[3]); params.mediumSampleQueue->Push(r.ray, Infinity, r.lambda, r.T_hat, r.uniPathPDF, r.lightPathPDF, r.pixelIndex, r.prevIntrCtx, r.isSpecularBounce, r.anyNonSpecularBounces, r.etaScale); } else if (params.escapedRayQueue) { PBRT_DBG("Adding ray to escapedRayQueue ray index %d pixel index %d\n", rayIndex, r.pixelIndex); params.escapedRayQueue->Push(EscapedRayWorkItem{ ray.o, ray.d, r.lambda, r.pixelIndex, (int)r.isSpecularBounce, r.T_hat, r.uniPathPDF, r.lightPathPDF, r.prevIntrCtx}); } } } extern "C" __global__ void __miss__noop() { optixSetPayload_2(1); } static __forceinline__ __device__ void ProcessClosestIntersection( SurfaceInteraction intr) { int rayIndex = optixGetLaunchIndex().x; MediumHandle rayMedium = getPayload<ClosestHitContext>()->rayMedium; if (intr.mediumInterface) getPayload<ClosestHitContext>()->mediumInterface = *intr.mediumInterface; else getPayload<ClosestHitContext>()->mediumInterface = MediumInterface(rayMedium); getPayload<ClosestHitContext>()->piHit = intr.pi; getPayload<ClosestHitContext>()->nHit = intr.n; getPayload<ClosestHitContext>()->material = intr.material; if (getPayload<ClosestHitContext>()->shadowRay) return; // We only have the ray queue (and it only makes sense to access) for // regular closest hit rays. RayWorkItem r = (*params.rayQueue)[rayIndex]; if (rayMedium) { assert(params.mediumSampleQueue); PBRT_DBG("Enqueuing into medium sample queue\n"); params.mediumSampleQueue->Push( MediumSampleWorkItem{r.ray, optixGetRayTmax(), r.lambda, r.T_hat, r.uniPathPDF, r.lightPathPDF, r.pixelIndex, r.prevIntrCtx, r.isSpecularBounce, r.anyNonSpecularBounces, r.etaScale, intr.areaLight, intr.pi, intr.n, -r.ray.d, intr.uv, intr.material, intr.shading.n, intr.shading.dpdu, intr.shading.dpdv, intr.shading.dndu, intr.shading.dndv, getPayload<ClosestHitContext>()->mediumInterface}); return; } // FIXME: this is all basically duplicate code w/medium.cpp MaterialHandle material = intr.material; const MixMaterial *mix = material.CastOrNullptr<MixMaterial>(); if (mix) { MaterialEvalContext ctx(intr); material = mix->ChooseMaterial(BasicTextureEvaluator(), ctx); } if (!material) { PBRT_DBG("Enqueuing into medium transition queue: ray index %d pixel index %d \n", rayIndex, r.pixelIndex); Ray newRay = intr.SpawnRay(r.ray.d); params.nextRayQueue->PushIndirectRay( newRay, r.prevIntrCtx, r.T_hat, r.uniPathPDF, r.lightPathPDF, r.lambda, r.etaScale, r.isSpecularBounce, r.anyNonSpecularBounces, r.pixelIndex); return; } if (intr.areaLight) { PBRT_DBG("Ray hit an area light: adding to hitAreaLightQueue ray index %d pixel index " "%d\n", rayIndex, r.pixelIndex); Ray ray = r.ray; // TODO: intr.wo == -ray.d? params.hitAreaLightQueue->Push(HitAreaLightWorkItem{ intr.areaLight, intr.p(), intr.n, intr.uv, intr.wo, r.lambda, r.T_hat, r.uniPathPDF, r.lightPathPDF, r.prevIntrCtx, (int)r.isSpecularBounce, r.pixelIndex}); } FloatTextureHandle displacement = material.GetDisplacement(); MaterialEvalQueue *q = (material.CanEvaluateTextures(BasicTextureEvaluator()) && (!displacement || BasicTextureEvaluator().CanEvaluate({displacement}, {}))) ? 
params.basicEvalMaterialQueue : params.universalEvalMaterialQueue; PBRT_DBG("Enqueuing for material eval, mtl tag %d\n", material.Tag()); auto enqueue = [=](auto ptr) { using Material = typename std::remove_reference_t<decltype(*ptr)>; q->Push(MaterialEvalWorkItem<Material>{ ptr, intr.pi, intr.n, intr.shading.n, intr.shading.dpdu, intr.shading.dpdv, intr.shading.dndu, intr.shading.dndv, intr.uv, r.lambda, r.anyNonSpecularBounces, intr.wo, r.pixelIndex, r.T_hat, r.uniPathPDF, r.etaScale, getPayload<ClosestHitContext>()->mediumInterface, intr.time}); }; material.Dispatch(enqueue); PBRT_DBG("Closest hit found intersection at t %f\n", optixGetRayTmax()); } /////////////////////////////////////////////////////////////////////////// // Triangles static __forceinline__ __device__ SurfaceInteraction getTriangleIntersection() { const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer(); float b1 = optixGetTriangleBarycentrics().x; float b2 = optixGetTriangleBarycentrics().y; float b0 = 1 - b1 - b2; float3 rd = optixGetWorldRayDirection(); Vector3f wo = -Vector3f(rd.x, rd.y, rd.z); assert(optixGetTransformListSize() == 1); float worldFromObj[12], objFromWorld[12]; optixGetObjectToWorldTransformMatrix(worldFromObj); optixGetWorldToObjectTransformMatrix(objFromWorld); SquareMatrix<4> worldFromObjM(worldFromObj[0], worldFromObj[1], worldFromObj[2], worldFromObj[3], worldFromObj[4], worldFromObj[5], worldFromObj[6], worldFromObj[7], worldFromObj[8], worldFromObj[9], worldFromObj[10], worldFromObj[11], 0.f, 0.f, 0.f, 1.f); SquareMatrix<4> objFromWorldM(objFromWorld[0], objFromWorld[1], objFromWorld[2], objFromWorld[3], objFromWorld[4], objFromWorld[5], objFromWorld[6], objFromWorld[7], objFromWorld[8], objFromWorld[9], objFromWorld[10], objFromWorld[11], 0.f, 0.f, 0.f, 1.f); Transform worldFromInstance(worldFromObjM, objFromWorldM); Float time = optixGetRayTime(); wo = worldFromInstance.ApplyInverse(wo); TriangleIntersection ti{b0, b1, b2, optixGetRayTmax()}; SurfaceInteraction intr = Triangle::InteractionFromIntersection(rec.mesh, optixGetPrimitiveIndex(), ti, time, wo); return worldFromInstance(intr); } static __forceinline__ __device__ bool alphaKilled(const TriangleMeshRecord &rec) { if (!rec.alphaTexture) return false; SurfaceInteraction intr = getTriangleIntersection(); BasicTextureEvaluator eval; Float alpha = eval(rec.alphaTexture, intr); if (alpha >= 1) return false; if (alpha <= 0) return true; else { float3 o = optixGetWorldRayOrigin(); float3 d = optixGetWorldRayDirection(); Float u = uint32_t(Hash(o, d)) * 0x1p-32f; return u > alpha; } } extern "C" __global__ void __closesthit__triangle() { const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer(); SurfaceInteraction intr = getTriangleIntersection(); if (rec.mediumInterface && rec.mediumInterface->IsMediumTransition()) intr.mediumInterface = rec.mediumInterface; intr.material = rec.material; if (!rec.areaLights.empty()) intr.areaLight = rec.areaLights[optixGetPrimitiveIndex()]; ProcessClosestIntersection(intr); } extern "C" __global__ void __anyhit__triangle() { const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer(); if (alphaKilled(rec)) optixIgnoreIntersection(); } extern "C" __global__ void __anyhit__shadowTriangle() { const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer(); if (rec.material && rec.material.IsTransparent()) optixIgnoreIntersection(); if (alphaKilled(rec)) optixIgnoreIntersection(); } 
/////////////////////////////////////////////////////////////////////////// // Shadow rays extern "C" __global__ void __raygen__shadow() { int index = optixGetLaunchIndex().x; if (index >= params.shadowRayQueue->Size()) return; ShadowRayWorkItem sr = (*params.shadowRayQueue)[index]; PBRT_DBG("Tracing shadow ray index %d o %f %f %f d %f %f %f\n", index, sr.ray.o.x, sr.ray.o.y, sr.ray.o.z, sr.ray.d.x, sr.ray.d.y, sr.ray.d.z); uint32_t missed = 0; Trace(params.traversable, sr.ray, 1e-5f /* tMin */, sr.tMax, OPTIX_RAY_FLAG_NONE, missed); SampledSpectrum Ld; if (missed) { Ld = sr.Ld / (sr.uniPathPDF + sr.lightPathPDF).Average(); PBRT_DBG("Unoccluded shadow ray. Final Ld %f %f %f %f " "(sr.Ld %f %f %f %f uniPathPDF %f %f %f %f lightPathPDF %f %f %f %f)\n", Ld[0], Ld[1], Ld[2], Ld[3], sr.Ld[0], sr.Ld[1], sr.Ld[2], sr.Ld[3], sr.uniPathPDF[0], sr.uniPathPDF[1], sr.uniPathPDF[2], sr.uniPathPDF[3], sr.lightPathPDF[0], sr.lightPathPDF[1], sr.lightPathPDF[2], sr.lightPathPDF[3]); SampledSpectrum Lpixel = params.pixelSampleState->L[sr.pixelIndex]; params.pixelSampleState->L[sr.pixelIndex] = Lpixel + Ld; } else { PBRT_DBG("Shadow ray was occluded\n"); } } extern "C" __global__ void __miss__shadow() { optixSetPayload_0(1); } __device__ inline void rescale(SampledSpectrum &T_hat, SampledSpectrum &lightPathPDF, SampledSpectrum &uniPathPDF) { if (T_hat.MaxComponentValue() > 0x1p24f || lightPathPDF.MaxComponentValue() > 0x1p24f || uniPathPDF.MaxComponentValue() > 0x1p24f) { T_hat *= 1.f / 0x1p24f; lightPathPDF *= 1.f / 0x1p24f; uniPathPDF *= 1.f / 0x1p24f; } else if (T_hat.MaxComponentValue() < 0x1p-24f || lightPathPDF.MaxComponentValue() < 0x1p-24f || uniPathPDF.MaxComponentValue() < 0x1p-24f) { T_hat *= 0x1p24f; lightPathPDF *= 0x1p24f; uniPathPDF *= 0x1p24f; } } extern "C" __global__ void __raygen__shadow_Tr() { PBRT_DBG("raygen sahadow tr %d\n", optixGetLaunchIndex().x); int index = optixGetLaunchIndex().x; if (index >= params.shadowRayQueue->Size()) return; ShadowRayWorkItem sr = (*params.shadowRayQueue)[index]; SampledWavelengths lambda = sr.lambda; SampledSpectrum Ld = sr.Ld; PBRT_DBG("Initial Ld %f %f %f %f shadow ray index %d pixel index %d\n", Ld[0], Ld[1], Ld[2], Ld[3], index, sr.pixelIndex); Ray ray = sr.ray; Float tMax = sr.tMax; Point3f pLight = ray(tMax); RNG rng(Hash(ray.o), Hash(ray.d)); SampledSpectrum T_ray(1.f); SampledSpectrum uniPathPDF(1.f), lightPathPDF(1.f); while (ray.d != Vector3f(0, 0, 0)) { ClosestHitContext ctx(ray.medium, true); uint32_t p0 = packPointer0(&ctx), p1 = packPointer1(&ctx); PBRT_DBG("Tracing shadow tr shadow ray index %d pixel index %d " "ray %f %f %f d %f %f %f tMax %f\n", index, sr.pixelIndex, ray.o.x, ray.o.y, ray.o.z, ray.d.x, ray.d.y, ray.d.z, tMax); uint32_t missed = 0; Trace(params.traversable, ray, 1e-5f /* tMin */, tMax, OPTIX_RAY_FLAG_NONE, p0, p1, missed); if (!missed && ctx.material) { PBRT_DBG("Hit opaque. Bye\n"); // Hit opaque surface T_ray = SampledSpectrum(0.f); break; } if (ray.medium) { PBRT_DBG("Ray medium %p. Will sample tmaj...\n", ray.medium.ptr()); Float tEnd = missed ? 
tMax : (Distance(ray.o, Point3f(ctx.piHit)) / Length(ray.d)); SampledSpectrum Tmaj = ray.medium.SampleTmaj(ray, tEnd, rng, lambda, [&](const MediumSample &mediumSample) { const SampledSpectrum &Tmaj = mediumSample.Tmaj; const MediumInteraction &intr = mediumSample.intr; SampledSpectrum sigma_n = intr.sigma_n(); // ratio-tracking: only evaluate null scattering T_ray *= Tmaj * sigma_n; lightPathPDF *= Tmaj * intr.sigma_maj; uniPathPDF *= Tmaj * sigma_n; // Possibly terminate transmittance computation using Russian roulette SampledSpectrum Tr = T_ray / (lightPathPDF + uniPathPDF).Average(); if (Tr.MaxComponentValue() < 0.05f) { Float q = 0.75f; if (rng.Uniform<Float>() < q) T_ray = SampledSpectrum(0.); else { lightPathPDF *= 1 - q; uniPathPDF *= 1 - q; } } PBRT_DBG("Tmaj %f %f %f %f sigma_n %f %f %f %f sigma_maj %f %f %f %f\n", Tmaj[0], Tmaj[1], Tmaj[2], Tmaj[3], sigma_n[0], sigma_n[1], sigma_n[2], sigma_n[3], intr.sigma_maj[0], intr.sigma_maj[1], intr.sigma_maj[2], intr.sigma_maj[3]); PBRT_DBG("T_ray %f %f %f %f lightPathPDF %f %f %f %f uniPathPDF %f %f %f %f\n", T_ray[0], T_ray[1], T_ray[2], T_ray[3], lightPathPDF[0], lightPathPDF[1], lightPathPDF[2], lightPathPDF[3], uniPathPDF[0], uniPathPDF[1], uniPathPDF[2], uniPathPDF[3]); if (!T_ray) return false; rescale(T_ray, lightPathPDF, uniPathPDF); return true; }); T_ray *= Tmaj; lightPathPDF *= Tmaj; uniPathPDF *= Tmaj; } if (missed || !T_ray) // done break; ray = ctx.SpawnRayTo(pLight); } PBRT_DBG("Final T_ray %.9g %.9g %.9g %.9g sr.uniPathPDF %.9g %.9g %.9g %.9g uniPathPDF %.9g %.9g %.9g %.9g\n", T_ray[0], T_ray[1], T_ray[2], T_ray[3], sr.uniPathPDF[0], sr.uniPathPDF[1], sr.uniPathPDF[2], sr.uniPathPDF[3], uniPathPDF[0], uniPathPDF[1], uniPathPDF[2], uniPathPDF[3]); PBRT_DBG("sr.lightPathPDF %.9g %.9g %.9g %.9g lightPathPDF %.9g %.9g %.9g %.9g\n", sr.lightPathPDF[0], sr.lightPathPDF[1], sr.lightPathPDF[2], sr.lightPathPDF[3], lightPathPDF[0], lightPathPDF[1], lightPathPDF[2], lightPathPDF[3]); PBRT_DBG("scaled throughput %.9g %.9g %.9g %.9g\n", T_ray[0] / (sr.uniPathPDF * uniPathPDF + sr.lightPathPDF * lightPathPDF).Average(), T_ray[1] / (sr.uniPathPDF * uniPathPDF + sr.lightPathPDF * lightPathPDF).Average(), T_ray[2] / (sr.uniPathPDF * uniPathPDF + sr.lightPathPDF * lightPathPDF).Average(), T_ray[3] / (sr.uniPathPDF * uniPathPDF + sr.lightPathPDF * lightPathPDF).Average()); if (T_ray) { // FIXME/reconcile: this takes lightPathPDF as input while // e.g. VolPathIntegrator::SampleLd() does not... 
Ld *= T_ray / (sr.uniPathPDF * uniPathPDF + sr.lightPathPDF * lightPathPDF).Average(); PBRT_DBG("Setting final Ld for shadow ray index %d pixel index %d = as %f %f %f %f\n", index, sr.pixelIndex, Ld[0], Ld[1], Ld[2], Ld[3]); SampledSpectrum Lpixel = params.pixelSampleState->L[sr.pixelIndex]; params.pixelSampleState->L[sr.pixelIndex] = Lpixel + Ld; } } extern "C" __global__ void __miss__shadow_Tr() { optixSetPayload_2(1); } ///////////////////////////////////////////////////////////////////////////////////// // Quadrics static __device__ inline SurfaceInteraction getQuadricIntersection( const QuadricIntersection &si) { QuadricRecord &rec = *((QuadricRecord *)optixGetSbtDataPointer()); float3 rd = optixGetWorldRayDirection(); Vector3f wo = -Vector3f(rd.x, rd.y, rd.z); Float time = optixGetRayTime(); SurfaceInteraction intr; if (const Sphere *sphere = rec.shape.CastOrNullptr<Sphere>()) intr = sphere->InteractionFromIntersection(si, wo, time); else if (const Cylinder *cylinder = rec.shape.CastOrNullptr<Cylinder>()) intr = cylinder->InteractionFromIntersection(si, wo, time); else if (const Disk *disk = rec.shape.CastOrNullptr<Disk>()) intr = disk->InteractionFromIntersection(si, wo, time); else assert(!"unexpected quadric"); return intr; } extern "C" __global__ void __closesthit__quadric() { QuadricRecord &rec = *((QuadricRecord *)optixGetSbtDataPointer()); QuadricIntersection qi; qi.pObj = Point3f(BitsToFloat(optixGetAttribute_0()), BitsToFloat(optixGetAttribute_1()), BitsToFloat(optixGetAttribute_2())); qi.phi = BitsToFloat(optixGetAttribute_3()); SurfaceInteraction intr = getQuadricIntersection(qi); if (rec.mediumInterface && rec.mediumInterface->IsMediumTransition()) intr.mediumInterface = rec.mediumInterface; intr.material = rec.material; if (rec.areaLight) intr.areaLight = rec.areaLight; ProcessClosestIntersection(intr); } extern "C" __global__ void __anyhit__shadowQuadric() { QuadricRecord &rec = *((QuadricRecord *)optixGetSbtDataPointer()); if (rec.material && rec.material.IsTransparent()) optixIgnoreIntersection(); } extern "C" __global__ void __intersection__quadric() { QuadricRecord &rec = *((QuadricRecord *)optixGetSbtDataPointer()); float3 org = optixGetObjectRayOrigin(); float3 dir = optixGetObjectRayDirection(); Float tMax = optixGetRayTmax(); Ray ray(Point3f(org.x, org.y, org.z), Vector3f(dir.x, dir.y, dir.z)); pstd::optional<QuadricIntersection> isect; if (const Sphere *sphere = rec.shape.CastOrNullptr<Sphere>()) isect = sphere->BasicIntersect(ray, tMax); else if (const Cylinder *cylinder = rec.shape.CastOrNullptr<Cylinder>()) isect = cylinder->BasicIntersect(ray, tMax); else if (const Disk *disk = rec.shape.CastOrNullptr<Disk>()) isect = disk->BasicIntersect(ray, tMax); if (!isect) return; if (rec.alphaTexture) { SurfaceInteraction intr = getQuadricIntersection(*isect); BasicTextureEvaluator eval; Float alpha = eval(rec.alphaTexture, intr); if (alpha < 1) { if (alpha == 0) // No hit return; float3 o = optixGetWorldRayOrigin(); float3 d = optixGetWorldRayDirection(); Float u = uint32_t(Hash(o.x, o.y, o.z, d.x, d.y, d.z)) * 0x1p-32f; if (u > alpha) // no hit return; } } optixReportIntersection(isect->tHit, 0 /* hit kind */, FloatToBits(isect->pObj.x), FloatToBits(isect->pObj.y), FloatToBits(isect->pObj.z), FloatToBits(isect->phi)); } /////////////////////////////////////////////////////////////////////////// // Bilinear patches static __forceinline__ __device__ SurfaceInteraction getBilinearPatchIntersection(Point2f uv) { BilinearMeshRecord &rec = *((BilinearMeshRecord 
*)optixGetSbtDataPointer()); float3 rd = optixGetWorldRayDirection(); Vector3f wo = -Vector3f(rd.x, rd.y, rd.z); return BilinearPatch::InteractionFromIntersection(rec.mesh, optixGetPrimitiveIndex(), uv, optixGetRayTime(), wo); } extern "C" __global__ void __closesthit__bilinearPatch() { BilinearMeshRecord &rec = *((BilinearMeshRecord *)optixGetSbtDataPointer()); Point2f uv(BitsToFloat(optixGetAttribute_0()), BitsToFloat(optixGetAttribute_1())); SurfaceInteraction intr = getBilinearPatchIntersection(uv); if (rec.mediumInterface && rec.mediumInterface->IsMediumTransition()) intr.mediumInterface = rec.mediumInterface; intr.material = rec.material; if (!rec.areaLights.empty()) intr.areaLight = rec.areaLights[optixGetPrimitiveIndex()]; ProcessClosestIntersection(intr); } extern "C" __global__ void __anyhit__shadowBilinearPatch() { BilinearMeshRecord &rec = *((BilinearMeshRecord *)optixGetSbtDataPointer()); if (rec.material && rec.material.IsTransparent()) optixIgnoreIntersection(); } extern "C" __global__ void __intersection__bilinearPatch() { BilinearMeshRecord &rec = *((BilinearMeshRecord *)optixGetSbtDataPointer()); float3 org = optixGetObjectRayOrigin(); float3 dir = optixGetObjectRayDirection(); Float tMax = optixGetRayTmax(); Ray ray(Point3f(org.x, org.y, org.z), Vector3f(dir.x, dir.y, dir.z)); int vertexIndex = 4 * optixGetPrimitiveIndex(); Point3f p00 = rec.mesh->p[rec.mesh->vertexIndices[vertexIndex]]; Point3f p10 = rec.mesh->p[rec.mesh->vertexIndices[vertexIndex + 1]]; Point3f p01 = rec.mesh->p[rec.mesh->vertexIndices[vertexIndex + 2]]; Point3f p11 = rec.mesh->p[rec.mesh->vertexIndices[vertexIndex + 3]]; pstd::optional<BilinearIntersection> isect = IntersectBilinearPatch(ray, tMax, p00, p10, p01, p11); if (!isect) return; if (rec.alphaTexture) { SurfaceInteraction intr = getBilinearPatchIntersection(isect->uv); BasicTextureEvaluator eval; Float alpha = eval(rec.alphaTexture, intr); if (alpha < 1) { if (alpha == 0) // No hit return; float3 o = optixGetWorldRayOrigin(); float3 d = optixGetWorldRayDirection(); Float u = uint32_t(Hash(o, d)) * 0x1p-32f; if (u > alpha) // no hit return; } } optixReportIntersection(isect->t, 0 /* hit kind */, FloatToBits(isect->uv[0]), FloatToBits(isect->uv[1])); } /////////////////////////////////////////////////////////////////////////// // Random hit (for subsurface scattering) struct RandomHitPayload { WeightedReservoirSampler<SubsurfaceInteraction> wrs; MaterialHandle material; }; extern "C" __global__ void __raygen__randomHit() { // Keep as uint32_t so can pass directly to optixTrace. uint32_t index = optixGetLaunchIndex().x; if (index >= params.subsurfaceScatterQueue->Size()) return; SubsurfaceScatterWorkItem s = (*params.subsurfaceScatterQueue)[index]; Ray ray(s.p0, s.p1 - s.p0); Float tMax = 1.f; RandomHitPayload payload; payload.wrs.Seed(Hash(s.p0, s.p1)); payload.material = s.material; uint32_t ptr0 = packPointer0(&payload), ptr1 = packPointer1(&payload); PBRT_DBG("Randomhit raygen ray.o %f %f %f ray.d %f %f %f tMax %f\n", ray.o.x, ray.o.y, ray.o.z, ray.d.x, ray.d.y, ray.d.z, tMax); Trace(params.traversable, ray, 0.f /* tMin */, tMax, OPTIX_RAY_FLAG_NONE, ptr0, ptr1); if (payload.wrs.HasSample() && payload.wrs.WeightSum() > 0) { // TODO: latter check shouldn't be needed... 
const SubsurfaceInteraction &si = payload.wrs.GetSample(); PBRT_DBG("optix si p %f %f %f n %f %f %f\n", si.p().x, si.p().y, si.p().z, si.n.x, si.n.y, si.n.z); params.subsurfaceScatterQueue->weight[index] = payload.wrs.WeightSum(); params.subsurfaceScatterQueue->ssi[index] = payload.wrs.GetSample(); } else params.subsurfaceScatterQueue->weight[index] = 0; } extern "C" __global__ void __anyhit__randomHitTriangle() { const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer(); RandomHitPayload *p = getPayload<RandomHitPayload>(); PBRT_DBG("Anyhit triangle for random hit: rec.material %p params.materials %p\n", rec.material.ptr(), p->material.ptr()); if (rec.material == p->material) p->wrs.Add([&] PBRT_CPU_GPU() { return getTriangleIntersection(); }, 1.f); optixIgnoreIntersection(); } extern "C" __global__ void __anyhit__randomHitBilinearPatch() { BilinearMeshRecord &rec = *(BilinearMeshRecord *)optixGetSbtDataPointer(); RandomHitPayload *p = getPayload<RandomHitPayload>(); PBRT_DBG("Anyhit blp for random hit: rec.material %p params.materials %p\n", rec.material.ptr(), p->material.ptr()); if (rec.material == p->material) p->wrs.Add( [&] PBRT_CPU_GPU() { Point2f uv(BitsToFloat(optixGetAttribute_0()), BitsToFloat(optixGetAttribute_1())); return getBilinearPatchIntersection(uv); }, 1.f); optixIgnoreIntersection(); } extern "C" __global__ void __anyhit__randomHitQuadric() { QuadricRecord &rec = *((QuadricRecord *)optixGetSbtDataPointer()); RandomHitPayload *p = getPayload<RandomHitPayload>(); PBRT_DBG("Anyhit quadric for random hit: rec.material %p params.materials %p\n", rec.material.ptr(), p->material.ptr()); if (rec.material == p->material) { p->wrs.Add( [&] PBRT_CPU_GPU() { QuadricIntersection qi; qi.pObj = Point3f(BitsToFloat(optixGetAttribute_0()), BitsToFloat(optixGetAttribute_1()), BitsToFloat(optixGetAttribute_2())); qi.phi = BitsToFloat(optixGetAttribute_3()); return getQuadricIntersection(qi); }, 1.f); } optixIgnoreIntersection(); }
ce74c56e053cda13e2cfc483a3cb4bdc5e626910.cu
// pbrt is Copyright(c) 1998-2020 Matt Pharr, Wenzel Jakob, and Greg Humphreys. // The pbrt source code is licensed under the Apache License, Version 2.0. // SPDX: Apache-2.0 #include <pbrt/pbrt.h> #include <pbrt/gpu/accel.h> #include <pbrt/gpu/optix.h> #include <pbrt/interaction.h> #include <pbrt/materials.h> #include <pbrt/media.h> #include <pbrt/shapes.h> #include <pbrt/textures.h> #include <pbrt/util/float.h> #include <pbrt/util/rng.h> #include <pbrt/util/transform.h> #include <pbrt/util/vecmath.h> // Make various functions visible to OptiX, which doesn't get to link // shader code with the CUDA code in the main executable... #include <pbrt/util/color.cpp> #include <pbrt/util/colorspace.cpp> #include <pbrt/util/noise.cpp> #include <pbrt/util/spectrum.cpp> #include <pbrt/util/transform.cpp> #include <optix_device.h> #include <utility> using namespace pbrt; extern "C" { extern __constant__ pbrt::RayIntersectParameters params; } /////////////////////////////////////////////////////////////////////////// // Utility functions // Payload management __device__ inline uint32_t packPointer0(void *ptr) { uint64_t uptr = reinterpret_cast<uint64_t>(ptr); return uptr >> 32; } __device__ inline uint32_t packPointer1(void *ptr) { uint64_t uptr = reinterpret_cast<uint64_t>(ptr); return uint32_t(uptr); } template <typename T> static __forceinline__ __device__ T *getPayload() { uint32_t p0 = optixGetPayload_0(), p1 = optixGetPayload_1(); const uint64_t uptr = (uint64_t(p0) << 32) | p1; return reinterpret_cast<T *>(uptr); } template <typename... Args> __device__ inline void Trace(OptixTraversableHandle traversable, Ray ray, Float tMin, Float tMax, OptixRayFlags flags, Args &&... payload) { optixTrace(traversable, make_float3(ray.o.x, ray.o.y, ray.o.z), make_float3(ray.d.x, ray.d.y, ray.d.z), tMin, tMax, ray.time, OptixVisibilityMask(255), flags, 0, /* ray type */ 1, /* number of ray types */ 0, /* missSBTIndex */ std::forward<Args>(payload)...); } /////////////////////////////////////////////////////////////////////////// // Closest hit struct ClosestHitContext { PBRT_GPU ClosestHitContext(MediumHandle rayMedium, bool shadowRay) : rayMedium(rayMedium), shadowRay(shadowRay) {} MediumHandle rayMedium; bool shadowRay; // out Point3fi piHit; Normal3f nHit; MaterialHandle material; MediumInterface mediumInterface; PBRT_GPU Ray SpawnRayTo(const Point3f &p) const { Interaction intr(piHit, nHit); intr.mediumInterface = &mediumInterface; return intr.SpawnRayTo(p); } }; extern "C" __global__ void __raygen__findClosest() { int rayIndex(optixGetLaunchIndex().x); if (rayIndex >= params.rayQueue->Size()) return; RayWorkItem r = (*params.rayQueue)[rayIndex]; Ray ray = r.ray; Float tMax = 1e30f; ClosestHitContext ctx(ray.medium, false); uint32_t p0 = packPointer0(&ctx), p1 = packPointer1(&ctx); PBRT_DBG("ray o %f %f %f dir %f %f %f tmax %f\n", ray.o.x, ray.o.y, ray.o.z, ray.d.x, ray.d.y, ray.d.z, tMax); uint32_t missed = 0; Trace(params.traversable, ray, 0.f /* tMin */, tMax, OPTIX_RAY_FLAG_NONE, p0, p1, missed); if (missed) { if (ray.medium) { PBRT_DBG("Adding miss ray to mediumSampleQueue. 
" "ray %f %f %f d %f %f %f T_hat %f %f %f %f\n", r.ray.o.x, r.ray.o.y, r.ray.o.z, r.ray.d.x, r.ray.d.y, r.ray.d.z, r.T_hat[0], r.T_hat[1], r.T_hat[2], r.T_hat[3]); params.mediumSampleQueue->Push(r.ray, Infinity, r.lambda, r.T_hat, r.uniPathPDF, r.lightPathPDF, r.pixelIndex, r.prevIntrCtx, r.isSpecularBounce, r.anyNonSpecularBounces, r.etaScale); } else if (params.escapedRayQueue) { PBRT_DBG("Adding ray to escapedRayQueue ray index %d pixel index %d\n", rayIndex, r.pixelIndex); params.escapedRayQueue->Push(EscapedRayWorkItem{ ray.o, ray.d, r.lambda, r.pixelIndex, (int)r.isSpecularBounce, r.T_hat, r.uniPathPDF, r.lightPathPDF, r.prevIntrCtx}); } } } extern "C" __global__ void __miss__noop() { optixSetPayload_2(1); } static __forceinline__ __device__ void ProcessClosestIntersection( SurfaceInteraction intr) { int rayIndex = optixGetLaunchIndex().x; MediumHandle rayMedium = getPayload<ClosestHitContext>()->rayMedium; if (intr.mediumInterface) getPayload<ClosestHitContext>()->mediumInterface = *intr.mediumInterface; else getPayload<ClosestHitContext>()->mediumInterface = MediumInterface(rayMedium); getPayload<ClosestHitContext>()->piHit = intr.pi; getPayload<ClosestHitContext>()->nHit = intr.n; getPayload<ClosestHitContext>()->material = intr.material; if (getPayload<ClosestHitContext>()->shadowRay) return; // We only have the ray queue (and it only makes sense to access) for // regular closest hit rays. RayWorkItem r = (*params.rayQueue)[rayIndex]; if (rayMedium) { assert(params.mediumSampleQueue); PBRT_DBG("Enqueuing into medium sample queue\n"); params.mediumSampleQueue->Push( MediumSampleWorkItem{r.ray, optixGetRayTmax(), r.lambda, r.T_hat, r.uniPathPDF, r.lightPathPDF, r.pixelIndex, r.prevIntrCtx, r.isSpecularBounce, r.anyNonSpecularBounces, r.etaScale, intr.areaLight, intr.pi, intr.n, -r.ray.d, intr.uv, intr.material, intr.shading.n, intr.shading.dpdu, intr.shading.dpdv, intr.shading.dndu, intr.shading.dndv, getPayload<ClosestHitContext>()->mediumInterface}); return; } // FIXME: this is all basically duplicate code w/medium.cpp MaterialHandle material = intr.material; const MixMaterial *mix = material.CastOrNullptr<MixMaterial>(); if (mix) { MaterialEvalContext ctx(intr); material = mix->ChooseMaterial(BasicTextureEvaluator(), ctx); } if (!material) { PBRT_DBG("Enqueuing into medium transition queue: ray index %d pixel index %d \n", rayIndex, r.pixelIndex); Ray newRay = intr.SpawnRay(r.ray.d); params.nextRayQueue->PushIndirectRay( newRay, r.prevIntrCtx, r.T_hat, r.uniPathPDF, r.lightPathPDF, r.lambda, r.etaScale, r.isSpecularBounce, r.anyNonSpecularBounces, r.pixelIndex); return; } if (intr.areaLight) { PBRT_DBG("Ray hit an area light: adding to hitAreaLightQueue ray index %d pixel index " "%d\n", rayIndex, r.pixelIndex); Ray ray = r.ray; // TODO: intr.wo == -ray.d? params.hitAreaLightQueue->Push(HitAreaLightWorkItem{ intr.areaLight, intr.p(), intr.n, intr.uv, intr.wo, r.lambda, r.T_hat, r.uniPathPDF, r.lightPathPDF, r.prevIntrCtx, (int)r.isSpecularBounce, r.pixelIndex}); } FloatTextureHandle displacement = material.GetDisplacement(); MaterialEvalQueue *q = (material.CanEvaluateTextures(BasicTextureEvaluator()) && (!displacement || BasicTextureEvaluator().CanEvaluate({displacement}, {}))) ? 
params.basicEvalMaterialQueue : params.universalEvalMaterialQueue; PBRT_DBG("Enqueuing for material eval, mtl tag %d\n", material.Tag()); auto enqueue = [=](auto ptr) { using Material = typename std::remove_reference_t<decltype(*ptr)>; q->Push(MaterialEvalWorkItem<Material>{ ptr, intr.pi, intr.n, intr.shading.n, intr.shading.dpdu, intr.shading.dpdv, intr.shading.dndu, intr.shading.dndv, intr.uv, r.lambda, r.anyNonSpecularBounces, intr.wo, r.pixelIndex, r.T_hat, r.uniPathPDF, r.etaScale, getPayload<ClosestHitContext>()->mediumInterface, intr.time}); }; material.Dispatch(enqueue); PBRT_DBG("Closest hit found intersection at t %f\n", optixGetRayTmax()); } /////////////////////////////////////////////////////////////////////////// // Triangles static __forceinline__ __device__ SurfaceInteraction getTriangleIntersection() { const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer(); float b1 = optixGetTriangleBarycentrics().x; float b2 = optixGetTriangleBarycentrics().y; float b0 = 1 - b1 - b2; float3 rd = optixGetWorldRayDirection(); Vector3f wo = -Vector3f(rd.x, rd.y, rd.z); assert(optixGetTransformListSize() == 1); float worldFromObj[12], objFromWorld[12]; optixGetObjectToWorldTransformMatrix(worldFromObj); optixGetWorldToObjectTransformMatrix(objFromWorld); SquareMatrix<4> worldFromObjM(worldFromObj[0], worldFromObj[1], worldFromObj[2], worldFromObj[3], worldFromObj[4], worldFromObj[5], worldFromObj[6], worldFromObj[7], worldFromObj[8], worldFromObj[9], worldFromObj[10], worldFromObj[11], 0.f, 0.f, 0.f, 1.f); SquareMatrix<4> objFromWorldM(objFromWorld[0], objFromWorld[1], objFromWorld[2], objFromWorld[3], objFromWorld[4], objFromWorld[5], objFromWorld[6], objFromWorld[7], objFromWorld[8], objFromWorld[9], objFromWorld[10], objFromWorld[11], 0.f, 0.f, 0.f, 1.f); Transform worldFromInstance(worldFromObjM, objFromWorldM); Float time = optixGetRayTime(); wo = worldFromInstance.ApplyInverse(wo); TriangleIntersection ti{b0, b1, b2, optixGetRayTmax()}; SurfaceInteraction intr = Triangle::InteractionFromIntersection(rec.mesh, optixGetPrimitiveIndex(), ti, time, wo); return worldFromInstance(intr); } static __forceinline__ __device__ bool alphaKilled(const TriangleMeshRecord &rec) { if (!rec.alphaTexture) return false; SurfaceInteraction intr = getTriangleIntersection(); BasicTextureEvaluator eval; Float alpha = eval(rec.alphaTexture, intr); if (alpha >= 1) return false; if (alpha <= 0) return true; else { float3 o = optixGetWorldRayOrigin(); float3 d = optixGetWorldRayDirection(); Float u = uint32_t(Hash(o, d)) * 0x1p-32f; return u > alpha; } } extern "C" __global__ void __closesthit__triangle() { const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer(); SurfaceInteraction intr = getTriangleIntersection(); if (rec.mediumInterface && rec.mediumInterface->IsMediumTransition()) intr.mediumInterface = rec.mediumInterface; intr.material = rec.material; if (!rec.areaLights.empty()) intr.areaLight = rec.areaLights[optixGetPrimitiveIndex()]; ProcessClosestIntersection(intr); } extern "C" __global__ void __anyhit__triangle() { const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer(); if (alphaKilled(rec)) optixIgnoreIntersection(); } extern "C" __global__ void __anyhit__shadowTriangle() { const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer(); if (rec.material && rec.material.IsTransparent()) optixIgnoreIntersection(); if (alphaKilled(rec)) optixIgnoreIntersection(); } 
/////////////////////////////////////////////////////////////////////////// // Shadow rays extern "C" __global__ void __raygen__shadow() { int index = optixGetLaunchIndex().x; if (index >= params.shadowRayQueue->Size()) return; ShadowRayWorkItem sr = (*params.shadowRayQueue)[index]; PBRT_DBG("Tracing shadow ray index %d o %f %f %f d %f %f %f\n", index, sr.ray.o.x, sr.ray.o.y, sr.ray.o.z, sr.ray.d.x, sr.ray.d.y, sr.ray.d.z); uint32_t missed = 0; Trace(params.traversable, sr.ray, 1e-5f /* tMin */, sr.tMax, OPTIX_RAY_FLAG_NONE, missed); SampledSpectrum Ld; if (missed) { Ld = sr.Ld / (sr.uniPathPDF + sr.lightPathPDF).Average(); PBRT_DBG("Unoccluded shadow ray. Final Ld %f %f %f %f " "(sr.Ld %f %f %f %f uniPathPDF %f %f %f %f lightPathPDF %f %f %f %f)\n", Ld[0], Ld[1], Ld[2], Ld[3], sr.Ld[0], sr.Ld[1], sr.Ld[2], sr.Ld[3], sr.uniPathPDF[0], sr.uniPathPDF[1], sr.uniPathPDF[2], sr.uniPathPDF[3], sr.lightPathPDF[0], sr.lightPathPDF[1], sr.lightPathPDF[2], sr.lightPathPDF[3]); SampledSpectrum Lpixel = params.pixelSampleState->L[sr.pixelIndex]; params.pixelSampleState->L[sr.pixelIndex] = Lpixel + Ld; } else { PBRT_DBG("Shadow ray was occluded\n"); } } extern "C" __global__ void __miss__shadow() { optixSetPayload_0(1); } __device__ inline void rescale(SampledSpectrum &T_hat, SampledSpectrum &lightPathPDF, SampledSpectrum &uniPathPDF) { if (T_hat.MaxComponentValue() > 0x1p24f || lightPathPDF.MaxComponentValue() > 0x1p24f || uniPathPDF.MaxComponentValue() > 0x1p24f) { T_hat *= 1.f / 0x1p24f; lightPathPDF *= 1.f / 0x1p24f; uniPathPDF *= 1.f / 0x1p24f; } else if (T_hat.MaxComponentValue() < 0x1p-24f || lightPathPDF.MaxComponentValue() < 0x1p-24f || uniPathPDF.MaxComponentValue() < 0x1p-24f) { T_hat *= 0x1p24f; lightPathPDF *= 0x1p24f; uniPathPDF *= 0x1p24f; } } extern "C" __global__ void __raygen__shadow_Tr() { PBRT_DBG("raygen sahadow tr %d\n", optixGetLaunchIndex().x); int index = optixGetLaunchIndex().x; if (index >= params.shadowRayQueue->Size()) return; ShadowRayWorkItem sr = (*params.shadowRayQueue)[index]; SampledWavelengths lambda = sr.lambda; SampledSpectrum Ld = sr.Ld; PBRT_DBG("Initial Ld %f %f %f %f shadow ray index %d pixel index %d\n", Ld[0], Ld[1], Ld[2], Ld[3], index, sr.pixelIndex); Ray ray = sr.ray; Float tMax = sr.tMax; Point3f pLight = ray(tMax); RNG rng(Hash(ray.o), Hash(ray.d)); SampledSpectrum T_ray(1.f); SampledSpectrum uniPathPDF(1.f), lightPathPDF(1.f); while (ray.d != Vector3f(0, 0, 0)) { ClosestHitContext ctx(ray.medium, true); uint32_t p0 = packPointer0(&ctx), p1 = packPointer1(&ctx); PBRT_DBG("Tracing shadow tr shadow ray index %d pixel index %d " "ray %f %f %f d %f %f %f tMax %f\n", index, sr.pixelIndex, ray.o.x, ray.o.y, ray.o.z, ray.d.x, ray.d.y, ray.d.z, tMax); uint32_t missed = 0; Trace(params.traversable, ray, 1e-5f /* tMin */, tMax, OPTIX_RAY_FLAG_NONE, p0, p1, missed); if (!missed && ctx.material) { PBRT_DBG("Hit opaque. Bye\n"); // Hit opaque surface T_ray = SampledSpectrum(0.f); break; } if (ray.medium) { PBRT_DBG("Ray medium %p. Will sample tmaj...\n", ray.medium.ptr()); Float tEnd = missed ? 
tMax : (Distance(ray.o, Point3f(ctx.piHit)) / Length(ray.d)); SampledSpectrum Tmaj = ray.medium.SampleTmaj(ray, tEnd, rng, lambda, [&](const MediumSample &mediumSample) { const SampledSpectrum &Tmaj = mediumSample.Tmaj; const MediumInteraction &intr = mediumSample.intr; SampledSpectrum sigma_n = intr.sigma_n(); // ratio-tracking: only evaluate null scattering T_ray *= Tmaj * sigma_n; lightPathPDF *= Tmaj * intr.sigma_maj; uniPathPDF *= Tmaj * sigma_n; // Possibly terminate transmittance computation using Russian roulette SampledSpectrum Tr = T_ray / (lightPathPDF + uniPathPDF).Average(); if (Tr.MaxComponentValue() < 0.05f) { Float q = 0.75f; if (rng.Uniform<Float>() < q) T_ray = SampledSpectrum(0.); else { lightPathPDF *= 1 - q; uniPathPDF *= 1 - q; } } PBRT_DBG("Tmaj %f %f %f %f sigma_n %f %f %f %f sigma_maj %f %f %f %f\n", Tmaj[0], Tmaj[1], Tmaj[2], Tmaj[3], sigma_n[0], sigma_n[1], sigma_n[2], sigma_n[3], intr.sigma_maj[0], intr.sigma_maj[1], intr.sigma_maj[2], intr.sigma_maj[3]); PBRT_DBG("T_ray %f %f %f %f lightPathPDF %f %f %f %f uniPathPDF %f %f %f %f\n", T_ray[0], T_ray[1], T_ray[2], T_ray[3], lightPathPDF[0], lightPathPDF[1], lightPathPDF[2], lightPathPDF[3], uniPathPDF[0], uniPathPDF[1], uniPathPDF[2], uniPathPDF[3]); if (!T_ray) return false; rescale(T_ray, lightPathPDF, uniPathPDF); return true; }); T_ray *= Tmaj; lightPathPDF *= Tmaj; uniPathPDF *= Tmaj; } if (missed || !T_ray) // done break; ray = ctx.SpawnRayTo(pLight); } PBRT_DBG("Final T_ray %.9g %.9g %.9g %.9g sr.uniPathPDF %.9g %.9g %.9g %.9g uniPathPDF %.9g %.9g %.9g %.9g\n", T_ray[0], T_ray[1], T_ray[2], T_ray[3], sr.uniPathPDF[0], sr.uniPathPDF[1], sr.uniPathPDF[2], sr.uniPathPDF[3], uniPathPDF[0], uniPathPDF[1], uniPathPDF[2], uniPathPDF[3]); PBRT_DBG("sr.lightPathPDF %.9g %.9g %.9g %.9g lightPathPDF %.9g %.9g %.9g %.9g\n", sr.lightPathPDF[0], sr.lightPathPDF[1], sr.lightPathPDF[2], sr.lightPathPDF[3], lightPathPDF[0], lightPathPDF[1], lightPathPDF[2], lightPathPDF[3]); PBRT_DBG("scaled throughput %.9g %.9g %.9g %.9g\n", T_ray[0] / (sr.uniPathPDF * uniPathPDF + sr.lightPathPDF * lightPathPDF).Average(), T_ray[1] / (sr.uniPathPDF * uniPathPDF + sr.lightPathPDF * lightPathPDF).Average(), T_ray[2] / (sr.uniPathPDF * uniPathPDF + sr.lightPathPDF * lightPathPDF).Average(), T_ray[3] / (sr.uniPathPDF * uniPathPDF + sr.lightPathPDF * lightPathPDF).Average()); if (T_ray) { // FIXME/reconcile: this takes lightPathPDF as input while // e.g. VolPathIntegrator::SampleLd() does not... 
Ld *= T_ray / (sr.uniPathPDF * uniPathPDF + sr.lightPathPDF * lightPathPDF).Average(); PBRT_DBG("Setting final Ld for shadow ray index %d pixel index %d = as %f %f %f %f\n", index, sr.pixelIndex, Ld[0], Ld[1], Ld[2], Ld[3]); SampledSpectrum Lpixel = params.pixelSampleState->L[sr.pixelIndex]; params.pixelSampleState->L[sr.pixelIndex] = Lpixel + Ld; } } extern "C" __global__ void __miss__shadow_Tr() { optixSetPayload_2(1); } ///////////////////////////////////////////////////////////////////////////////////// // Quadrics static __device__ inline SurfaceInteraction getQuadricIntersection( const QuadricIntersection &si) { QuadricRecord &rec = *((QuadricRecord *)optixGetSbtDataPointer()); float3 rd = optixGetWorldRayDirection(); Vector3f wo = -Vector3f(rd.x, rd.y, rd.z); Float time = optixGetRayTime(); SurfaceInteraction intr; if (const Sphere *sphere = rec.shape.CastOrNullptr<Sphere>()) intr = sphere->InteractionFromIntersection(si, wo, time); else if (const Cylinder *cylinder = rec.shape.CastOrNullptr<Cylinder>()) intr = cylinder->InteractionFromIntersection(si, wo, time); else if (const Disk *disk = rec.shape.CastOrNullptr<Disk>()) intr = disk->InteractionFromIntersection(si, wo, time); else assert(!"unexpected quadric"); return intr; } extern "C" __global__ void __closesthit__quadric() { QuadricRecord &rec = *((QuadricRecord *)optixGetSbtDataPointer()); QuadricIntersection qi; qi.pObj = Point3f(BitsToFloat(optixGetAttribute_0()), BitsToFloat(optixGetAttribute_1()), BitsToFloat(optixGetAttribute_2())); qi.phi = BitsToFloat(optixGetAttribute_3()); SurfaceInteraction intr = getQuadricIntersection(qi); if (rec.mediumInterface && rec.mediumInterface->IsMediumTransition()) intr.mediumInterface = rec.mediumInterface; intr.material = rec.material; if (rec.areaLight) intr.areaLight = rec.areaLight; ProcessClosestIntersection(intr); } extern "C" __global__ void __anyhit__shadowQuadric() { QuadricRecord &rec = *((QuadricRecord *)optixGetSbtDataPointer()); if (rec.material && rec.material.IsTransparent()) optixIgnoreIntersection(); } extern "C" __global__ void __intersection__quadric() { QuadricRecord &rec = *((QuadricRecord *)optixGetSbtDataPointer()); float3 org = optixGetObjectRayOrigin(); float3 dir = optixGetObjectRayDirection(); Float tMax = optixGetRayTmax(); Ray ray(Point3f(org.x, org.y, org.z), Vector3f(dir.x, dir.y, dir.z)); pstd::optional<QuadricIntersection> isect; if (const Sphere *sphere = rec.shape.CastOrNullptr<Sphere>()) isect = sphere->BasicIntersect(ray, tMax); else if (const Cylinder *cylinder = rec.shape.CastOrNullptr<Cylinder>()) isect = cylinder->BasicIntersect(ray, tMax); else if (const Disk *disk = rec.shape.CastOrNullptr<Disk>()) isect = disk->BasicIntersect(ray, tMax); if (!isect) return; if (rec.alphaTexture) { SurfaceInteraction intr = getQuadricIntersection(*isect); BasicTextureEvaluator eval; Float alpha = eval(rec.alphaTexture, intr); if (alpha < 1) { if (alpha == 0) // No hit return; float3 o = optixGetWorldRayOrigin(); float3 d = optixGetWorldRayDirection(); Float u = uint32_t(Hash(o.x, o.y, o.z, d.x, d.y, d.z)) * 0x1p-32f; if (u > alpha) // no hit return; } } optixReportIntersection(isect->tHit, 0 /* hit kind */, FloatToBits(isect->pObj.x), FloatToBits(isect->pObj.y), FloatToBits(isect->pObj.z), FloatToBits(isect->phi)); } /////////////////////////////////////////////////////////////////////////// // Bilinear patches static __forceinline__ __device__ SurfaceInteraction getBilinearPatchIntersection(Point2f uv) { BilinearMeshRecord &rec = *((BilinearMeshRecord 
*)optixGetSbtDataPointer()); float3 rd = optixGetWorldRayDirection(); Vector3f wo = -Vector3f(rd.x, rd.y, rd.z); return BilinearPatch::InteractionFromIntersection(rec.mesh, optixGetPrimitiveIndex(), uv, optixGetRayTime(), wo); } extern "C" __global__ void __closesthit__bilinearPatch() { BilinearMeshRecord &rec = *((BilinearMeshRecord *)optixGetSbtDataPointer()); Point2f uv(BitsToFloat(optixGetAttribute_0()), BitsToFloat(optixGetAttribute_1())); SurfaceInteraction intr = getBilinearPatchIntersection(uv); if (rec.mediumInterface && rec.mediumInterface->IsMediumTransition()) intr.mediumInterface = rec.mediumInterface; intr.material = rec.material; if (!rec.areaLights.empty()) intr.areaLight = rec.areaLights[optixGetPrimitiveIndex()]; ProcessClosestIntersection(intr); } extern "C" __global__ void __anyhit__shadowBilinearPatch() { BilinearMeshRecord &rec = *((BilinearMeshRecord *)optixGetSbtDataPointer()); if (rec.material && rec.material.IsTransparent()) optixIgnoreIntersection(); } extern "C" __global__ void __intersection__bilinearPatch() { BilinearMeshRecord &rec = *((BilinearMeshRecord *)optixGetSbtDataPointer()); float3 org = optixGetObjectRayOrigin(); float3 dir = optixGetObjectRayDirection(); Float tMax = optixGetRayTmax(); Ray ray(Point3f(org.x, org.y, org.z), Vector3f(dir.x, dir.y, dir.z)); int vertexIndex = 4 * optixGetPrimitiveIndex(); Point3f p00 = rec.mesh->p[rec.mesh->vertexIndices[vertexIndex]]; Point3f p10 = rec.mesh->p[rec.mesh->vertexIndices[vertexIndex + 1]]; Point3f p01 = rec.mesh->p[rec.mesh->vertexIndices[vertexIndex + 2]]; Point3f p11 = rec.mesh->p[rec.mesh->vertexIndices[vertexIndex + 3]]; pstd::optional<BilinearIntersection> isect = IntersectBilinearPatch(ray, tMax, p00, p10, p01, p11); if (!isect) return; if (rec.alphaTexture) { SurfaceInteraction intr = getBilinearPatchIntersection(isect->uv); BasicTextureEvaluator eval; Float alpha = eval(rec.alphaTexture, intr); if (alpha < 1) { if (alpha == 0) // No hit return; float3 o = optixGetWorldRayOrigin(); float3 d = optixGetWorldRayDirection(); Float u = uint32_t(Hash(o, d)) * 0x1p-32f; if (u > alpha) // no hit return; } } optixReportIntersection(isect->t, 0 /* hit kind */, FloatToBits(isect->uv[0]), FloatToBits(isect->uv[1])); } /////////////////////////////////////////////////////////////////////////// // Random hit (for subsurface scattering) struct RandomHitPayload { WeightedReservoirSampler<SubsurfaceInteraction> wrs; MaterialHandle material; }; extern "C" __global__ void __raygen__randomHit() { // Keep as uint32_t so can pass directly to optixTrace. uint32_t index = optixGetLaunchIndex().x; if (index >= params.subsurfaceScatterQueue->Size()) return; SubsurfaceScatterWorkItem s = (*params.subsurfaceScatterQueue)[index]; Ray ray(s.p0, s.p1 - s.p0); Float tMax = 1.f; RandomHitPayload payload; payload.wrs.Seed(Hash(s.p0, s.p1)); payload.material = s.material; uint32_t ptr0 = packPointer0(&payload), ptr1 = packPointer1(&payload); PBRT_DBG("Randomhit raygen ray.o %f %f %f ray.d %f %f %f tMax %f\n", ray.o.x, ray.o.y, ray.o.z, ray.d.x, ray.d.y, ray.d.z, tMax); Trace(params.traversable, ray, 0.f /* tMin */, tMax, OPTIX_RAY_FLAG_NONE, ptr0, ptr1); if (payload.wrs.HasSample() && payload.wrs.WeightSum() > 0) { // TODO: latter check shouldn't be needed... 
const SubsurfaceInteraction &si = payload.wrs.GetSample(); PBRT_DBG("optix si p %f %f %f n %f %f %f\n", si.p().x, si.p().y, si.p().z, si.n.x, si.n.y, si.n.z); params.subsurfaceScatterQueue->weight[index] = payload.wrs.WeightSum(); params.subsurfaceScatterQueue->ssi[index] = payload.wrs.GetSample(); } else params.subsurfaceScatterQueue->weight[index] = 0; } extern "C" __global__ void __anyhit__randomHitTriangle() { const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer(); RandomHitPayload *p = getPayload<RandomHitPayload>(); PBRT_DBG("Anyhit triangle for random hit: rec.material %p params.materials %p\n", rec.material.ptr(), p->material.ptr()); if (rec.material == p->material) p->wrs.Add([&] PBRT_CPU_GPU() { return getTriangleIntersection(); }, 1.f); optixIgnoreIntersection(); } extern "C" __global__ void __anyhit__randomHitBilinearPatch() { BilinearMeshRecord &rec = *(BilinearMeshRecord *)optixGetSbtDataPointer(); RandomHitPayload *p = getPayload<RandomHitPayload>(); PBRT_DBG("Anyhit blp for random hit: rec.material %p params.materials %p\n", rec.material.ptr(), p->material.ptr()); if (rec.material == p->material) p->wrs.Add( [&] PBRT_CPU_GPU() { Point2f uv(BitsToFloat(optixGetAttribute_0()), BitsToFloat(optixGetAttribute_1())); return getBilinearPatchIntersection(uv); }, 1.f); optixIgnoreIntersection(); } extern "C" __global__ void __anyhit__randomHitQuadric() { QuadricRecord &rec = *((QuadricRecord *)optixGetSbtDataPointer()); RandomHitPayload *p = getPayload<RandomHitPayload>(); PBRT_DBG("Anyhit quadric for random hit: rec.material %p params.materials %p\n", rec.material.ptr(), p->material.ptr()); if (rec.material == p->material) { p->wrs.Add( [&] PBRT_CPU_GPU() { QuadricIntersection qi; qi.pObj = Point3f(BitsToFloat(optixGetAttribute_0()), BitsToFloat(optixGetAttribute_1()), BitsToFloat(optixGetAttribute_2())); qi.phi = BitsToFloat(optixGetAttribute_3()); return getQuadricIntersection(qi); }, 1.f); } optixIgnoreIntersection(); }
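Both the .hip and .cu versions above pass per-ray state to the OptiX programs by splitting a 64-bit pointer across two 32-bit payload registers (packPointer0/packPointer1) and reassembling it in getPayload<T>(). The round trip can be checked in isolation with a small host-side sketch; the helper names below are illustrative, not taken from the file:

#include <cassert>
#include <cstdint>

// Mirrors packPointer0/packPointer1 and getPayload<T>() from the file above:
// the high 32 bits of the address travel in payload register 0, the low
// 32 bits in payload register 1.
static uint32_t packHi(void *ptr) { return uint32_t(reinterpret_cast<uint64_t>(ptr) >> 32); }
static uint32_t packLo(void *ptr) { return uint32_t(reinterpret_cast<uint64_t>(ptr)); }

template <typename T>
static T *unpack(uint32_t p0, uint32_t p1) {
    return reinterpret_cast<T *>((uint64_t(p0) << 32) | uint64_t(p1));
}

int main() {
    int ctx = 42;  // stands in for ClosestHitContext / RandomHitPayload
    uint32_t p0 = packHi(&ctx), p1 = packLo(&ctx);
    assert(unpack<int>(p0, p1) == &ctx);  // the pointer survives the round trip
    return 0;
}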
ba146d52001dbc9e55c2979ab39e72d104b38b2b.hip
// !!! This is a file automatically generated by hipify!!!
#include "THHUNN.h"
#include "common.h"

#if THRUST_PATH
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/inner_product.h>
#else
#include <bolt/amp/functional.h>
#include <bolt/amp/inner_product.h>
#endif

struct margin_functor
{
  __host__ __device__
  margin_functor(float margin)
    : margin(margin)
  {}

  __host__ __device__ float operator()(const float &x, const float &y) const
  {
    float z = margin - x * y;
    return z >= 0 ? z : 0;
  }

  __host__ __device__
  ~margin_functor() {}

  const float margin;
};

void THNN_CudaMarginCriterion_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *target, THCudaTensor *output, bool sizeAverage, float margin)
{
  THCUNN_assertSameGPU(state, 2, input, target);

  long size = THCudaTensor_nElement(state, input);

  input = THCudaTensor_newContiguous(state, input);
  target = THCudaTensor_newContiguous(state, target);

#if THRUST_PATH
  thrust::device_ptr<float> input_data(THCudaTensor_data(state, input));
  thrust::device_ptr<float> target_data(THCudaTensor_data(state, target));
  float sum = thrust::inner_product(input_data, input_data+size, target_data, (float) 0, thrust::plus<float>(), margin_functor(margin));
#else
  auto input_data = THCudaTensor_data(state, input);
  auto target_data = THCudaTensor_data(state, target);
  float sum = bolt::amp::inner_product(input_data, input_data+size, target_data, 0.0f, bolt::amp::plus<float>(), margin_functor(margin));
#endif

  if (sizeAverage)
    sum /= size;

  THCudaTensor_free(state, input);
  THCudaTensor_free(state, target);

  THCudaTensor_set1d(state, output, 0, sum);
}

struct margin_updateGradInput_functor
{
  float margin, norm;

  __host__ __device__
  margin_updateGradInput_functor() = default;

  __host__ __device__
  margin_updateGradInput_functor(float margin_, float norm_)
    : margin(margin_)
    , norm(norm_)
  {}

  margin_updateGradInput_functor(const margin_updateGradInput_functor& fun) = default;

  __host__ __device__ float operator()(const float &x, const float &y) const
  {
    return (x * y) < margin ? -norm * y : 0;
  }
};

void THNN_CudaMarginCriterion_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *target, THCudaTensor *gradInput, bool sizeAverage, float margin)
{
  THCUNN_assertSameGPU(state, 3, input, target, gradInput);

  long size = THCudaTensor_nElement(state, input);
  float norm = sizeAverage ? 1.f/size : 1;

  input = THCudaTensor_newContiguous(state, input);
  target = THCudaTensor_newContiguous(state, target);

  THCudaTensor_resizeAs(state, gradInput, input);

#if THRUST_PATH
  thrust::device_ptr<float> input_data(THCudaTensor_data(state, input));
  thrust::device_ptr<float> target_data(THCudaTensor_data(state, target));
  thrust::device_ptr<float> gradInput_data(THCudaTensor_data(state, gradInput));

  thrust::transform(input_data, input_data+size, target_data, gradInput_data, margin_updateGradInput_functor(margin, norm));
#else
  auto input_data = THCudaTensor_data(state, input);
  auto target_data = THCudaTensor_data(state, target);
  auto gradInput_data = THCudaTensor_data(state, gradInput);

  bolt::amp::transform(input_data, input_data+size, target_data, gradInput_data, margin_updateGradInput_functor(margin, norm));
#endif

  THCudaTensor_free(state, input);
  THCudaTensor_free(state, target);
}
ba146d52001dbc9e55c2979ab39e72d104b38b2b.cu
#include "THCUNN.h" #include "common.h" #if THRUST_PATH #include <thrust/fill.h> #include <thrust/functional.h> #include <thrust/device_ptr.h> #include <thrust/reduce.h> #include <thrust/inner_product.h> #else #include <bolt/amp/functional.h> #include <bolt/amp/inner_product.h> #endif struct margin_functor { __host__ __device__ margin_functor(float margin) : margin(margin) {} __host__ __device__ float operator()(const float &x, const float &y) const { float z = margin - x * y; return z >= 0 ? z : 0; } __host__ __device__ ~margin_functor() {} const float margin; }; void THNN_CudaMarginCriterion_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *target, THCudaTensor *output, bool sizeAverage, float margin) { THCUNN_assertSameGPU(state, 2, input, target); long size = THCudaTensor_nElement(state, input); input = THCudaTensor_newContiguous(state, input); target = THCudaTensor_newContiguous(state, target); #if THRUST_PATH thrust::device_ptr<float> input_data(THCudaTensor_data(state, input)); thrust::device_ptr<float> target_data(THCudaTensor_data(state, target)); float sum = thrust::inner_product(input_data, input_data+size, target_data, (float) 0, thrust::plus<float>(), margin_functor(margin)); #else auto input_data = THCudaTensor_data(state, input); auto target_data = THCudaTensor_data(state, target); float sum = bolt::amp::inner_product(input_data, input_data+size, target_data, 0.0f, bolt::amp::plus<float>(), margin_functor(margin)); #endif if (sizeAverage) sum /= size; THCudaTensor_free(state, input); THCudaTensor_free(state, target); THCudaTensor_set1d(state, output, 0, sum); } struct margin_updateGradInput_functor { float margin, norm; __host__ __device__ margin_updateGradInput_functor() = default; __host__ __device__ margin_updateGradInput_functor(float margin_, float norm_) : margin(margin_) , norm(norm_) {} margin_updateGradInput_functor(const margin_updateGradInput_functor& fun) = default; __host__ __device__ float operator()(const float &x, const float &y) const { return (x * y) < margin ? -norm * y : 0; } }; void THNN_CudaMarginCriterion_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *target, THCudaTensor *gradInput, bool sizeAverage, float margin) { THCUNN_assertSameGPU(state, 3, input, target, gradInput); long size = THCudaTensor_nElement(state, input); float norm = sizeAverage ? 1.f/size : 1; input = THCudaTensor_newContiguous(state, input); target = THCudaTensor_newContiguous(state, target); THCudaTensor_resizeAs(state, gradInput, input); #if THRUST_PATH thrust::device_ptr<float> input_data(THCudaTensor_data(state, input)); thrust::device_ptr<float> target_data(THCudaTensor_data(state, target)); thrust::device_ptr<float> gradInput_data(THCudaTensor_data(state, gradInput)); thrust::transform(input_data, input_data+size, target_data, gradInput_data, margin_updateGradInput_functor(margin, norm)); #else auto input_data = THCudaTensor_data(state, input); auto target_data = THCudaTensor_data(state, target); auto gradInput_data = THCudaTensor_data(state, gradInput); bolt::amp::transform(input_data, input_data+size, target_data, gradInput_data, margin_updateGradInput_functor(margin, norm)); #endif THCudaTensor_free(state, input); THCudaTensor_free(state, target); }
e1dfe5d2cbbd77cecc3480bb144c3f6fcb1a7853.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
This example shows the 2D (pitched) malloc interface, which is used to get a
memory-optimized layout: padding is added to each row so that alignment is
better for fetches.
*/
#include <common_functions.h>
#include <iostream>
using namespace std;

__global__ void MyKernel(char* devPtr, size_t pitch, int width, int height)
{
    for (int r = 0; r < height; ++r) {
        float* row = (float*)((char*)devPtr + r * pitch);
        for (int c = 0; c < width; ++c) {
            float element = row[c];
        }
    }
}

int main(int argc, char **argv)
{
    int width = 64, height = 64;
    char* devPtr;
    size_t pitch;

    hipMallocPitch(&devPtr, &pitch, width * sizeof(char), height);
    cout << sizeof(float) << endl;
    cout << pitch << endl;

    hipLaunchKernelGGL(( MyKernel), dim3(100), dim3(512), 0, 0, devPtr, pitch, width, height);

    cin >> width;
    return 0;
}
e1dfe5d2cbbd77cecc3480bb144c3f6fcb1a7853.cu
/*
This example shows the 2D (pitched) malloc interface, which is used to get a
memory-optimized layout: padding is added to each row so that alignment is
better for fetches.
*/
#include <common_functions.h>
#include <iostream>
using namespace std;

__global__ void MyKernel(char* devPtr, size_t pitch, int width, int height)
{
    for (int r = 0; r < height; ++r) {
        float* row = (float*)((char*)devPtr + r * pitch);
        for (int c = 0; c < width; ++c) {
            float element = row[c];
        }
    }
}

int main(int argc, char **argv)
{
    int width = 64, height = 64;
    char* devPtr;
    size_t pitch;

    cudaMallocPitch(&devPtr, &pitch, width * sizeof(char), height);
    cout << sizeof(float) << endl;
    cout << pitch << endl;

    MyKernel<<<100, 512>>>(devPtr, pitch, width, height);

    cin >> width;
    return 0;
}
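Both versions of this example allocate a pitched buffer and index it row by row inside the kernel, but neither shows how host data usually gets into such an allocation. The sketch below illustrates the customary companion call, cudaMemcpy2D, which takes the pitch returned by cudaMallocPitch so that each padded row is copied correctly; it is an editorial illustration on the CUDA side only (the HIP equivalent would use hipMemcpy2D), and the fill value and vector name are arbitrary:

#include <cuda_runtime.h>
#include <vector>

// Editorial sketch: filling a pitched allocation row by row from the host.
// cudaMemcpy2D takes the destination pitch (in bytes) returned by
// cudaMallocPitch, so each padded row is handled for us.
int main() {
    const int width = 64, height = 64;          // same dimensions as the example
    std::vector<char> host(width * height, 7);  // host data, row-major, unpadded

    char *devPtr = nullptr;
    size_t pitch = 0;
    cudaMallocPitch(reinterpret_cast<void **>(&devPtr), &pitch,
                    width * sizeof(char), height);

    // src pitch is the unpadded row width; dst pitch is what cudaMallocPitch gave us.
    cudaMemcpy2D(devPtr, pitch, host.data(), width * sizeof(char),
                 width * sizeof(char), height, cudaMemcpyHostToDevice);

    cudaFree(devPtr);
    return 0;
}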
40d85435f862cdb86752ea8ca6951507cc0f7bfb.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "vadd.cuh"
#include <cstdio>
#include <cstdlib>
#include <iostream>

#define THREAD_NUM 512
// #define THREAD_NUM 1024

int main(int argc, char *argv[]) {
    int N = atoi(argv[1]);
    float *a, *b;
    hipMallocManaged(&a, N * sizeof(float));
    hipMallocManaged(&b, N * sizeof(float));

    // assign a and b
    for (int i = 0; i < N; i++) {
        a[i] = (float)i;
        b[i] = (float)i;
    }

    hipEvent_t start;
    hipEvent_t stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    hipEventRecord(start);
    hipLaunchKernelGGL((vadd), dim3(N / THREAD_NUM + 1), dim3(THREAD_NUM), 0, 0, a, b, N);
    hipDeviceSynchronize();
    hipEventRecord(stop);
    hipEventSynchronize(stop);

    // for (int i = 0; i < N; i++) {
    //     std::cout << b[i] << " ";
    // }

    float ms;
    hipEventElapsedTime(&ms, start, stop);
    std::cout << ms / 1000 << "\n";
    std::cout << b[0] << "\n";
    std::cout << b[N - 1] << "\n";

    hipFree(a);
    hipFree(b);
    return 0;
}
40d85435f862cdb86752ea8ca6951507cc0f7bfb.cu
#include "vadd.cuh"
#include <cstdio>
#include <cstdlib>
#include <iostream>

#define THREAD_NUM 512
// #define THREAD_NUM 1024

int main(int argc, char *argv[]) {
    int N = atoi(argv[1]);
    float *a, *b;
    cudaMallocManaged(&a, N * sizeof(float));
    cudaMallocManaged(&b, N * sizeof(float));

    // assign a and b
    for (int i = 0; i < N; i++) {
        a[i] = (float)i;
        b[i] = (float)i;
    }

    cudaEvent_t start;
    cudaEvent_t stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    vadd<<<N / THREAD_NUM + 1, THREAD_NUM>>>(a, b, N);
    cudaDeviceSynchronize();
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    // for (int i = 0; i < N; i++) {
    //     std::cout << b[i] << " ";
    // }

    float ms;
    cudaEventElapsedTime(&ms, start, stop);
    std::cout << ms / 1000 << "\n";
    std::cout << b[0] << "\n";
    std::cout << b[N - 1] << "\n";

    cudaFree(a);
    cudaFree(b);
    return 0;
}
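The program above prints only b[0] and b[N - 1]. A fuller check would verify every element on the host after cudaDeviceSynchronize() and also surface launch failures. The sketch below is an addition; it assumes vadd accumulates a into b (b[i] = a[i] + b[i]), which is what the printed values suggest, since vadd.cuh is not shown here.

#include <cuda_runtime.h>
#include <cstdio>

// Sketch: verify the managed-memory result and report kernel launch errors.
// Assumes vadd computed b[i] = a[i] + b[i] with a[i] = b[i] = (float)i.
bool check_vadd(const float *b, int n)
{
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
        return false;
    }
    for (int i = 0; i < n; i++) {
        if (b[i] != 2.0f * (float)i) {
            fprintf(stderr, "mismatch at %d: got %f\n", i, b[i]);
            return false;
        }
    }
    return true;
}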
f1f0d9f73197b2683116d28cb5d515d076b97f71.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #define N 8192 // Number of rows/columns of the matrix. #define TILE_DIM 32 #define SIZE N*N // Total size of a matrix. // Compares two matrices element by element. int isTransposed (const double* a, const double* b, const int dim) { for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { if(b[j + i*dim] != a[i + j*dim]) return 0; } } return 1; } // Gpu naive transposition. __global__ void gpuNaiveTrans (double* a, double* b, const int size, const int brows) { int col = blockIdx.x * TILE_DIM + threadIdx.x; int row = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; for(int i = 0; i < TILE_DIM; i += brows) { b[col * width + (row + i)] = a[(row + i) * width + col]; } } // Gpu optimised transposition. __global__ void gpuOptTrans (double* a, double* b, const int size, const int brows) { // Buffer on the shared memory. __shared__ double tmp[TILE_DIM][TILE_DIM]; int col = blockIdx.x * TILE_DIM + threadIdx.x; int row = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; // Transposition on buffer. for(int i = 0; i < TILE_DIM; i += brows) { tmp[threadIdx.y + i][threadIdx.x] = a[(row + i) * width + col]; } __syncthreads(); col = blockIdx.y * TILE_DIM + threadIdx.x; row = blockIdx.x * TILE_DIM + threadIdx.y; // Writing to output. for(int i = 0; i < TILE_DIM; i += brows) { b[col + width * (row + i)] = tmp[threadIdx.x][threadIdx.y + i]; } } void matrixFill (double* a, const int dim) { for(int i = 0; i < dim; i++) { a[i] = (double) i; } } int main(int argc, char* argv[]) { double* hostInput, * hostOutput; double* devInput, * devOutput; // Allocate host memory. hostInput = (double* )malloc(SIZE * sizeof(double)); hostOutput = (double* )malloc(SIZE * sizeof(double)); // Allocate device memory. hipMalloc((void**)&devInput, SIZE * sizeof(double)); hipMalloc((void**)&devOutput, SIZE * sizeof(double)); // Dimensions. dim3 grid, block; block.x = TILE_DIM; block.y = atoi(argv[1])/TILE_DIM; // Threads-per-block readed as input. grid.x = N / TILE_DIM; grid.y = N / TILE_DIM; printf("\n--------------------------\n"); printf("--------------------------\n\n"); printf("Threads per block = %d\n\n", atoi(argv[1])); /// NAIVE TRANSPOSE // // Initialise matricx. matrixFill(hostInput, SIZE); // Copy input to device. hipMemcpy(devInput, hostInput, SIZE * sizeof(double), hipMemcpyHostToDevice); // Timing. float elapsedTime = 0.0; hipEvent_t tStart, tEnd; hipEventCreate(&tStart); hipEventCreate(&tEnd); hipEventRecord(tStart); hipLaunchKernelGGL(( gpuNaiveTrans), dim3(grid), dim3(block) , 0, 0, devInput, devOutput, N, block.y); hipEventRecord(tEnd); hipEventSynchronize(tEnd); hipEventElapsedTime(&elapsedTime, tStart, tEnd); printf("NAIVE TRANSPOSE: Elapsed time: %fms\n", elapsedTime); printf("Bandwidth: %f GB/s\n", 2 * SIZE * sizeof(double) / elapsedTime / 1000000); // Copy output to host. hipMemcpy(hostOutput, devOutput, SIZE * sizeof(double), hipMemcpyDeviceToHost); printf("Is the tranposition correct? %s\n", isTransposed(hostOutput, hostInput, N) ? "CORRECT" : "ERROR!" ); /// OPTIMISED TRANSPOSE // printf("\n\n"); // Initialise matricx. matrixFill(hostInput, SIZE); // Copy input to device. hipMemcpy(devInput, hostInput, SIZE * sizeof(double), hipMemcpyHostToDevice); // Timing. 
elapsedTime = 0.0; hipEventRecord(tStart); hipLaunchKernelGGL(( gpuOptTrans), dim3(grid), dim3(block) , 0, 0, devInput, devOutput, N, block.y); hipEventRecord(tEnd); hipEventSynchronize(tEnd); hipEventElapsedTime(&elapsedTime, tStart, tEnd); printf("OPTIMISED TRANSPOSE: Elapsed time: %fms\n", elapsedTime); printf("Bandwidth: %f GB/s\n", 2 * SIZE * sizeof(double) / elapsedTime / 1000000); // Copy output to host. hipMemcpy(hostOutput, devOutput, SIZE * sizeof(double), hipMemcpyDeviceToHost); printf("Is the tranposition correct? %s\n", isTransposed(hostOutput, hostInput, N) ? "CORRECT" : "ERROR!" ); // Freeing resources. free(hostInput); free(hostOutput); hipFree(devInput); hipFree(devOutput); hipEventDestroy(tStart); hipEventDestroy(tEnd); }
f1f0d9f73197b2683116d28cb5d515d076b97f71.cu
#include <stdio.h> #define N 8192 // Number of rows/columns of the matrix. #define TILE_DIM 32 #define SIZE N*N // Total size of a matrix. // Compares two matrices element by element. int isTransposed (const double* a, const double* b, const int dim) { for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { if(b[j + i*dim] != a[i + j*dim]) return 0; } } return 1; } // Gpu naive transposition. __global__ void gpuNaiveTrans (double* a, double* b, const int size, const int brows) { int col = blockIdx.x * TILE_DIM + threadIdx.x; int row = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; for(int i = 0; i < TILE_DIM; i += brows) { b[col * width + (row + i)] = a[(row + i) * width + col]; } } // Gpu optimised transposition. __global__ void gpuOptTrans (double* a, double* b, const int size, const int brows) { // Buffer on the shared memory. __shared__ double tmp[TILE_DIM][TILE_DIM]; int col = blockIdx.x * TILE_DIM + threadIdx.x; int row = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; // Transposition on buffer. for(int i = 0; i < TILE_DIM; i += brows) { tmp[threadIdx.y + i][threadIdx.x] = a[(row + i) * width + col]; } __syncthreads(); col = blockIdx.y * TILE_DIM + threadIdx.x; row = blockIdx.x * TILE_DIM + threadIdx.y; // Writing to output. for(int i = 0; i < TILE_DIM; i += brows) { b[col + width * (row + i)] = tmp[threadIdx.x][threadIdx.y + i]; } } void matrixFill (double* a, const int dim) { for(int i = 0; i < dim; i++) { a[i] = (double) i; } } int main(int argc, char* argv[]) { double* hostInput, * hostOutput; double* devInput, * devOutput; // Allocate host memory. hostInput = (double* )malloc(SIZE * sizeof(double)); hostOutput = (double* )malloc(SIZE * sizeof(double)); // Allocate device memory. cudaMalloc((void**)&devInput, SIZE * sizeof(double)); cudaMalloc((void**)&devOutput, SIZE * sizeof(double)); // Dimensions. dim3 grid, block; block.x = TILE_DIM; block.y = atoi(argv[1])/TILE_DIM; // Threads-per-block readed as input. grid.x = N / TILE_DIM; grid.y = N / TILE_DIM; printf("\n--------------------------\n"); printf("--------------------------\n\n"); printf("Threads per block = %d\n\n", atoi(argv[1])); /// NAIVE TRANSPOSE // // Initialise matricx. matrixFill(hostInput, SIZE); // Copy input to device. cudaMemcpy(devInput, hostInput, SIZE * sizeof(double), cudaMemcpyHostToDevice); // Timing. float elapsedTime = 0.0; cudaEvent_t tStart, tEnd; cudaEventCreate(&tStart); cudaEventCreate(&tEnd); cudaEventRecord(tStart); gpuNaiveTrans<<< grid, block >>>(devInput, devOutput, N, block.y); cudaEventRecord(tEnd); cudaEventSynchronize(tEnd); cudaEventElapsedTime(&elapsedTime, tStart, tEnd); printf("NAIVE TRANSPOSE: Elapsed time: %fms\n", elapsedTime); printf("Bandwidth: %f GB/s\n", 2 * SIZE * sizeof(double) / elapsedTime / 1000000); // Copy output to host. cudaMemcpy(hostOutput, devOutput, SIZE * sizeof(double), cudaMemcpyDeviceToHost); printf("Is the tranposition correct? %s\n", isTransposed(hostOutput, hostInput, N) ? "CORRECT" : "ERROR!" ); /// OPTIMISED TRANSPOSE // printf("\n\n"); // Initialise matricx. matrixFill(hostInput, SIZE); // Copy input to device. cudaMemcpy(devInput, hostInput, SIZE * sizeof(double), cudaMemcpyHostToDevice); // Timing. 
elapsedTime = 0.0; cudaEventRecord(tStart); gpuOptTrans<<< grid, block >>>(devInput, devOutput, N, block.y); cudaEventRecord(tEnd); cudaEventSynchronize(tEnd); cudaEventElapsedTime(&elapsedTime, tStart, tEnd); printf("OPTIMISED TRANSPOSE: Elapsed time: %fms\n", elapsedTime); printf("Bandwidth: %f GB/s\n", 2 * SIZE * sizeof(double) / elapsedTime / 1000000); // Copy output to host. cudaMemcpy(hostOutput, devOutput, SIZE * sizeof(double), cudaMemcpyDeviceToHost); printf("Is the tranposition correct? %s\n", isTransposed(hostOutput, hostInput, N) ? "CORRECT" : "ERROR!" ); // Freeing resources. free(hostInput); free(hostOutput); cudaFree(devInput); cudaFree(devOutput); cudaEventDestroy(tStart); cudaEventDestroy(tEnd); }
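One refinement the optimised kernel above does not include is padding the shared-memory tile by one column, which removes bank conflicts when tmp is read column-wise in the write-out loop. The variant below is a sketch with the same indexing and launch configuration as gpuOptTrans; the name gpuOptTransPadded is introduced here and is not part of the file above.

#define TILE_DIM 32   // same value as in the file above

// Sketch: shared-memory transpose with a padded tile to avoid bank conflicts.
__global__ void gpuOptTransPadded(double *a, double *b, const int size, const int brows)
{
    __shared__ double tmp[TILE_DIM][TILE_DIM + 1];   // +1 column breaks bank conflicts

    int col   = blockIdx.x * TILE_DIM + threadIdx.x;
    int row   = blockIdx.y * TILE_DIM + threadIdx.y;
    int width = gridDim.x * TILE_DIM;

    // Transposition on buffer.
    for (int i = 0; i < TILE_DIM; i += brows)
        tmp[threadIdx.y + i][threadIdx.x] = a[(row + i) * width + col];

    __syncthreads();

    col = blockIdx.y * TILE_DIM + threadIdx.x;
    row = blockIdx.x * TILE_DIM + threadIdx.y;

    // Writing to output.
    for (int i = 0; i < TILE_DIM; i += brows)
        b[col + width * (row + i)] = tmp[threadIdx.x][threadIdx.y + i];
}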
d83846d691ba4a92e5dc192f6939988c7284ccf1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "funset.h" #include <iostream> using namespace std; int test1(); int test2(); int test3();// int test4();//JuliaCUDA int test5();// int test6();// int test7();//rippleCUDA int test8();//CUDA int test9();//JuliaCUDA__syncthreads() int test10();//(Ray Tracing)+GPU int test11();//(Ray Tracing)+GPU int test12();// int test13();// int test14();//rippleCUDA+OpenGL int test15();//,CUDA+OpenGL int test16();//atomicAdd int test17();// int test18();//stream int test19();//stream int test20();// int test21();//GPU int test1() { int a = 2, b = 3, c = 0; int* dev_c = NULL; hipError_t cudaStatus; cudaStatus = hipMalloc((void**)&dev_c, sizeof(int)); if (cudaStatus != hipSuccess){ fprintf(stderr, "hipMalloc failed!"); goto Error; } hipLaunchKernelGGL(( add), dim3(1), dim3(1), 0, 0, a, b, dev_c); cudaStatus = hipMemcpy(&c, dev_c, sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess){ fprintf(stderr, "hipMalloc failed!"); goto Error; } printf("%d + %d = %d\n", a, b, c); Error: hipFree(dev_c); return 0; } int test2() { int count = -1; HANDLE_ERROR(hipGetDeviceCount(&count)); printf("device count: %d\n", count); hipDeviceProp_t prop; for (int i = 0; i < count; i++) { HANDLE_ERROR(hipGetDeviceProperties(&prop, i)); printf(" --- General Information for device %d ---\n", i); printf("Name: %s\n", prop.name); printf("Compute capability: %d.%d\n", prop.major, prop.minor); printf("Clock rate: %d\n", prop.clockRate); printf("Device copy overlap: "); if (prop.deviceOverlap) printf("Enabled\n"); else printf("Disabled\n"); printf("Kernel execution timeout : "); if (prop.kernelExecTimeoutEnabled) printf("Enabled\n"); else printf("Disabled\n"); printf(" --- Memory Information for device %d ---\n", i); printf("Total global mem: %ld\n", prop.totalGlobalMem); printf("Total constant Mem: %ld\n", prop.totalConstMem); printf("Max mem pitch: %ld\n", prop.memPitch); printf("Texture Alignment: %ld\n", prop.textureAlignment); printf(" --- MP Information for device %d ---\n", i); printf("Multiprocessor count: %d\n", prop.multiProcessorCount); printf("Shared mem per mp: %ld\n", prop.sharedMemPerBlock); printf("Registers per mp: %d\n", prop.regsPerBlock); printf("Threads in warp: %d\n", prop.warpSize); printf("Max threads per block: %d\n", prop.maxThreadsPerBlock); printf("Max thread dimensions: (%d, %d, %d)\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]); printf("Max grid dimensions: (%d, %d, %d)\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]); printf("\n"); } int dev; HANDLE_ERROR(hipGetDevice(&dev)); printf("ID of current CUDA device: %d\n", dev); memset(&prop, 0, sizeof(hipDeviceProp_t)); prop.major = 1; prop.minor = 3; HANDLE_ERROR(hipChooseDevice(&dev, &prop)); printf("ID of CUDA device closest to revision %d.%d: %d\n", prop.major, prop.minor, dev); HANDLE_ERROR(hipSetDevice(dev)); return 0; } int test3() { int a[NUM] = {0}, b[NUM] = {0}, c[NUM] = {0}; int *dev_a = NULL, *dev_b = NULL, *dev_c = NULL; //allocate the memory on the GPU HANDLE_ERROR(hipMalloc((void**)&dev_a, NUM * sizeof(int))); HANDLE_ERROR(hipMalloc((void**)&dev_b, NUM * sizeof(int))); HANDLE_ERROR(hipMalloc((void**)&dev_c, NUM * sizeof(int))); //fill the arrays 'a' and 'b' on the CPU for (int i=0; i<NUM; i++) { a[i] = -i; b[i] = i * i; } cout<<"NUM:"<<NUM<<endl; cout<<"b[NUM-1]:"<<b[NUM-1]<<endl; cout<<"sizeof(int):"<<sizeof(int)<<endl; //copy the arrays 'a' and 'b' to the GPU HANDLE_ERROR(hipMemcpy(dev_a, a, 
NUM * sizeof(int), hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpy(dev_b, b, NUM * sizeof(int), hipMemcpyHostToDevice)); // hipLaunchKernelGGL(( add_blockIdx), dim3(NUM),dim3(1), 0, 0, dev_a, dev_b, dev_c ); //copy the array 'c' back from the GPU to the CPU HANDLE_ERROR(hipMemcpy(c, dev_c, NUM * sizeof(int), hipMemcpyDeviceToHost)); //display the results //for (int i=0; i<NUM; i++) { // printf( "%d + %d = %d\n", a[i], b[i], c[i] ); //} printf( "%d + %d = %d\n", a[NUM-1], b[NUM-1], c[NUM-1] ); //free the memory allocated on the GPU HANDLE_ERROR(hipFree(dev_a)); HANDLE_ERROR(hipFree(dev_b)); HANDLE_ERROR(hipFree(dev_c)); return 0; } int test5() { int a[NUM], b[NUM], c[NUM]; int *dev_a = NULL, *dev_b = NULL, *dev_c = NULL; HANDLE_ERROR(hipMalloc((void**)&dev_a, NUM * sizeof(int))); HANDLE_ERROR(hipMalloc((void**)&dev_b, NUM * sizeof(int))); HANDLE_ERROR(hipMalloc((void**)&dev_c, NUM * sizeof(int))); for (int i = 0; i < NUM; i++) { a[i] = i; b[i] = i * i; } HANDLE_ERROR(hipMemcpy(dev_a, a, NUM * sizeof(int), hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpy(dev_b, b, NUM * sizeof(int), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( add_threadIdx), dim3(1), dim3(NUM), 0, 0, dev_a, dev_b, dev_c); HANDLE_ERROR(hipMemcpy(c, dev_c, NUM * sizeof(int), hipMemcpyDeviceToHost)); printf("%d + %d = %d\n", a[NUM-1], b[NUM-1], c[NUM-1]); hipFree(dev_a); hipFree(dev_b); hipFree(dev_c); return 0; } int test6() { int a[NUM], b[NUM], c[NUM]; int *dev_a = NULL, *dev_b = NULL, *dev_c = NULL; HANDLE_ERROR(hipMalloc((void**)&dev_a, NUM * sizeof(int))); HANDLE_ERROR(hipMalloc((void**)&dev_b, NUM * sizeof(int))); HANDLE_ERROR(hipMalloc((void**)&dev_c, NUM * sizeof(int))); for (int i = 0; i < NUM; i++) { a[i] = i; b[i] = i * i / 10; } HANDLE_ERROR(hipMemcpy(dev_a, a, NUM * sizeof(int), hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpy(dev_b, b, NUM * sizeof(int), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( add_blockIdx_threadIdx), dim3(128), dim3(128), 0, 0, dev_a, dev_b, dev_c); HANDLE_ERROR(hipMemcpy(c, dev_c, NUM * sizeof(int), hipMemcpyDeviceToHost)); bool success = true; for (int i = 0; i < NUM; i++) { if ((a[i] + b[i]) != c[i]) { printf("error: %d + %d != %d\n", a[i], b[i], c[i]); success = false; } } if (success) printf("we did it!\n"); hipFree(dev_a); hipFree(dev_b); hipFree(dev_c); return 0; } int test8() { float *a, *b, c, *partial_c; float *dev_a, *dev_b, *dev_partial_c; a = (float*)malloc(NUM * sizeof(float)); b = (float*)malloc(NUM * sizeof(float)); partial_c = (float*)malloc(blocksPerGrid * sizeof(float)); HANDLE_ERROR(hipMalloc((void**)&dev_a, NUM * sizeof(float))); HANDLE_ERROR(hipMalloc((void**)&dev_b, NUM * sizeof(float))); HANDLE_ERROR(hipMalloc((void**)&dev_partial_c, blocksPerGrid*sizeof(float))); for (int i = 0; i < NUM; i++) { a[i] = i; b[i] = i*2; } HANDLE_ERROR(hipMemcpy(dev_a, a, NUM * sizeof(float), hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpy(dev_b, b, NUM * sizeof(float), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( dot_kernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_a, dev_b, dev_partial_c); HANDLE_ERROR(hipMemcpy(partial_c, dev_partial_c, blocksPerGrid * sizeof(float), hipMemcpyDeviceToHost)); //finish up on the CPU side c = 0; for (int i = 0; i < blocksPerGrid; i++) { c += partial_c[i]; } //0NUM-12 // #define sum_squares(x) (x * (x + 1) * (2 * x + 1) / 6) printf("Does GPU value %.6g = %.6g?\n", c, 2 * sum_squares((float)(NUM - 1))); HANDLE_ERROR(hipFree(dev_a)); HANDLE_ERROR(hipFree(dev_b)); HANDLE_ERROR(hipFree(dev_partial_c)); free(a); free(b); free(partial_c); return 
0; } // without definition of big_random_block() //int test16() //{ // unsigned char *buffer = (unsigned char*)big_random_block(SIZE); // //capture the start time starting the timer here so that we include the cost of // //all of the operations on the GPU. if the data were already on the GPU and we just // //timed the kernel the timing would drop from 74 ms to 15 ms. Very fast. // hipEvent_t start, stop; // HANDLE_ERROR( hipEventCreate( &start ) ); // HANDLE_ERROR( hipEventCreate( &stop ) ); // HANDLE_ERROR( hipEventRecord( start, 0 ) ); // // // allocate memory on the GPU for the file's data // unsigned char *dev_buffer; // unsigned int *dev_histo; // HANDLE_ERROR(hipMalloc((void**)&dev_buffer, SIZE)); // HANDLE_ERROR(hipMemcpy(dev_buffer, buffer, SIZE, hipMemcpyHostToDevice)); // // HANDLE_ERROR(hipMalloc((void**)&dev_histo, 256 * sizeof(int))); // HANDLE_ERROR(hipMemset(dev_histo, 0, 256 * sizeof(int))); // // //kernel launch - 2x the number of mps gave best timing // hipDeviceProp_t prop; // HANDLE_ERROR(hipGetDeviceProperties(&prop, 0)); // int blocks = prop.multiProcessorCount; // histo_kernel<<<blocks*2, 256>>>(dev_buffer, SIZE, dev_histo); // // unsigned int histo[256]; // HANDLE_ERROR(hipMemcpy(histo, dev_histo, 256 * sizeof(int), hipMemcpyDeviceToHost)); // // //get stop time, and display the timing results // HANDLE_ERROR(hipEventRecord(stop, 0)); // HANDLE_ERROR(hipEventSynchronize(stop)); // float elapsedTime; // HANDLE_ERROR(hipEventElapsedTime(&elapsedTime, start, stop)); // printf("Time to generate: %3.1f ms\n", elapsedTime); // // long histoCount = 0; // for (int i=0; i<256; i++) { // histoCount += histo[i]; // } // printf("Histogram Sum: %ld\n", histoCount); // // //verify that we have the same counts via CPU // for (int i = 0; i < SIZE; i++) // histo[buffer[i]]--; // for (int i = 0; i < 256; i++) { // if (histo[i] != 0) // printf("Failure at %d!\n", i); // } // // HANDLE_ERROR(hipEventDestroy(start)); // HANDLE_ERROR(hipEventDestroy(stop)); // hipFree(dev_histo); // hipFree(dev_buffer); // free(buffer); // return 0; //} int main(int argc, char* argv[]) { //test1(); //test2(); //test3(); //test5(); //test6(); test8(); //test16(); cout<<"ok!"<<endl; }
d83846d691ba4a92e5dc192f6939988c7284ccf1.cu
#include "funset.h" #include <iostream> using namespace std; int test1(); int test2(); int test3();//通过线程块索引来计算两个矢量和 int test4();//Julia的CUDA实现 int test5();//通过线程索引来计算两个矢量和 int test6();//通过线程块索引和线程索引来计算两个矢量和 int test7();//ripple的CUDA实现 int test8();//点积运算的CUDA实现 int test9();//Julia的CUDA实现,加入了线程同步函数__syncthreads() int test10();//光线跟踪(Ray Tracing)实现,没有常量内存+使用事件来计算GPU运行时间 int test11();//光线跟踪(Ray Tracing)实现,使用常量内存+使用事件来计算GPU运行时间 int test12();//模拟热传导,使用纹理内存,有些问题 int test13();//模拟热传导,使用二维纹理内存,有些问题 int test14();//ripple的CUDA+OpenGL实现 int test15();//模拟热传导,CUDA+OpenGL实现,有些问题 int test16();//直方图计算,利用原子操作函数atomicAdd实现 int test17();//固定内存的使用 int test18();//单个stream的使用 int test19();//多个stream的使用 int test20();//通过零拷贝内存的方式实现点积运算 int test21();//使用多个GPU实现点积运算 int test1() { int a = 2, b = 3, c = 0; int* dev_c = NULL; cudaError_t cudaStatus; cudaStatus = cudaMalloc((void**)&dev_c, sizeof(int)); if (cudaStatus != cudaSuccess){ fprintf(stderr, "cudaMalloc failed!"); goto Error; } add<<<1, 1>>>(a, b, dev_c); cudaStatus = cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess){ fprintf(stderr, "cudaMalloc failed!"); goto Error; } printf("%d + %d = %d\n", a, b, c); Error: cudaFree(dev_c); return 0; } int test2() { int count = -1; HANDLE_ERROR(cudaGetDeviceCount(&count)); printf("device count: %d\n", count); cudaDeviceProp prop; for (int i = 0; i < count; i++) { HANDLE_ERROR(cudaGetDeviceProperties(&prop, i)); printf(" --- General Information for device %d ---\n", i); printf("Name: %s\n", prop.name); printf("Compute capability: %d.%d\n", prop.major, prop.minor); printf("Clock rate: %d\n", prop.clockRate); printf("Device copy overlap: "); if (prop.deviceOverlap) printf("Enabled\n"); else printf("Disabled\n"); printf("Kernel execution timeout : "); if (prop.kernelExecTimeoutEnabled) printf("Enabled\n"); else printf("Disabled\n"); printf(" --- Memory Information for device %d ---\n", i); printf("Total global mem: %ld\n", prop.totalGlobalMem); printf("Total constant Mem: %ld\n", prop.totalConstMem); printf("Max mem pitch: %ld\n", prop.memPitch); printf("Texture Alignment: %ld\n", prop.textureAlignment); printf(" --- MP Information for device %d ---\n", i); printf("Multiprocessor count: %d\n", prop.multiProcessorCount); printf("Shared mem per mp: %ld\n", prop.sharedMemPerBlock); printf("Registers per mp: %d\n", prop.regsPerBlock); printf("Threads in warp: %d\n", prop.warpSize); printf("Max threads per block: %d\n", prop.maxThreadsPerBlock); printf("Max thread dimensions: (%d, %d, %d)\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]); printf("Max grid dimensions: (%d, %d, %d)\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]); printf("\n"); } int dev; HANDLE_ERROR(cudaGetDevice(&dev)); printf("ID of current CUDA device: %d\n", dev); memset(&prop, 0, sizeof(cudaDeviceProp)); prop.major = 1; prop.minor = 3; HANDLE_ERROR(cudaChooseDevice(&dev, &prop)); printf("ID of CUDA device closest to revision %d.%d: %d\n", prop.major, prop.minor, dev); HANDLE_ERROR(cudaSetDevice(dev)); return 0; } int test3() { int a[NUM] = {0}, b[NUM] = {0}, c[NUM] = {0}; int *dev_a = NULL, *dev_b = NULL, *dev_c = NULL; //allocate the memory on the GPU HANDLE_ERROR(cudaMalloc((void**)&dev_a, NUM * sizeof(int))); HANDLE_ERROR(cudaMalloc((void**)&dev_b, NUM * sizeof(int))); HANDLE_ERROR(cudaMalloc((void**)&dev_c, NUM * sizeof(int))); //fill the arrays 'a' and 'b' on the CPU for (int i=0; i<NUM; i++) { a[i] = -i; b[i] = i * i; } cout<<"NUM:"<<NUM<<endl; 
cout<<"b[NUM-1]:"<<b[NUM-1]<<endl; cout<<"sizeof(int):"<<sizeof(int)<<endl; //copy the arrays 'a' and 'b' to the GPU HANDLE_ERROR(cudaMemcpy(dev_a, a, NUM * sizeof(int), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(dev_b, b, NUM * sizeof(int), cudaMemcpyHostToDevice)); //尖括号中的第一个参数表示设备在执行核函数时使用的并行线程块的数量 add_blockIdx<<<NUM,1>>>( dev_a, dev_b, dev_c ); //copy the array 'c' back from the GPU to the CPU HANDLE_ERROR(cudaMemcpy(c, dev_c, NUM * sizeof(int), cudaMemcpyDeviceToHost)); //display the results //for (int i=0; i<NUM; i++) { // printf( "%d + %d = %d\n", a[i], b[i], c[i] ); //} printf( "%d + %d = %d\n", a[NUM-1], b[NUM-1], c[NUM-1] ); //free the memory allocated on the GPU HANDLE_ERROR(cudaFree(dev_a)); HANDLE_ERROR(cudaFree(dev_b)); HANDLE_ERROR(cudaFree(dev_c)); return 0; } int test5() { int a[NUM], b[NUM], c[NUM]; int *dev_a = NULL, *dev_b = NULL, *dev_c = NULL; HANDLE_ERROR(cudaMalloc((void**)&dev_a, NUM * sizeof(int))); HANDLE_ERROR(cudaMalloc((void**)&dev_b, NUM * sizeof(int))); HANDLE_ERROR(cudaMalloc((void**)&dev_c, NUM * sizeof(int))); for (int i = 0; i < NUM; i++) { a[i] = i; b[i] = i * i; } HANDLE_ERROR(cudaMemcpy(dev_a, a, NUM * sizeof(int), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(dev_b, b, NUM * sizeof(int), cudaMemcpyHostToDevice)); add_threadIdx<<<1, NUM>>>(dev_a, dev_b, dev_c); HANDLE_ERROR(cudaMemcpy(c, dev_c, NUM * sizeof(int), cudaMemcpyDeviceToHost)); printf("%d + %d = %d\n", a[NUM-1], b[NUM-1], c[NUM-1]); cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c); return 0; } int test6() { int a[NUM], b[NUM], c[NUM]; int *dev_a = NULL, *dev_b = NULL, *dev_c = NULL; HANDLE_ERROR(cudaMalloc((void**)&dev_a, NUM * sizeof(int))); HANDLE_ERROR(cudaMalloc((void**)&dev_b, NUM * sizeof(int))); HANDLE_ERROR(cudaMalloc((void**)&dev_c, NUM * sizeof(int))); for (int i = 0; i < NUM; i++) { a[i] = i; b[i] = i * i / 10; } HANDLE_ERROR(cudaMemcpy(dev_a, a, NUM * sizeof(int), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(dev_b, b, NUM * sizeof(int), cudaMemcpyHostToDevice)); add_blockIdx_threadIdx<<<128, 128>>>(dev_a, dev_b, dev_c); HANDLE_ERROR(cudaMemcpy(c, dev_c, NUM * sizeof(int), cudaMemcpyDeviceToHost)); bool success = true; for (int i = 0; i < NUM; i++) { if ((a[i] + b[i]) != c[i]) { printf("error: %d + %d != %d\n", a[i], b[i], c[i]); success = false; } } if (success) printf("we did it!\n"); cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c); return 0; } int test8() { float *a, *b, c, *partial_c; float *dev_a, *dev_b, *dev_partial_c; a = (float*)malloc(NUM * sizeof(float)); b = (float*)malloc(NUM * sizeof(float)); partial_c = (float*)malloc(blocksPerGrid * sizeof(float)); HANDLE_ERROR(cudaMalloc((void**)&dev_a, NUM * sizeof(float))); HANDLE_ERROR(cudaMalloc((void**)&dev_b, NUM * sizeof(float))); HANDLE_ERROR(cudaMalloc((void**)&dev_partial_c, blocksPerGrid*sizeof(float))); for (int i = 0; i < NUM; i++) { a[i] = i; b[i] = i*2; } HANDLE_ERROR(cudaMemcpy(dev_a, a, NUM * sizeof(float), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(dev_b, b, NUM * sizeof(float), cudaMemcpyHostToDevice)); dot_kernel<<<blocksPerGrid, threadsPerBlock>>>(dev_a, dev_b, dev_partial_c); HANDLE_ERROR(cudaMemcpy(partial_c, dev_partial_c, blocksPerGrid * sizeof(float), cudaMemcpyDeviceToHost)); //finish up on the CPU side c = 0; for (int i = 0; i < blocksPerGrid; i++) { c += partial_c[i]; } //点积计算结果应该是从0到NUM-1中每个数值的平方再乘以2 //闭合形式解 #define sum_squares(x) (x * (x + 1) * (2 * x + 1) / 6) printf("Does GPU value %.6g = %.6g?\n", c, 2 * sum_squares((float)(NUM - 1))); HANDLE_ERROR(cudaFree(dev_a)); 
HANDLE_ERROR(cudaFree(dev_b)); HANDLE_ERROR(cudaFree(dev_partial_c)); free(a); free(b); free(partial_c); return 0; } // without definition of big_random_block() //int test16() //{ // unsigned char *buffer = (unsigned char*)big_random_block(SIZE); // //capture the start time starting the timer here so that we include the cost of // //all of the operations on the GPU. if the data were already on the GPU and we just // //timed the kernel the timing would drop from 74 ms to 15 ms. Very fast. // cudaEvent_t start, stop; // HANDLE_ERROR( cudaEventCreate( &start ) ); // HANDLE_ERROR( cudaEventCreate( &stop ) ); // HANDLE_ERROR( cudaEventRecord( start, 0 ) ); // // // allocate memory on the GPU for the file's data // unsigned char *dev_buffer; // unsigned int *dev_histo; // HANDLE_ERROR(cudaMalloc((void**)&dev_buffer, SIZE)); // HANDLE_ERROR(cudaMemcpy(dev_buffer, buffer, SIZE, cudaMemcpyHostToDevice)); // // HANDLE_ERROR(cudaMalloc((void**)&dev_histo, 256 * sizeof(int))); // HANDLE_ERROR(cudaMemset(dev_histo, 0, 256 * sizeof(int))); // // //kernel launch - 2x the number of mps gave best timing // cudaDeviceProp prop; // HANDLE_ERROR(cudaGetDeviceProperties(&prop, 0)); // int blocks = prop.multiProcessorCount; // histo_kernel<<<blocks*2, 256>>>(dev_buffer, SIZE, dev_histo); // // unsigned int histo[256]; // HANDLE_ERROR(cudaMemcpy(histo, dev_histo, 256 * sizeof(int), cudaMemcpyDeviceToHost)); // // //get stop time, and display the timing results // HANDLE_ERROR(cudaEventRecord(stop, 0)); // HANDLE_ERROR(cudaEventSynchronize(stop)); // float elapsedTime; // HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime, start, stop)); // printf("Time to generate: %3.1f ms\n", elapsedTime); // // long histoCount = 0; // for (int i=0; i<256; i++) { // histoCount += histo[i]; // } // printf("Histogram Sum: %ld\n", histoCount); // // //verify that we have the same counts via CPU // for (int i = 0; i < SIZE; i++) // histo[buffer[i]]--; // for (int i = 0; i < 256; i++) { // if (histo[i] != 0) // printf("Failure at %d!\n", i); // } // // HANDLE_ERROR(cudaEventDestroy(start)); // HANDLE_ERROR(cudaEventDestroy(stop)); // cudaFree(dev_histo); // cudaFree(dev_buffer); // free(buffer); // return 0; //} int main(int argc, char* argv[]) { //test1(); //test2(); //test3(); //test5(); //test6(); test8(); //test16(); cout<<"ok!"<<endl; }
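The value test8 compares against comes from a closed form: with a[i] = i and b[i] = 2*i, the dot product is 2 times the sum of i^2 over i in [0, NUM), which is exactly what the sum_squares macro evaluates. The host reference below is an addition, independent of funset.h; the name dot_reference is introduced here.

// Host reference for the dot product in test8 (sketch).
// a[i] = i, b[i] = 2*i  =>  dot = 2 * sum_{i=0}^{n-1} i^2 = 2 * (n-1)*n*(2n-1)/6.
double dot_reference(int n)
{
    double c = 0.0;
    for (int i = 0; i < n; i++)
        c += (double)i * (double)(2 * i);
    return c;   // equals 2.0 * (n - 1.0) * n * (2.0 * n - 1.0) / 6.0
}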
8a03311e10206f49e8057bf910a80e07153270f2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <opencv2\core.hpp> #include <opencv2\highgui.hpp> #include <stdio.h> #include<ctime> using namespace cv; using namespace std; void Thresholding(const Mat& input, Mat& output, unsigned char th) { // TODO: Add your CPU side serial code to perform thresholding here } //CUDA function protorype. It takes output image and input image and a threshold value hipError_t performWithCuda(Mat&, const Mat&, unsigned char th); // CUDA GPU Kernel __global__ void gpuThreshold(unsigned char *b, const unsigned char * a, unsigned char th, unsigned int r, unsigned int c) { // TODO: // 1- calculate the index of the pointers based on pixel location for each thread // 2- perform the thresholding } int main() { unsigned char threshold = 128; // This is a threshold value, you can change this value hipError_t cudaStatus; // This is the hipError_t code that your functions may return to troubleshoot // TODO: // 1- Read the input gray-scale image with imread // 1-1- if image has no data show an error message // 1-2- if iamge has data // 1-2-1- create an image for the CPU output, and one for the GPU output // 1-2-2- call your CPU side code to threshold the image (pass the input image and the cpu output image and the threshold) // 1-2-3- call the performWithCuda function to create gpu pointers, copy data from host to device, invoke kernel // and copy results back to host (refer to the above function prototype on line 23 for reference.) // 1-2-4- Use imshow to show the input image, the CPU output and the GPU output. Note: CPU and GPU outputs should look alike. // 1-3- use cvWaitKey(0); to pause. // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. // DO NOT CHANGE THE FOLLOWING! cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } cvWaitKey(0); return 0; } // Helper function for using CUDA to perform image thresholding in parallel. Takes as input the thresholded image (bwImage), the input image (input), and the threshold value. hipError_t performWithCuda(Mat &bwImage, const Mat &input, unsigned char threshold) { unsigned char *dev_ptrout, *dev_ptrin; // these are the gpu side ouput and input pointers hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for the buffers (one input, one output) . // TODO: add your code here to allocate the input pointer on the device. Note the size of the pointer in hipMalloc // TODO: add your code here to allocate the outpu pointer on the device. Note the size of the pointer in hipMalloc // Copy input data from host memory to GPU buffers. // TODO: Add your code here. Use hipMemcpy // TODO: Launch a kernel on the GPU with one thread for each element. use <<< grid_size (or number of blocks), block_size(or number of threads) >>> // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. 
cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // TODO: Copy output data from GPU buffer to host memory. use hipMemcpy Error: hipFree(dev_ptrin); hipFree(dev_ptrout); return cudaStatus; }
8a03311e10206f49e8057bf910a80e07153270f2.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <opencv2\core.hpp> #include <opencv2\highgui.hpp> #include <stdio.h> #include<ctime> using namespace cv; using namespace std; void Thresholding(const Mat& input, Mat& output, unsigned char th) { // TODO: Add your CPU side serial code to perform thresholding here } //CUDA function protorype. It takes output image and input image and a threshold value cudaError_t performWithCuda(Mat&, const Mat&, unsigned char th); // CUDA GPU Kernel __global__ void gpuThreshold(unsigned char *b, const unsigned char * a, unsigned char th, unsigned int r, unsigned int c) { // TODO: // 1- calculate the index of the pointers based on pixel location for each thread // 2- perform the thresholding } int main() { unsigned char threshold = 128; // This is a threshold value, you can change this value cudaError_t cudaStatus; // This is the cudaError code that your functions may return to troubleshoot // TODO: // 1- Read the input gray-scale image with imread // 1-1- if image has no data show an error message // 1-2- if iamge has data // 1-2-1- create an image for the CPU output, and one for the GPU output // 1-2-2- call your CPU side code to threshold the image (pass the input image and the cpu output image and the threshold) // 1-2-3- call the performWithCuda function to create gpu pointers, copy data from host to device, invoke kernel // and copy results back to host (refer to the above function prototype on line 23 for reference.) // 1-2-4- Use imshow to show the input image, the CPU output and the GPU output. Note: CPU and GPU outputs should look alike. // 1-3- use cvWaitKey(0); to pause. // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. // DO NOT CHANGE THE FOLLOWING! cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } cvWaitKey(0); return 0; } // Helper function for using CUDA to perform image thresholding in parallel. Takes as input the thresholded image (bwImage), the input image (input), and the threshold value. cudaError_t performWithCuda(Mat &bwImage, const Mat &input, unsigned char threshold) { unsigned char *dev_ptrout, *dev_ptrin; // these are the gpu side ouput and input pointers cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for the buffers (one input, one output) . // TODO: add your code here to allocate the input pointer on the device. Note the size of the pointer in cudaMalloc // TODO: add your code here to allocate the outpu pointer on the device. Note the size of the pointer in cudaMalloc // Copy input data from host memory to GPU buffers. // TODO: Add your code here. Use cudaMemcpy // TODO: Launch a kernel on the GPU with one thread for each element. use <<< grid_size (or number of blocks), block_size(or number of threads) >>> // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. 
cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // TODO: Copy output data from GPU buffer to host memory. use cudaMemcpy Error: cudaFree(dev_ptrin); cudaFree(dev_ptrout); return cudaStatus; }
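For the TODOs inside gpuThreshold, one straightforward completion maps one thread to one pixel and writes 255 or 0 against the threshold. This is only a sketch of what the assignment asks for, not the assignment's solution; it assumes r and c are the row and column counts and that the image is a contiguous 8-bit single-channel buffer.

// Sketch of a completed thresholding kernel (one thread per pixel).
// b = output, a = input, th = threshold, r = rows, c = columns (assumed roles).
__global__ void gpuThresholdSketch(unsigned char *b, const unsigned char *a,
                                   unsigned char th, unsigned int r, unsigned int c)
{
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;   // column
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;   // row
    if (x < c && y < r) {
        unsigned int idx = y * c + x;                         // row-major pixel index
        b[idx] = (a[idx] >= th) ? 255 : 0;
    }
}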
fa73b8553adb35da44a67981dfe779b05bd4499f.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <math.h> #include <stdlib.h> #include <string.h> #include "fitsio.h" #include "ddyn.cuh" void Grid::write_image(int wj,const char * Version){ fitsfile *fptr; /* pointer to the FITS file; defined in fitsio.h */ configdata *cfg = &cconfig; char *fname; char *ver; int status,i,x,y,jlen; long naxis = 2; long naxes[2]; long fpixel = 1; double *F[cfg->ysize]; status = 0; /* initialize status before calling fitsio routines */ gpuErrchk(hipMemcpy(Fh,Fd,sizeof(double)*cfg->xsize*cfg->ysize,hipMemcpyDeviceToHost)); int whole = floor(time); jlen = ( whole == 0 ? 1 : (int)(log10(whole)+1)); whole = floor(cfg->w[wj]*1e6); jlen += ( whole == 0 ? 1 : (int)(log10(whole)+1)); fname = (char *)malloc(strlen(cfg->name_stub)+jlen+16); if ( !fname ) exit_with_usage(49); sprintf(fname,"%s_%.2f_%.3f.fits",cfg->name_stub,cfg->w[wj]*1e6,time); ver=(char *)malloc(strlen(Version)+1); if ( !ver ) exit_with_usage(49); strcpy(ver,Version); naxes[0]=cfg->xsize; naxes[1]=cfg->ysize; remove(fname); /* Overwrite if exists */ fits_create_file(&fptr,fname, &status); /* create new file */ fits_report_error(stderr, status); /* print out any error messages */ fits_create_img(fptr, DOUBLE_IMG, naxis, naxes, &status); fits_report_error(stderr, status); /* print out any error messages */ F[0] = (double *)malloc(sizeof(double)*cfg->xsize*cfg->ysize); if ( !F[0] ) exit_with_usage(49); for ( y=1 ; y < cfg->ysize ; y++ ) F[y] = F[y-1] + cfg->xsize; double Ptot=0; for ( y=0 ; y<cfg->ysize ; y++ ){ for ( x=0 ; x<cfg->xsize ; x++){ i = cfg->ysize * x + y; F[y][x] = Fh[i]; Ptot += Fh[i]; } } int size; if ( cfg->xsize >= cfg->ysize ) size = cfg->xsize; else size = cfg->ysize; float pxscale = (float)(cfg->fov/cfg->dist*pc/size); float lambda = (float)(cfg->w[wj]*1e6); fits_update_key(fptr, TFLOAT, "WAVELENG", &lambda,"[micron] Wavelength of dataset", &status); fits_update_key(fptr, TDOUBLE, "FLUX", &Ptot,"[Jy] Total flux detected", &status); fits_update_key(fptr, TFLOAT, "PIXELSCA", &pxscale,"[''/px] pixel scale of instrument setup", &status); fits_update_key(fptr, TSTRING, "VERSION", ver,"DiskDyn version", &status); fits_write_history(fptr,"Created with DiskDyn",&status); fits_write_date(fptr,&status); fits_write_img(fptr, TDOUBLE, fpixel, cfg->xsize*cfg->ysize, F[0], &status); fits_report_error(stderr, status); /* print out any error messages */ fits_close_file(fptr, &status); /* close the file */ fits_report_error(stderr, status); /* print out any error messages */ if ( cfg->verb>=2 ) printf("\tFile %s successfully written\n",fname); free(fname); free(ver); free(F[0]); return; }
fa73b8553adb35da44a67981dfe779b05bd4499f.cu
#include <stdio.h> #include <math.h> #include <stdlib.h> #include <string.h> #include "fitsio.h" #include "ddyn.cuh" void Grid::write_image(int wj,const char * Version){ fitsfile *fptr; /* pointer to the FITS file; defined in fitsio.h */ configdata *cfg = &cconfig; char *fname; char *ver; int status,i,x,y,jlen; long naxis = 2; long naxes[2]; long fpixel = 1; double *F[cfg->ysize]; status = 0; /* initialize status before calling fitsio routines */ gpuErrchk(cudaMemcpy(Fh,Fd,sizeof(double)*cfg->xsize*cfg->ysize,cudaMemcpyDeviceToHost)); int whole = floor(time); jlen = ( whole == 0 ? 1 : (int)(log10(whole)+1)); whole = floor(cfg->w[wj]*1e6); jlen += ( whole == 0 ? 1 : (int)(log10(whole)+1)); fname = (char *)malloc(strlen(cfg->name_stub)+jlen+16); if ( !fname ) exit_with_usage(49); sprintf(fname,"%s_%.2f_%.3f.fits",cfg->name_stub,cfg->w[wj]*1e6,time); ver=(char *)malloc(strlen(Version)+1); if ( !ver ) exit_with_usage(49); strcpy(ver,Version); naxes[0]=cfg->xsize; naxes[1]=cfg->ysize; remove(fname); /* Overwrite if exists */ fits_create_file(&fptr,fname, &status); /* create new file */ fits_report_error(stderr, status); /* print out any error messages */ fits_create_img(fptr, DOUBLE_IMG, naxis, naxes, &status); fits_report_error(stderr, status); /* print out any error messages */ F[0] = (double *)malloc(sizeof(double)*cfg->xsize*cfg->ysize); if ( !F[0] ) exit_with_usage(49); for ( y=1 ; y < cfg->ysize ; y++ ) F[y] = F[y-1] + cfg->xsize; double Ptot=0; for ( y=0 ; y<cfg->ysize ; y++ ){ for ( x=0 ; x<cfg->xsize ; x++){ i = cfg->ysize * x + y; F[y][x] = Fh[i]; Ptot += Fh[i]; } } int size; if ( cfg->xsize >= cfg->ysize ) size = cfg->xsize; else size = cfg->ysize; float pxscale = (float)(cfg->fov/cfg->dist*pc/size); float lambda = (float)(cfg->w[wj]*1e6); fits_update_key(fptr, TFLOAT, "WAVELENG", &lambda,"[micron] Wavelength of dataset", &status); fits_update_key(fptr, TDOUBLE, "FLUX", &Ptot,"[Jy] Total flux detected", &status); fits_update_key(fptr, TFLOAT, "PIXELSCA", &pxscale,"[''/px] pixel scale of instrument setup", &status); fits_update_key(fptr, TSTRING, "VERSION", ver,"DiskDyn version", &status); fits_write_history(fptr,"Created with DiskDyn",&status); fits_write_date(fptr,&status); fits_write_img(fptr, TDOUBLE, fpixel, cfg->xsize*cfg->ysize, F[0], &status); fits_report_error(stderr, status); /* print out any error messages */ fits_close_file(fptr, &status); /* close the file */ fits_report_error(stderr, status); /* print out any error messages */ if ( cfg->verb>=2 ) printf("\tFile %s successfully written\n",fname); free(fname); free(ver); free(F[0]); return; }
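write_image relies on a gpuErrchk wrapper declared in ddyn.cuh, which is not reproduced in this collection. A typical definition of that idiom is shown below; it is an assumption on my part, not the project's actual header.

#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

// Common gpuErrchk idiom (assumed; ddyn.cuh is not shown here).
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code != cudaSuccess) {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}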
566750a884a5b877638809f6c2f1f501ed960afa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @precisions normal z -> s d c */ #include "magma_internal.h" #define NB 64 // each thread block does one NB x n block row of A. // each thread does one row, starting from left edge and moving right to diagonal. __global__ void zlascl_diag_lower( int m, int n, const magmaDoubleComplex* D, int ldd, magmaDoubleComplex* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; A += ind; if (ind < m) { for (int j=0; j < n; j++ ) { A[j*lda] = MAGMA_Z_DIV( A[j*lda], D[j + j*ldd] ); } } } // each thread block does one NB x n block row of A. // each thread does one row, starting from right edge and moving left to diagonal. __global__ void zlascl_diag_upper( int m, int n, const magmaDoubleComplex* D, int ldd, magmaDoubleComplex* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; A += ind; if (ind < m) { for (int j=0; j < n; j++ ) { A[j*lda] = MAGMA_Z_DIV( A[j*lda], D[ind + ind*ldd] ); } } } /** Purpose ------- ZLASCL_DIAG scales the M by N complex matrix A by the real diagonal matrix dD. TYPE specifies that A may be upper triangular or lower triangular. Arguments --------- @param[in] type magma_type_t TYPE indices the storage type of the input matrix A. = MagmaLower: lower triangular matrix. = MagmaUpper: upper triangular matrix. Other formats that LAPACK supports, MAGMA does not currently support. @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0. @param[in] dD DOUBLE PRECISION vector, dimension (LDDD,M) The matrix storing the scaling factor on its diagonal. @param[in] lddd INTEGER The leading dimension of the array D. @param[in,out] dA COMPLEX*16 array, dimension (LDDA,N) The matrix to be scaled by dD. See TYPE for the storage type. @param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(1,M). @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_zaux2 ********************************************************************/ extern "C" void magmablas_zlascl_diag_q( magma_type_t type, magma_int_t m, magma_int_t n, magmaDoubleComplex_const_ptr dD, magma_int_t lddd, magmaDoubleComplex_ptr dA, magma_int_t ldda, magma_queue_t queue, magma_int_t *info ) { *info = 0; if ( type != MagmaLower && type != MagmaUpper ) *info = -1; else if ( m < 0 ) *info = -2; else if ( n < 0 ) *info = -3; else if ( lddd < max(1,m) ) *info = -5; else if ( ldda < max(1,m) ) *info = -7; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //info; } dim3 threads( NB ); dim3 grid( magma_ceildiv( m, NB ) ); if (type == MagmaLower) { hipLaunchKernelGGL(( zlascl_diag_lower) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dD, lddd, dA, ldda); } else if (type == MagmaUpper) { hipLaunchKernelGGL(( zlascl_diag_upper) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dD, lddd, dA, ldda); } }
566750a884a5b877638809f6c2f1f501ed960afa.cu
/* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @precisions normal z -> s d c */ #include "magma_internal.h" #define NB 64 // each thread block does one NB x n block row of A. // each thread does one row, starting from left edge and moving right to diagonal. __global__ void zlascl_diag_lower( int m, int n, const magmaDoubleComplex* D, int ldd, magmaDoubleComplex* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; A += ind; if (ind < m) { for (int j=0; j < n; j++ ) { A[j*lda] = MAGMA_Z_DIV( A[j*lda], D[j + j*ldd] ); } } } // each thread block does one NB x n block row of A. // each thread does one row, starting from right edge and moving left to diagonal. __global__ void zlascl_diag_upper( int m, int n, const magmaDoubleComplex* D, int ldd, magmaDoubleComplex* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; A += ind; if (ind < m) { for (int j=0; j < n; j++ ) { A[j*lda] = MAGMA_Z_DIV( A[j*lda], D[ind + ind*ldd] ); } } } /** Purpose ------- ZLASCL_DIAG scales the M by N complex matrix A by the real diagonal matrix dD. TYPE specifies that A may be upper triangular or lower triangular. Arguments --------- @param[in] type magma_type_t TYPE indices the storage type of the input matrix A. = MagmaLower: lower triangular matrix. = MagmaUpper: upper triangular matrix. Other formats that LAPACK supports, MAGMA does not currently support. @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0. @param[in] dD DOUBLE PRECISION vector, dimension (LDDD,M) The matrix storing the scaling factor on its diagonal. @param[in] lddd INTEGER The leading dimension of the array D. @param[in,out] dA COMPLEX*16 array, dimension (LDDA,N) The matrix to be scaled by dD. See TYPE for the storage type. @param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(1,M). @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_zaux2 ********************************************************************/ extern "C" void magmablas_zlascl_diag_q( magma_type_t type, magma_int_t m, magma_int_t n, magmaDoubleComplex_const_ptr dD, magma_int_t lddd, magmaDoubleComplex_ptr dA, magma_int_t ldda, magma_queue_t queue, magma_int_t *info ) { *info = 0; if ( type != MagmaLower && type != MagmaUpper ) *info = -1; else if ( m < 0 ) *info = -2; else if ( n < 0 ) *info = -3; else if ( lddd < max(1,m) ) *info = -5; else if ( ldda < max(1,m) ) *info = -7; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //info; } dim3 threads( NB ); dim3 grid( magma_ceildiv( m, NB ) ); if (type == MagmaLower) { zlascl_diag_lower <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, dD, lddd, dA, ldda); } else if (type == MagmaUpper) { zlascl_diag_upper <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, dD, lddd, dA, ldda); } }
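The two kernels differ only in which diagonal entry they divide by: the MagmaLower path scales column j of A by 1/D(j,j), while the MagmaUpper path scales row i by 1/D(i,i). The host sketch below restates that column-major indexing with plain double instead of magmaDoubleComplex, purely for illustration; the name lascl_diag_reference is not part of MAGMA.

// Host reference for zlascl_diag (sketch, real-valued, column-major A with lda >= m).
// lower == true : A(i,j) /= D(j,j)   (scale columns)
// lower == false: A(i,j) /= D(i,i)   (scale rows)
void lascl_diag_reference(bool lower, int m, int n,
                          const double *D, int ldd, double *A, int lda)
{
    for (int j = 0; j < n; j++)
        for (int i = 0; i < m; i++)
            A[i + j*lda] /= lower ? D[j + j*ldd] : D[i + i*ldd];
}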
169e5007782e1bb5447c739a3ab8ddc2d04b4c20.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // Simple 3D volume renderer #ifndef _VOLUMERENDER_KERNEL_CU_ #define _VOLUMERENDER_KERNEL_CU_ #include <cutil_inline.h> #include <cutil_math.h> typedef unsigned int uint; typedef unsigned char uchar; hipArray *d_volumeArray = 0; hipArray *d_transferFuncArray; typedef unsigned char VolumeType; //typedef unsigned short VolumeType; texture<VolumeType, 3, hipReadModeNormalizedFloat> tex; // 3D texture texture<float4, 1, hipReadModeElementType> transferTex; // 1D transfer function texture typedef struct { float4 m[3]; } float3x4; __constant__ float3x4 c_invViewMatrix; // inverse view matrix struct Ray { float3 o; // origin float3 d; // direction }; // intersect ray with a box // http://www.siggraph.org/education/materials/HyperGraph/raytrace/rtinter3.htm __device__ int intersectBox(Ray r, float3 boxmin, float3 boxmax, float *tnear, float *tfar) { // compute intersection of ray with all six bbox planes float3 invR = make_float3(1.0f) / r.d; float3 tbot = invR * (boxmin - r.o); float3 ttop = invR * (boxmax - r.o); // re-order intersections to find smallest and largest on each axis float3 tmin = fminf(ttop, tbot); float3 tmax = fmaxf(ttop, tbot); // find the largest tmin and the smallest tmax float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z)); float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z)); *tnear = largest_tmin; *tfar = smallest_tmax; return smallest_tmax > largest_tmin; } // transform vector by matrix (no translation) __device__ float3 mul(const float3x4 &M, const float3 &v) { float3 r; r.x = dot(v, make_float3(M.m[0])); r.y = dot(v, make_float3(M.m[1])); r.z = dot(v, make_float3(M.m[2])); return r; } // transform vector by matrix with translation __device__ float4 mul(const float3x4 &M, const float4 &v) { float4 r; r.x = dot(v, M.m[0]); r.y = dot(v, M.m[1]); r.z = dot(v, M.m[2]); r.w = 1.0f; return r; } __device__ uint rgbaFloatToInt(float4 rgba) { rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0] rgba.y = __saturatef(rgba.y); rgba.z = __saturatef(rgba.z); rgba.w = __saturatef(rgba.w); return (uint(rgba.w*255)<<24) | (uint(rgba.z*255)<<16) | (uint(rgba.y*255)<<8) | uint(rgba.x*255); } __global__ void d_render(uint *d_output, uint imageW, uint imageH, float density, float brightness, float transferOffset, float transferScale) { const int maxSteps = 500; const float tstep = 0.01f; const float opacityThreshold = 0.95f; const float3 boxMin = make_float3(-1.0f, -1.0f, -1.0f); const float3 boxMax = make_float3(1.0f, 1.0f, 1.0f); uint x = blockIdx.x*blockDim.x + threadIdx.x; uint y = blockIdx.y*blockDim.y + threadIdx.y; if ((x >= imageW) || (y >= imageH)) return; float u = (x / (float) imageW)*2.0f-1.0f; float v = (y / (float) imageH)*2.0f-1.0f; // calculate eye ray in world space Ray eyeRay; eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f))); eyeRay.d = normalize(make_float3(u, v, -2.0f)); eyeRay.d = mul(c_invViewMatrix, eyeRay.d); // find intersection with box float tnear, tfar; int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar); if 
(!hit) return; if (tnear < 0.0f) tnear = 0.0f; // clamp to near plane // march along ray from front to back, accumulating color float4 sum = make_float4(0.0f); float t = tnear; float3 pos = eyeRay.o + eyeRay.d*tnear; float3 step = eyeRay.d*tstep; for(int i=0; i<maxSteps; i++) { // read from 3D texture // remap position to [0, 1] coordinates float sample = tex3D(tex, pos.x*0.5f+0.5f, pos.y*0.5f+0.5f, pos.z*0.5f+0.5f); //sample *= 64.0f; // scale for 10-bit data // lookup in transfer function texture float4 col = tex1D(transferTex, (sample-transferOffset)*transferScale); col.w *= density; // "under" operator for back-to-front blending //sum = lerp(sum, col, col.w); // pre-multiply alpha col.x *= col.w; col.y *= col.w; col.z *= col.w; // "over" operator for front-to-back blending sum = sum + col*(1.0f - sum.w); // exit early if opaque if (sum.w > opacityThreshold) break; t += tstep; if (t > tfar) break; pos += step; } sum *= brightness; // write output color d_output[y*imageW + x] = rgbaFloatToInt(sum); } extern "C" void setTextureFilterMode(bool bLinearFilter) { tex.filterMode = bLinearFilter ? hipFilterModeLinear : hipFilterModePoint; } extern "C" void initCuda(void *h_volume, hipExtent volumeSize) { // create 3D array hipChannelFormatDesc channelDesc = hipCreateChannelDesc<VolumeType>(); cutilSafeCall( hipMalloc3DArray(&d_volumeArray, &channelDesc, volumeSize) ); // copy data to 3D array hipMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_hipPitchedPtr(h_volume, volumeSize.width*sizeof(VolumeType), volumeSize.width, volumeSize.height); copyParams.dstArray = d_volumeArray; copyParams.extent = volumeSize; copyParams.kind = hipMemcpyHostToDevice; cutilSafeCall( hipMemcpy3D(&copyParams) ); // set texture parameters tex.normalized = true; // access with normalized texture coordinates tex.filterMode = hipFilterModeLinear; // linear interpolation tex.addressMode[0] = hipAddressModeClamp; // clamp texture coordinates tex.addressMode[1] = hipAddressModeClamp; // bind array to 3D texture cutilSafeCall(hipBindTextureToArray(tex, d_volumeArray, channelDesc)); // create transfer function texture float4 transferFunc[] = { { 0.0, 0.0, 0.0, 0.0, }, { 1.0, 0.0, 0.0, 1.0, }, { 1.0, 0.5, 0.0, 1.0, }, { 1.0, 1.0, 0.0, 1.0, }, { 0.0, 1.0, 0.0, 1.0, }, { 0.0, 1.0, 1.0, 1.0, }, { 0.0, 0.0, 1.0, 1.0, }, { 1.0, 0.0, 1.0, 1.0, }, { 0.0, 0.0, 0.0, 0.0, }, }; hipChannelFormatDesc channelDesc2 = hipCreateChannelDesc<float4>(); hipArray* d_transferFuncArray; cutilSafeCall(hipMallocArray( &d_transferFuncArray, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1)); cutilSafeCall(hipMemcpyToArray( d_transferFuncArray, 0, 0, transferFunc, sizeof(transferFunc), hipMemcpyHostToDevice)); transferTex.filterMode = hipFilterModeLinear; transferTex.normalized = true; // access with normalized texture coordinates transferTex.addressMode[0] = hipAddressModeClamp; // wrap texture coordinates // Bind the array to the texture cutilSafeCall( hipBindTextureToArray( transferTex, d_transferFuncArray, channelDesc2)); } extern "C" void freeCudaBuffers() { cutilSafeCall(hipFreeArray(d_volumeArray)); cutilSafeCall(hipFreeArray(d_transferFuncArray)); } extern "C" void render_kernel(dim3 gridSize, dim3 blockSize, uint *d_output, uint imageW, uint imageH, float density, float brightness, float transferOffset, float transferScale) { hipLaunchKernelGGL(( d_render), dim3(gridSize), dim3(blockSize), 0, 0, d_output, imageW, imageH, density, brightness, transferOffset, transferScale); } extern "C" void copyInvViewMatrix(float *invViewMatrix, 
size_t sizeofMatrix) { cutilSafeCall( hipMemcpyToSymbol(c_invViewMatrix, invViewMatrix, sizeofMatrix) ); } #endif // #ifndef _VOLUMERENDER_KERNEL_CU_
169e5007782e1bb5447c739a3ab8ddc2d04b4c20.cu
/* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // Simple 3D volume renderer #ifndef _VOLUMERENDER_KERNEL_CU_ #define _VOLUMERENDER_KERNEL_CU_ #include <cutil_inline.h> #include <cutil_math.h> typedef unsigned int uint; typedef unsigned char uchar; cudaArray *d_volumeArray = 0; cudaArray *d_transferFuncArray; typedef unsigned char VolumeType; //typedef unsigned short VolumeType; texture<VolumeType, 3, cudaReadModeNormalizedFloat> tex; // 3D texture texture<float4, 1, cudaReadModeElementType> transferTex; // 1D transfer function texture typedef struct { float4 m[3]; } float3x4; __constant__ float3x4 c_invViewMatrix; // inverse view matrix struct Ray { float3 o; // origin float3 d; // direction }; // intersect ray with a box // http://www.siggraph.org/education/materials/HyperGraph/raytrace/rtinter3.htm __device__ int intersectBox(Ray r, float3 boxmin, float3 boxmax, float *tnear, float *tfar) { // compute intersection of ray with all six bbox planes float3 invR = make_float3(1.0f) / r.d; float3 tbot = invR * (boxmin - r.o); float3 ttop = invR * (boxmax - r.o); // re-order intersections to find smallest and largest on each axis float3 tmin = fminf(ttop, tbot); float3 tmax = fmaxf(ttop, tbot); // find the largest tmin and the smallest tmax float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z)); float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z)); *tnear = largest_tmin; *tfar = smallest_tmax; return smallest_tmax > largest_tmin; } // transform vector by matrix (no translation) __device__ float3 mul(const float3x4 &M, const float3 &v) { float3 r; r.x = dot(v, make_float3(M.m[0])); r.y = dot(v, make_float3(M.m[1])); r.z = dot(v, make_float3(M.m[2])); return r; } // transform vector by matrix with translation __device__ float4 mul(const float3x4 &M, const float4 &v) { float4 r; r.x = dot(v, M.m[0]); r.y = dot(v, M.m[1]); r.z = dot(v, M.m[2]); r.w = 1.0f; return r; } __device__ uint rgbaFloatToInt(float4 rgba) { rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0] rgba.y = __saturatef(rgba.y); rgba.z = __saturatef(rgba.z); rgba.w = __saturatef(rgba.w); return (uint(rgba.w*255)<<24) | (uint(rgba.z*255)<<16) | (uint(rgba.y*255)<<8) | uint(rgba.x*255); } __global__ void d_render(uint *d_output, uint imageW, uint imageH, float density, float brightness, float transferOffset, float transferScale) { const int maxSteps = 500; const float tstep = 0.01f; const float opacityThreshold = 0.95f; const float3 boxMin = make_float3(-1.0f, -1.0f, -1.0f); const float3 boxMax = make_float3(1.0f, 1.0f, 1.0f); uint x = blockIdx.x*blockDim.x + threadIdx.x; uint y = blockIdx.y*blockDim.y + threadIdx.y; if ((x >= imageW) || (y >= imageH)) return; float u = (x / (float) imageW)*2.0f-1.0f; float v = (y / (float) imageH)*2.0f-1.0f; // calculate eye ray in world space Ray eyeRay; eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f))); eyeRay.d = normalize(make_float3(u, v, -2.0f)); eyeRay.d = mul(c_invViewMatrix, eyeRay.d); // find intersection with box float tnear, tfar; int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar); if (!hit) return; if (tnear < 0.0f) tnear = 0.0f; // clamp to near plane // march along ray 
from front to back, accumulating color float4 sum = make_float4(0.0f); float t = tnear; float3 pos = eyeRay.o + eyeRay.d*tnear; float3 step = eyeRay.d*tstep; for(int i=0; i<maxSteps; i++) { // read from 3D texture // remap position to [0, 1] coordinates float sample = tex3D(tex, pos.x*0.5f+0.5f, pos.y*0.5f+0.5f, pos.z*0.5f+0.5f); //sample *= 64.0f; // scale for 10-bit data // lookup in transfer function texture float4 col = tex1D(transferTex, (sample-transferOffset)*transferScale); col.w *= density; // "under" operator for back-to-front blending //sum = lerp(sum, col, col.w); // pre-multiply alpha col.x *= col.w; col.y *= col.w; col.z *= col.w; // "over" operator for front-to-back blending sum = sum + col*(1.0f - sum.w); // exit early if opaque if (sum.w > opacityThreshold) break; t += tstep; if (t > tfar) break; pos += step; } sum *= brightness; // write output color d_output[y*imageW + x] = rgbaFloatToInt(sum); } extern "C" void setTextureFilterMode(bool bLinearFilter) { tex.filterMode = bLinearFilter ? cudaFilterModeLinear : cudaFilterModePoint; } extern "C" void initCuda(void *h_volume, cudaExtent volumeSize) { // create 3D array cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<VolumeType>(); cutilSafeCall( cudaMalloc3DArray(&d_volumeArray, &channelDesc, volumeSize) ); // copy data to 3D array cudaMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_cudaPitchedPtr(h_volume, volumeSize.width*sizeof(VolumeType), volumeSize.width, volumeSize.height); copyParams.dstArray = d_volumeArray; copyParams.extent = volumeSize; copyParams.kind = cudaMemcpyHostToDevice; cutilSafeCall( cudaMemcpy3D(&copyParams) ); // set texture parameters tex.normalized = true; // access with normalized texture coordinates tex.filterMode = cudaFilterModeLinear; // linear interpolation tex.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates tex.addressMode[1] = cudaAddressModeClamp; // bind array to 3D texture cutilSafeCall(cudaBindTextureToArray(tex, d_volumeArray, channelDesc)); // create transfer function texture float4 transferFunc[] = { { 0.0, 0.0, 0.0, 0.0, }, { 1.0, 0.0, 0.0, 1.0, }, { 1.0, 0.5, 0.0, 1.0, }, { 1.0, 1.0, 0.0, 1.0, }, { 0.0, 1.0, 0.0, 1.0, }, { 0.0, 1.0, 1.0, 1.0, }, { 0.0, 0.0, 1.0, 1.0, }, { 1.0, 0.0, 1.0, 1.0, }, { 0.0, 0.0, 0.0, 0.0, }, }; cudaChannelFormatDesc channelDesc2 = cudaCreateChannelDesc<float4>(); cudaArray* d_transferFuncArray; cutilSafeCall(cudaMallocArray( &d_transferFuncArray, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1)); cutilSafeCall(cudaMemcpyToArray( d_transferFuncArray, 0, 0, transferFunc, sizeof(transferFunc), cudaMemcpyHostToDevice)); transferTex.filterMode = cudaFilterModeLinear; transferTex.normalized = true; // access with normalized texture coordinates transferTex.addressMode[0] = cudaAddressModeClamp; // wrap texture coordinates // Bind the array to the texture cutilSafeCall( cudaBindTextureToArray( transferTex, d_transferFuncArray, channelDesc2)); } extern "C" void freeCudaBuffers() { cutilSafeCall(cudaFreeArray(d_volumeArray)); cutilSafeCall(cudaFreeArray(d_transferFuncArray)); } extern "C" void render_kernel(dim3 gridSize, dim3 blockSize, uint *d_output, uint imageW, uint imageH, float density, float brightness, float transferOffset, float transferScale) { d_render<<<gridSize, blockSize>>>( d_output, imageW, imageH, density, brightness, transferOffset, transferScale); } extern "C" void copyInvViewMatrix(float *invViewMatrix, size_t sizeofMatrix) { cutilSafeCall( cudaMemcpyToSymbol(c_invViewMatrix, invViewMatrix, 
sizeofMatrix) ); } #endif // #ifndef _VOLUMERENDER_KERNEL_CU_
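The CUDA half of this volume-renderer pair ends with its extern "C" wrappers. As a hedged host-side sketch (not part of the original NVIDIA sample), a caller could size the launch as follows; launch_render and the density/brightness/transfer values are illustrative placeholders, and the kernel's own bounds check on x and y covers the rounded-up grid.

#include <cuda_runtime.h>

typedef unsigned int uint;

// Wrapper declared in the renderer file above.
extern "C" void render_kernel(dim3 gridSize, dim3 blockSize, uint *d_output,
                              uint imageW, uint imageH, float density,
                              float brightness, float transferOffset,
                              float transferScale);

// Illustrative only: one thread per pixel, grid rounded up to full 16x16 tiles.
void launch_render(uint *d_output, uint width, uint height)
{
    dim3 blockSize(16, 16);
    dim3 gridSize((width  + blockSize.x - 1) / blockSize.x,
                  (height + blockSize.y - 1) / blockSize.y);
    render_kernel(gridSize, blockSize, d_output, width, height,
                  0.05f, 1.0f, 0.0f, 1.0f);  // placeholder render parameters
}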
f76f3e039bc20c2e5f9e0156f3ff12a91a28bcd3.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "addScalar.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int a = 2; int b = 2; int *ptrC = NULL; hipMalloc(&ptrC, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( addScalar), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,ptrC); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( addScalar), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,ptrC); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( addScalar), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,ptrC); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
f76f3e039bc20c2e5f9e0156f3ff12a91a28bcd3.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "addScalar.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int a = 2; int b = 2; int *ptrC = NULL; cudaMalloc(&ptrC, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); addScalar<<<gridBlock,threadBlock>>>(a,b,ptrC); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { addScalar<<<gridBlock,threadBlock>>>(a,b,ptrC); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { addScalar<<<gridBlock,threadBlock>>>(a,b,ptrC); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
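addScalar.cu itself is not included in this row, so the kernel body is unknown; the benchmark's call site only fixes the signature addScalar(int, int, int*). A hypothetical definition consistent with that call is sketched below. Because the harness allocates only XSIZE*YSIZE bytes and passes no element count, the sketch deliberately writes a single element rather than one per thread.

#include <cuda_runtime.h>

// Hypothetical stand-in for the kernel pulled in via "addScalar.cu" above.
// Only the signature is known from the call addScalar<<<grid,block>>>(a, b, ptrC);
// writing one element keeps the sketch safe for any buffer size.
__global__ void addScalar(int a, int b, int *c)
{
    if (blockIdx.x == 0 && blockIdx.y == 0 &&
        threadIdx.x == 0 && threadIdx.y == 0)
        c[0] = a + b;
}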
d43d3811990e804e0b50186a1446048e7c856797.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <memory> #include <thrust/count.h> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <vector> #include "caffe/layers/mtcnn_bbox_layer.hpp" namespace caffe { template <typename Dtype> __global__ void filter_by_threshold(const Dtype *prob, const int prob_cnt, const Dtype threshold, int *out, int *out_size) { // __shared__ int local_idx[CAFFE_CUDA_NUM_THREADS]; auto x = blockIdx.x * blockDim.x + threadIdx.x; if (x < prob_cnt) { if (prob[x] > threshold) { int old_size = atomicAdd(out_size, 1); out[old_size] = x; // local_idx[threadIdx.x]=x; } else { // local_idx[threadIdx.x]=-1; } /* __syncthreads() ; if(threadIdx.x==0) { } */ } } template <typename Dtype> __global__ void generateBBox(const Dtype scale, const int height, const int width, const int index_cnt, const int *index_data, const Dtype *bbox_reg, const Dtype *prob, const int stride, const int cellsize, Dtype *out) { CUDA_KERNEL_LOOP(i, index_cnt) { int idx = index_data[i]; int h = idx / width; int w = idx % width; auto out_ptr = out + i * 9; out_ptr[0] = static_cast<int>(1e-4 + ((stride * h + 1) / scale - 1)); out_ptr[1] = static_cast<int>(1e-4 + ((stride * w + 1) / scale - 1)); out_ptr[2] = static_cast<int>(1e-4 + ((stride * h + cellsize) / scale - 1)); out_ptr[3] = static_cast<int>(1e-4 + ((stride * w + cellsize) / scale - 1)); out_ptr[4] = prob[idx]; out_ptr[5] = (bbox_reg[0 * width * height + idx]); out_ptr[6] = (bbox_reg[1 * width * height + idx]); out_ptr[7] = (bbox_reg[2 * width * height + idx]); out_ptr[8] = (bbox_reg[3 * width * height + idx]); } } template <typename Dtype> void MTCNNBBoxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom, const vector<Blob<Dtype> *> &top) { Forward_const_gpu(bottom, top); } template <typename Dtype> void MTCNNBBoxLayer<Dtype>::Forward_const_gpu( const vector<Blob<Dtype> *> &bottom, const vector<Blob<Dtype> *> &top) const { return Forward_const_cpu(bottom, top); const auto bbox_reg = bottom[0]->gpu_data(); const auto &shape = bottom[1]->shape(); const auto prob = bottom[1]->gpu_data() + shape[2] * shape[3]; const auto scale = bottom[2]->cpu_data()[0]; std::unique_ptr<Blob<int>> indices_ptr; indices_ptr.reset(new Blob<int>(shape[2] * shape[3], 1, 1, 1)); std::unique_ptr<Blob<int>> index_cnt_ptr; index_cnt_ptr.reset(new Blob<int>(1, 1, 1, 1)); index_cnt_ptr->mutable_cpu_data()[0] = 0; hipLaunchKernelGGL(( filter_by_threshold<Dtype>) , dim3(CAFFE_GET_BLOCKS(shape[2] * shape[3])), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, prob, shape[2] * shape[3], threshold_, indices_ptr->mutable_gpu_data(), index_cnt_ptr->mutable_gpu_data()); auto cnt = indices_ptr->mutable_cpu_data()[0]; if (cnt == 0) { return; } top[0]->Reshape(1, 1, cnt, 9); hipLaunchKernelGGL(( generateBBox<Dtype>), dim3(CAFFE_GET_BLOCKS(cnt)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, scale, shape[2], shape[3], (int)cnt, indices_ptr->gpu_data(), bbox_reg, prob, stride_, cellsize_, top[0]->mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS_CONST(MTCNNBBoxLayer); } // namespace caffe
d43d3811990e804e0b50186a1446048e7c856797.cu
#include <algorithm> #include <memory> #include <thrust/count.h> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <vector> #include "caffe/layers/mtcnn_bbox_layer.hpp" namespace caffe { template <typename Dtype> __global__ void filter_by_threshold(const Dtype *prob, const int prob_cnt, const Dtype threshold, int *out, int *out_size) { // __shared__ int local_idx[CAFFE_CUDA_NUM_THREADS]; auto x = blockIdx.x * blockDim.x + threadIdx.x; if (x < prob_cnt) { if (prob[x] > threshold) { int old_size = atomicAdd(out_size, 1); out[old_size] = x; // local_idx[threadIdx.x]=x; } else { // local_idx[threadIdx.x]=-1; } /* __syncthreads() ; if(threadIdx.x==0) { } */ } } template <typename Dtype> __global__ void generateBBox(const Dtype scale, const int height, const int width, const int index_cnt, const int *index_data, const Dtype *bbox_reg, const Dtype *prob, const int stride, const int cellsize, Dtype *out) { CUDA_KERNEL_LOOP(i, index_cnt) { int idx = index_data[i]; int h = idx / width; int w = idx % width; auto out_ptr = out + i * 9; out_ptr[0] = static_cast<int>(1e-4 + ((stride * h + 1) / scale - 1)); out_ptr[1] = static_cast<int>(1e-4 + ((stride * w + 1) / scale - 1)); out_ptr[2] = static_cast<int>(1e-4 + ((stride * h + cellsize) / scale - 1)); out_ptr[3] = static_cast<int>(1e-4 + ((stride * w + cellsize) / scale - 1)); out_ptr[4] = prob[idx]; out_ptr[5] = (bbox_reg[0 * width * height + idx]); out_ptr[6] = (bbox_reg[1 * width * height + idx]); out_ptr[7] = (bbox_reg[2 * width * height + idx]); out_ptr[8] = (bbox_reg[3 * width * height + idx]); } } template <typename Dtype> void MTCNNBBoxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom, const vector<Blob<Dtype> *> &top) { Forward_const_gpu(bottom, top); } template <typename Dtype> void MTCNNBBoxLayer<Dtype>::Forward_const_gpu( const vector<Blob<Dtype> *> &bottom, const vector<Blob<Dtype> *> &top) const { return Forward_const_cpu(bottom, top); const auto bbox_reg = bottom[0]->gpu_data(); const auto &shape = bottom[1]->shape(); const auto prob = bottom[1]->gpu_data() + shape[2] * shape[3]; const auto scale = bottom[2]->cpu_data()[0]; std::unique_ptr<Blob<int>> indices_ptr; indices_ptr.reset(new Blob<int>(shape[2] * shape[3], 1, 1, 1)); std::unique_ptr<Blob<int>> index_cnt_ptr; index_cnt_ptr.reset(new Blob<int>(1, 1, 1, 1)); index_cnt_ptr->mutable_cpu_data()[0] = 0; filter_by_threshold<Dtype> <<<CAFFE_GET_BLOCKS(shape[2] * shape[3]), CAFFE_CUDA_NUM_THREADS>>>( prob, shape[2] * shape[3], threshold_, indices_ptr->mutable_gpu_data(), index_cnt_ptr->mutable_gpu_data()); auto cnt = indices_ptr->mutable_cpu_data()[0]; if (cnt == 0) { return; } top[0]->Reshape(1, 1, cnt, 9); generateBBox<Dtype><<<CAFFE_GET_BLOCKS(cnt), CAFFE_CUDA_NUM_THREADS>>>( scale, shape[2], shape[3], (int)cnt, indices_ptr->gpu_data(), bbox_reg, prob, stride_, cellsize_, top[0]->mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS_CONST(MTCNNBBoxLayer); } // namespace caffe
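The filter_by_threshold kernel above is a standard atomicAdd-based stream compaction: every thread that passes the predicate reserves an output slot with atomicAdd, so the surviving indices come out densely packed but in no particular order. A minimal standalone version of the same pattern, with hypothetical names and none of the Caffe plumbing, for reference:

#include <cuda_runtime.h>

// Same compaction idea as filter_by_threshold above.
// out_count must be zeroed before launch; the result order is non-deterministic.
__global__ void compact_indices(const float *vals, int n, float threshold,
                                int *out_idx, int *out_count)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n && vals[i] > threshold) {
        int slot = atomicAdd(out_count, 1);  // reserve a unique output slot
        out_idx[slot] = i;
    }
}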
cf1bff21b28444dd295f4ededfa7c2f7dcd38a91.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "kernel.hip" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const unsigned char *src = NULL; hipMalloc(&src, XSIZE*YSIZE); unsigned char *dst = NULL; hipMalloc(&dst, XSIZE*YSIZE); int level = 1; const size_t width = XSIZE*YSIZE; const size_t height = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, src,dst,level,width,height); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, src,dst,level,width,height); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, src,dst,level,width,height); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
cf1bff21b28444dd295f4ededfa7c2f7dcd38a91.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const unsigned char *src = NULL; cudaMalloc(&src, XSIZE*YSIZE); unsigned char *dst = NULL; cudaMalloc(&dst, XSIZE*YSIZE); int level = 1; const size_t width = XSIZE*YSIZE; const size_t height = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); kernel<<<gridBlock,threadBlock>>>(src,dst,level,width,height); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { kernel<<<gridBlock,threadBlock>>>(src,dst,level,width,height); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { kernel<<<gridBlock,threadBlock>>>(src,dst,level,width,height); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
39d41575acd745be2f4848c5a5c105bd09a9410f.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/scan.h> #include "common.h" #include "thrust.h" namespace StreamCompaction { namespace Thrust { float timeThrust(int n, int *odata, const int *idata) { int* dev_odata; int* dev_idata; hipMalloc((void**)&dev_odata, n * sizeof(int)); hipMalloc((void**)&dev_idata, n * sizeof(int)); hipMemcpy(dev_idata, idata, sizeof(int) * n, hipMemcpyHostToDevice); hipMemcpy(dev_odata, dev_idata, sizeof(int) * n, hipMemcpyDeviceToDevice); thrust::host_vector<int> dv_in(idata, idata + n); thrust::host_vector<int> dv_out(odata, odata + n); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin()); hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); //printf("\nELAPSED TIME = %f\n", milliseconds); hipEventDestroy(start); hipEventDestroy(stop); hipFree(dev_odata); hipFree(dev_idata); return milliseconds; } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { // TODO use `thrust::exclusive_scan` // example: for device_vectors dv_in and dv_out: // thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin()); //thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin()); thrust::host_vector<int> dv_in(idata, idata + n); thrust::host_vector<int> dv_out(odata, odata + n); thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin()); //odata = &(dv_out.front()); for (int i = 0; i < n; i++) odata[i] = dv_out[i]; } } }
39d41575acd745be2f4848c5a5c105bd09a9410f.cu
#include <cuda.h> #include <cuda_runtime.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/scan.h> #include "common.h" #include "thrust.h" namespace StreamCompaction { namespace Thrust { float timeThrust(int n, int *odata, const int *idata) { int* dev_odata; int* dev_idata; cudaMalloc((void**)&dev_odata, n * sizeof(int)); cudaMalloc((void**)&dev_idata, n * sizeof(int)); cudaMemcpy(dev_idata, idata, sizeof(int) * n, cudaMemcpyHostToDevice); cudaMemcpy(dev_odata, dev_idata, sizeof(int) * n, cudaMemcpyDeviceToDevice); thrust::host_vector<int> dv_in(idata, idata + n); thrust::host_vector<int> dv_out(odata, odata + n); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin()); cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); //printf("\nELAPSED TIME = %f\n", milliseconds); cudaEventDestroy(start); cudaEventDestroy(stop); cudaFree(dev_odata); cudaFree(dev_idata); return milliseconds; } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { // TODO use `thrust::exclusive_scan` // example: for device_vectors dv_in and dv_out: // thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin()); //thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin()); thrust::host_vector<int> dv_in(idata, idata + n); thrust::host_vector<int> dv_out(odata, odata + n); thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin()); //odata = &(dv_out.front()); for (int i = 0; i < n; i++) odata[i] = dv_out[i]; } } }
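The TODO comment in scan() mentions thrust::exclusive_scan over device_vectors, but the code above builds host_vectors and scans on the host path. A small sketch of the device_vector variant it hints at; scan_on_device is an illustrative name, not part of this project.

#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <thrust/copy.h>

// Copy the input to the GPU, run the exclusive scan there, copy the result back.
void scan_on_device(int n, int *odata, const int *idata)
{
    thrust::device_vector<int> d_in(idata, idata + n);   // host -> device copy
    thrust::device_vector<int> d_out(n);
    thrust::exclusive_scan(d_in.begin(), d_in.end(), d_out.begin());
    thrust::copy(d_out.begin(), d_out.end(), odata);     // device -> host copy
}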
36fe0e26e725c96ac9dec06bc0b0bdd7e3bff299.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <sys/time.h> extern "C" { #include "timer.h" } #define NTIMES 16 // CUDA kernel version of stream triad __global__ void StreamTriad(int n, double scalar, double *a, double *b, double *c){ int i = blockIdx.x*blockDim.x+threadIdx.x; // Protect from going out-of-bounds if (i >= n) return; c[i] = a[i] + scalar*b[i]; } int main(int argc, char *argv[]){ struct timespec tkernel, ttotal; // initializing data and arrays int stream_array_size = 80000000; double scalar = 3.0, tkernel_sum = 0.0, ttotal_sum = 0.0; // allocate host memory and initialize double *a = (double *)malloc(stream_array_size*sizeof(double)); double *b = (double *)malloc(stream_array_size*sizeof(double)); double *c = (double *)malloc(stream_array_size*sizeof(double)); for (int i=0; i<stream_array_size; i++) { a[i] = 1.0; b[i] = 2.0; } // allocate device memory. suffix of _d indicates a device pointer double *a_d, *b_d, *c_d; hipMalloc(&a_d, stream_array_size*sizeof(double)); hipMalloc(&b_d, stream_array_size*sizeof(double)); hipMalloc(&c_d, stream_array_size*sizeof(double)); // setting block size and padding total grid size to get even block sizes int blocksize = 512; int gridsize = (stream_array_size + blocksize - 1)/blocksize; for (int k=0; k<NTIMES; k++){ cpu_timer_start(&ttotal); // copying array data from host to device hipMemcpy(a_d, a, stream_array_size*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(b_d, b, stream_array_size*sizeof(double), hipMemcpyHostToDevice); // cuda memcopy to device returns after buffer available, so synchronize to // get accurate timing for kernel only hipDeviceSynchronize(); cpu_timer_start(&tkernel); // launch stream triad kernel hipLaunchKernelGGL(( StreamTriad), dim3(gridsize), dim3(blocksize), 0, 0, stream_array_size, scalar, a_d, b_d, c_d); // need to force completion to get timing hipDeviceSynchronize(); tkernel_sum += cpu_timer_stop(tkernel); // cuda memcpy from device to host blocks for completion so no need for synchronize hipMemcpy(c, c_d, stream_array_size*sizeof(double), hipMemcpyDeviceToHost); ttotal_sum += cpu_timer_stop(ttotal); // check results and print errors if found. limit to only 10 errors per iteration for (int i=0, icount=0; i<stream_array_size && icount < 10; i++){ if (c[i] != 1.0 + 3.0*2.0) { printf("Error with result c[%d]=%lf on iter %d\n",i,c[i],k); icount++; } } } printf("Average runtime is %lf msecs data transfer is %lf msecs\n", tkernel_sum/NTIMES, (ttotal_sum - tkernel_sum)/NTIMES); hipFree(a_d); hipFree(b_d); hipFree(c_d); free(a); free(b); free(c); }
36fe0e26e725c96ac9dec06bc0b0bdd7e3bff299.cu
#include <stdio.h> #include <sys/time.h> extern "C" { #include "timer.h" } #define NTIMES 16 // CUDA kernel version of stream triad __global__ void StreamTriad(int n, double scalar, double *a, double *b, double *c){ int i = blockIdx.x*blockDim.x+threadIdx.x; // Protect from going out-of-bounds if (i >= n) return; c[i] = a[i] + scalar*b[i]; } int main(int argc, char *argv[]){ struct timespec tkernel, ttotal; // initializing data and arrays int stream_array_size = 80000000; double scalar = 3.0, tkernel_sum = 0.0, ttotal_sum = 0.0; // allocate host memory and initialize double *a = (double *)malloc(stream_array_size*sizeof(double)); double *b = (double *)malloc(stream_array_size*sizeof(double)); double *c = (double *)malloc(stream_array_size*sizeof(double)); for (int i=0; i<stream_array_size; i++) { a[i] = 1.0; b[i] = 2.0; } // allocate device memory. suffix of _d indicates a device pointer double *a_d, *b_d, *c_d; cudaMalloc(&a_d, stream_array_size*sizeof(double)); cudaMalloc(&b_d, stream_array_size*sizeof(double)); cudaMalloc(&c_d, stream_array_size*sizeof(double)); // setting block size and padding total grid size to get even block sizes int blocksize = 512; int gridsize = (stream_array_size + blocksize - 1)/blocksize; for (int k=0; k<NTIMES; k++){ cpu_timer_start(&ttotal); // copying array data from host to device cudaMemcpy(a_d, a, stream_array_size*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(b_d, b, stream_array_size*sizeof(double), cudaMemcpyHostToDevice); // cuda memcopy to device returns after buffer available, so synchronize to // get accurate timing for kernel only cudaDeviceSynchronize(); cpu_timer_start(&tkernel); // launch stream triad kernel StreamTriad<<<gridsize, blocksize>>>(stream_array_size, scalar, a_d, b_d, c_d); // need to force completion to get timing cudaDeviceSynchronize(); tkernel_sum += cpu_timer_stop(tkernel); // cuda memcpy from device to host blocks for completion so no need for synchronize cudaMemcpy(c, c_d, stream_array_size*sizeof(double), cudaMemcpyDeviceToHost); ttotal_sum += cpu_timer_stop(ttotal); // check results and print errors if found. limit to only 10 errors per iteration for (int i=0, icount=0; i<stream_array_size && icount < 10; i++){ if (c[i] != 1.0 + 3.0*2.0) { printf("Error with result c[%d]=%lf on iter %d\n",i,c[i],k); icount++; } } } printf("Average runtime is %lf msecs data transfer is %lf msecs\n", tkernel_sum/NTIMES, (ttotal_sum - tkernel_sum)/NTIMES); cudaFree(a_d); cudaFree(b_d); cudaFree(c_d); free(a); free(b); free(c); }
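For reference, each pass of the triad kernel above moves 3 * N * sizeof(double) bytes (a and b read, c written), about 1.92 GB for the 80,000,000-element arrays used here. Assuming the averaged kernel time printed by the program is in milliseconds, as its printf suggests, the effective bandwidth follows directly:

// Back-of-the-envelope effective bandwidth for the stream triad above.
double triad_bandwidth_GBps(int stream_array_size, double avg_kernel_msecs)
{
    double bytes_moved = 3.0 * stream_array_size * sizeof(double); // a, b read; c written
    return bytes_moved / (avg_kernel_msecs * 1.0e-3) / 1.0e9;      // bytes/s -> GB/s
}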
3e75c95a6a6e1422da2470b30e080380195b38b6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/imgproc/imgproc.hpp> #define HANDLE_ERROR(err) (HandleError(err, __FILE__, __LINE__)) using namespace std; using namespace cv; static const int MASK_SIZE = 5; static const int mask[MASK_SIZE][MASK_SIZE] = { {0,1,2,1,0}, {1,4,8,4,1}, {2,8,16,8,2}, {1,4,8,4,1}, {0,1,2,1,0} }; static void HandleError(hipError_t err, string file, int line) { if (err != hipSuccess) { cout << hipGetErrorString(err) << " in " << file << " at line " << line << endl; exit(EXIT_FAILURE); } } __constant__ int dev_mask[MASK_SIZE][MASK_SIZE]; __constant__ int dev_weight; __global__ void gaussianBlur(uchar * inputImage, uchar * outputImage, long width, long height) { long x = (blockIdx.x * blockDim.x) + threadIdx.x; long y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < width-2 && y < height-2 && x>1 && y>1) { long r=0, g=0, b=0; long pixelIn, pixelOut; for (int y_m = 0; y_m<5; y_m++) { for (int x_m = 0; x_m<5; x_m++) { pixelIn = width*(y + y_m - 2) * 3 + (x + x_m - 2) * 3; r += inputImage[pixelIn + 2] * dev_mask[x_m][y_m]; g += inputImage[pixelIn + 1] * dev_mask[x_m][y_m]; b += inputImage[pixelIn] * dev_mask[x_m][y_m]; } } pixelOut = (width - 4)*(y - 2) * 3 + (x - 2) * 3; outputImage[pixelOut + 2] = r / dev_weight; outputImage[pixelOut + 1] = g / dev_weight; outputImage[pixelOut] = b / dev_weight; } } void compute(int blockSize, Mat inputImage, string inputName, string outputName) { Mat outputImage; outputImage = Mat(inputImage.rows, inputImage.cols, CV_8UC3); copyMakeBorder(inputImage, inputImage, 2, 2, 2, 2, BORDER_REPLICATE); int gridWidth, gridHeight; gridWidth = inputImage.cols / blockSize + ((inputImage.cols % blockSize) == 0 ? 0 : 1); gridHeight = inputImage.rows / blockSize + ((inputImage.rows % blockSize) == 0 ? 
0 : 1); long inputSize = sizeof(uchar) * inputImage.rows* inputImage.cols * 3; long outputSize = sizeof(uchar) * outputImage.rows* outputImage.cols * 3; uchar * dev_inputImage; uchar * dev_outputImage; HANDLE_ERROR(hipMalloc((void**)& dev_inputImage, inputSize)); HANDLE_ERROR(hipMalloc((void**)& dev_outputImage, outputSize)); HANDLE_ERROR(hipMemcpy(dev_inputImage, inputImage.data, inputSize, hipMemcpyHostToDevice)); dim3 gridSize(gridWidth, gridHeight); dim3 threadsPerBlock(blockSize, blockSize); hipEvent_t begin, end; float time; HANDLE_ERROR(hipEventCreate(&begin)); HANDLE_ERROR(hipEventCreate(&end)); HANDLE_ERROR(hipEventRecord(begin, 0)); hipLaunchKernelGGL(( gaussianBlur) , dim3(gridSize),dim3(threadsPerBlock) , 0, 0, dev_inputImage, dev_outputImage, inputImage.cols, inputImage.rows); HANDLE_ERROR(hipEventRecord(end, 0)); HANDLE_ERROR(hipEventSynchronize(end)); HANDLE_ERROR(hipEventElapsedTime(&time, begin, end)); HANDLE_ERROR(hipMemcpy(outputImage.data, dev_outputImage, outputSize, hipMemcpyDeviceToHost)); try { imwrite(outputName, outputImage); } catch (Exception &e) { cout << "Exception while writing to file " << e.msg; } HANDLE_ERROR(hipEventDestroy(begin)); HANDLE_ERROR(hipEventDestroy(end)); hipFree(dev_inputImage); hipFree(dev_outputImage); // cout << "X: "<< gridWidth << ", Y: " << gridHeight << endl; // cout << "Threads per block: " << blockSize << endl; cout << "Czas: " << time << "ms" << endl; } int main(int argc, char** argv) { Mat inputImage; string inputName, outputName; int maxThreads = 32; int blockSize = 32; int weight = 0; if (argc < 3) { cout << "Invalid arugments"; return -1; } else { inputName = argv[1]; outputName = argv[2]; if (argc == 4) { blockSize = atoi(argv[3]); } } inputImage = imread(inputName, CV_LOAD_IMAGE_COLOR); if (inputImage.data == false) { cout << "No image defined"; return -1; } for (int xMask = 0; xMask<5; xMask++) { for (int yMask = 0; yMask<5; yMask++) { weight += mask[xMask][yMask]; } } HANDLE_ERROR(hipMemcpyToSymbol(dev_mask, &mask, sizeof(int) * MASK_SIZE * MASK_SIZE)); HANDLE_ERROR(hipMemcpyToSymbol(dev_weight, &weight, sizeof(int))); struct hipDeviceProp_t properties; HANDLE_ERROR(hipGetDeviceProperties(&properties, 0)); if (blockSize <= maxThreads && blockSize > 0) { compute(blockSize, inputImage, inputName, outputName); } else { for(int size=1; size <= maxThreads; size++) { compute(size, inputImage, inputName, outputName); } } return 0; }
3e75c95a6a6e1422da2470b30e080380195b38b6.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/imgproc/imgproc.hpp> #define HANDLE_ERROR(err) (HandleError(err, __FILE__, __LINE__)) using namespace std; using namespace cv; static const int MASK_SIZE = 5; static const int mask[MASK_SIZE][MASK_SIZE] = { {0,1,2,1,0}, {1,4,8,4,1}, {2,8,16,8,2}, {1,4,8,4,1}, {0,1,2,1,0} }; static void HandleError(cudaError_t err, string file, int line) { if (err != cudaSuccess) { cout << cudaGetErrorString(err) << " in " << file << " at line " << line << endl; exit(EXIT_FAILURE); } } __constant__ int dev_mask[MASK_SIZE][MASK_SIZE]; __constant__ int dev_weight; __global__ void gaussianBlur(uchar * inputImage, uchar * outputImage, long width, long height) { long x = (blockIdx.x * blockDim.x) + threadIdx.x; long y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < width-2 && y < height-2 && x>1 && y>1) { long r=0, g=0, b=0; long pixelIn, pixelOut; for (int y_m = 0; y_m<5; y_m++) { for (int x_m = 0; x_m<5; x_m++) { pixelIn = width*(y + y_m - 2) * 3 + (x + x_m - 2) * 3; r += inputImage[pixelIn + 2] * dev_mask[x_m][y_m]; g += inputImage[pixelIn + 1] * dev_mask[x_m][y_m]; b += inputImage[pixelIn] * dev_mask[x_m][y_m]; } } pixelOut = (width - 4)*(y - 2) * 3 + (x - 2) * 3; outputImage[pixelOut + 2] = r / dev_weight; outputImage[pixelOut + 1] = g / dev_weight; outputImage[pixelOut] = b / dev_weight; } } void compute(int blockSize, Mat inputImage, string inputName, string outputName) { Mat outputImage; outputImage = Mat(inputImage.rows, inputImage.cols, CV_8UC3); copyMakeBorder(inputImage, inputImage, 2, 2, 2, 2, BORDER_REPLICATE); int gridWidth, gridHeight; gridWidth = inputImage.cols / blockSize + ((inputImage.cols % blockSize) == 0 ? 0 : 1); gridHeight = inputImage.rows / blockSize + ((inputImage.rows % blockSize) == 0 ? 
0 : 1); long inputSize = sizeof(uchar) * inputImage.rows* inputImage.cols * 3; long outputSize = sizeof(uchar) * outputImage.rows* outputImage.cols * 3; uchar * dev_inputImage; uchar * dev_outputImage; HANDLE_ERROR(cudaMalloc((void**)& dev_inputImage, inputSize)); HANDLE_ERROR(cudaMalloc((void**)& dev_outputImage, outputSize)); HANDLE_ERROR(cudaMemcpy(dev_inputImage, inputImage.data, inputSize, cudaMemcpyHostToDevice)); dim3 gridSize(gridWidth, gridHeight); dim3 threadsPerBlock(blockSize, blockSize); cudaEvent_t begin, end; float time; HANDLE_ERROR(cudaEventCreate(&begin)); HANDLE_ERROR(cudaEventCreate(&end)); HANDLE_ERROR(cudaEventRecord(begin, 0)); gaussianBlur <<< gridSize,threadsPerBlock >>> (dev_inputImage, dev_outputImage, inputImage.cols, inputImage.rows); HANDLE_ERROR(cudaEventRecord(end, 0)); HANDLE_ERROR(cudaEventSynchronize(end)); HANDLE_ERROR(cudaEventElapsedTime(&time, begin, end)); HANDLE_ERROR(cudaMemcpy(outputImage.data, dev_outputImage, outputSize, cudaMemcpyDeviceToHost)); try { imwrite(outputName, outputImage); } catch (Exception &e) { cout << "Exception while writing to file " << e.msg; } HANDLE_ERROR(cudaEventDestroy(begin)); HANDLE_ERROR(cudaEventDestroy(end)); cudaFree(dev_inputImage); cudaFree(dev_outputImage); // cout << "X: "<< gridWidth << ", Y: " << gridHeight << endl; // cout << "Threads per block: " << blockSize << endl; cout << "Czas: " << time << "ms" << endl; } int main(int argc, char** argv) { Mat inputImage; string inputName, outputName; int maxThreads = 32; int blockSize = 32; int weight = 0; if (argc < 3) { cout << "Invalid arugments"; return -1; } else { inputName = argv[1]; outputName = argv[2]; if (argc == 4) { blockSize = atoi(argv[3]); } } inputImage = imread(inputName, CV_LOAD_IMAGE_COLOR); if (inputImage.data == false) { cout << "No image defined"; return -1; } for (int xMask = 0; xMask<5; xMask++) { for (int yMask = 0; yMask<5; yMask++) { weight += mask[xMask][yMask]; } } HANDLE_ERROR(cudaMemcpyToSymbol(dev_mask, &mask, sizeof(int) * MASK_SIZE * MASK_SIZE)); HANDLE_ERROR(cudaMemcpyToSymbol(dev_weight, &weight, sizeof(int))); struct cudaDeviceProp properties; HANDLE_ERROR(cudaGetDeviceProperties(&properties, 0)); if (blockSize <= maxThreads && blockSize > 0) { compute(blockSize, inputImage, inputName, outputName); } else { for(int size=1; size <= maxThreads; size++) { compute(size, inputImage, inputName, outputName); } } return 0; }
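The weight computed in main() above is simply the sum of all 25 mask entries: the row sums 4 + 18 + 36 + 18 + 4 give 80, so each output channel is the 5x5 weighted sum divided by 80. A tiny host-side check of that arithmetic:

// Recomputes the normalisation weight of the 5x5 Gaussian mask used above.
static int mask_weight()
{
    const int m[5][5] = { {0,1,2,1,0}, {1,4,8,4,1}, {2,8,16,8,2},
                          {1,4,8,4,1}, {0,1,2,1,0} };
    int w = 0;
    for (int y = 0; y < 5; ++y)
        for (int x = 0; x < 5; ++x)
            w += m[y][x];
    return w;  // 80
}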
dce2240b4e8c19237992b1202ddb9bcddaa99214.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************** * * Copyright (C) 2015 Culham Centre for Fusion Energy, * United Kingdom Atomic Energy Authority, Oxfordshire OX14 3DB, UK * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ******************************************************************************** * * Program: SPILADY - A Spin-Lattice Dynamics Simulation Program * Version: 1.0 * Date: Aug 2015 * Author: Pui-Wai (Leo) MA * Contact: [email protected] * Address: Culham Centre for Fusion Energy, OX14 3DB, United Kingdom * ********************************************************************************/ #if (defined SDH || defined SDHL || defined SLDH || defined SLDHL || defined SLDNC) && defined GPU #include "spilady.h" #include "prototype_GPU.h" /**************************************************************************** * GPU prototypes ****************************************************************************/ __global__ void LP1ChSp_part1(struct varGPU *var_ptr_d, struct atom_struct *first_atom_ptr_d, double *ave_sx_ptr_d, double *ave_sy_ptr_d, double *ave_sz_ptr_d); __global__ void LP1ChSp_part2(double *ave_sx_ptr_d, double *ave_sy_ptr_d, double *ave_sz_ptr_d); /**************************************************************************** * CPU codes ****************************************************************************/ void check_spin_GPU(int current_step){ size_t size = no_of_MP*no_of_threads*sizeof(double); ave_s = vec_zero(); ave_m = vec_zero(); double *ave_sx_ptr_d; double *ave_sy_ptr_d; double *ave_sz_ptr_d; hipMalloc((void**)&ave_sx_ptr_d, size); hipMalloc((void**)&ave_sy_ptr_d, size); hipMalloc((void**)&ave_sz_ptr_d, size); hipLaunchKernelGGL(( LP1ChSp_part1), dim3(no_of_MP),dim3(no_of_threads), 0, 0, var_ptr_d, first_atom_ptr_d, ave_sx_ptr_d, ave_sy_ptr_d, ave_sz_ptr_d); hipLaunchKernelGGL(( LP1ChSp_part2), dim3(no_of_MP),dim3(no_of_threads), 0, 0, ave_sx_ptr_d, ave_sy_ptr_d, ave_sz_ptr_d); hipMemcpy(&ave_s.x, ave_sx_ptr_d, sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(&ave_s.y, ave_sy_ptr_d, sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(&ave_s.z, ave_sz_ptr_d, sizeof(double), hipMemcpyDeviceToHost); ave_s = vec_divide(ave_s, natom); double ave_s0 = vec_length(ave_s); ave_m = vec_times(-el_g, ave_s); double ave_m0 = vec_length(ave_m); char out_spn_front[] = "spn-"; char out_spn[256]; strcpy(out_spn,out_spn_front); strcat(out_spn,out_body); strcat(out_spn,".dat"); ofstream out_file(out_spn,ios::app); out_file << setiosflags(ios::scientific) << setprecision(15); out_file << current_step << " " << total_time << " " << ave_s.x << " " << ave_s.y << " " << ave_s.z << " " << ave_s0 << " " << ave_m.x << " " << ave_m.y << " " << ave_m.z << " " << ave_m0 << '\n'; out_file.close(); hipFree(ave_sx_ptr_d); hipFree(ave_sy_ptr_d); hipFree(ave_sz_ptr_d); } void check_spin(int current_step){ check_spin_GPU(current_step); } 
/**************************************************************************** * GPU codes ****************************************************************************/ __global__ void LP1ChSp_part1(struct varGPU *var_ptr_d, struct atom_struct *first_atom_ptr_d, double *ave_sx_ptr_d, double *ave_sy_ptr_d, double *ave_sz_ptr_d){ int i = blockIdx.x*blockDim.x + threadIdx.x; *(ave_sx_ptr_d + i) = 0.0; *(ave_sy_ptr_d + i) = 0.0; *(ave_sz_ptr_d + i) = 0.0; int area = blockDim.x*gridDim.x; int k = (var_ptr_d->natom - 1)/area + 1; for (int j = 0; j < k; ++j){ int m = i + j*area; if (m < var_ptr_d->natom) { struct atom_struct *atom_ptr; atom_ptr = first_atom_ptr_d + m; *(ave_sx_ptr_d + i) += atom_ptr->s.x; *(ave_sy_ptr_d + i) += atom_ptr->s.y; *(ave_sz_ptr_d + i) += atom_ptr->s.z; } } __syncthreads(); } __global__ void LP1ChSp_part2(double *ave_sx_ptr_d, double *ave_sy_ptr_d, double *ave_sz_ptr_d){ int depth = blockIdx.x*blockDim.x; if (threadIdx.x == 0){ for (int j = 1; j < blockDim.x; ++j) *(ave_sx_ptr_d + depth) += *(ave_sx_ptr_d + depth + j); } if (threadIdx.x == 1){ for (int j = 1; j < blockDim.x; ++j) *(ave_sy_ptr_d + depth) += *(ave_sy_ptr_d + depth + j); } if (threadIdx.x == 2){ for (int j = 1; j < blockDim.x; ++j) *(ave_sz_ptr_d + depth) += *(ave_sz_ptr_d + depth + j); } __threadfence(); if (blockIdx.x == 0 && threadIdx.x == 0){ for (int j = 1; j < gridDim.x; ++j) *ave_sx_ptr_d += *(ave_sx_ptr_d + j*blockDim.x); } if (blockIdx.x == 0 && threadIdx.x == 1){ for (int j = 1; j < gridDim.x; ++j) *ave_sy_ptr_d += *(ave_sy_ptr_d + j*blockDim.x); } if (blockIdx.x == 0 && threadIdx.x == 2){ for (int j = 1; j < gridDim.x; ++j) *ave_sz_ptr_d += *(ave_sz_ptr_d + j*blockDim.x); } } #endif
dce2240b4e8c19237992b1202ddb9bcddaa99214.cu
/******************************************************************************** * * Copyright (C) 2015 Culham Centre for Fusion Energy, * United Kingdom Atomic Energy Authority, Oxfordshire OX14 3DB, UK * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ******************************************************************************** * * Program: SPILADY - A Spin-Lattice Dynamics Simulation Program * Version: 1.0 * Date: Aug 2015 * Author: Pui-Wai (Leo) MA * Contact: [email protected] * Address: Culham Centre for Fusion Energy, OX14 3DB, United Kingdom * ********************************************************************************/ #if (defined SDH || defined SDHL || defined SLDH || defined SLDHL || defined SLDNC) && defined GPU #include "spilady.h" #include "prototype_GPU.h" /**************************************************************************** * GPU prototypes ****************************************************************************/ __global__ void LP1ChSp_part1(struct varGPU *var_ptr_d, struct atom_struct *first_atom_ptr_d, double *ave_sx_ptr_d, double *ave_sy_ptr_d, double *ave_sz_ptr_d); __global__ void LP1ChSp_part2(double *ave_sx_ptr_d, double *ave_sy_ptr_d, double *ave_sz_ptr_d); /**************************************************************************** * CPU codes ****************************************************************************/ void check_spin_GPU(int current_step){ size_t size = no_of_MP*no_of_threads*sizeof(double); ave_s = vec_zero(); ave_m = vec_zero(); double *ave_sx_ptr_d; double *ave_sy_ptr_d; double *ave_sz_ptr_d; cudaMalloc((void**)&ave_sx_ptr_d, size); cudaMalloc((void**)&ave_sy_ptr_d, size); cudaMalloc((void**)&ave_sz_ptr_d, size); LP1ChSp_part1<<<no_of_MP,no_of_threads>>>(var_ptr_d, first_atom_ptr_d, ave_sx_ptr_d, ave_sy_ptr_d, ave_sz_ptr_d); LP1ChSp_part2<<<no_of_MP,no_of_threads>>>(ave_sx_ptr_d, ave_sy_ptr_d, ave_sz_ptr_d); cudaMemcpy(&ave_s.x, ave_sx_ptr_d, sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(&ave_s.y, ave_sy_ptr_d, sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(&ave_s.z, ave_sz_ptr_d, sizeof(double), cudaMemcpyDeviceToHost); ave_s = vec_divide(ave_s, natom); double ave_s0 = vec_length(ave_s); ave_m = vec_times(-el_g, ave_s); double ave_m0 = vec_length(ave_m); char out_spn_front[] = "spn-"; char out_spn[256]; strcpy(out_spn,out_spn_front); strcat(out_spn,out_body); strcat(out_spn,".dat"); ofstream out_file(out_spn,ios::app); out_file << setiosflags(ios::scientific) << setprecision(15); out_file << current_step << " " << total_time << " " << ave_s.x << " " << ave_s.y << " " << ave_s.z << " " << ave_s0 << " " << ave_m.x << " " << ave_m.y << " " << ave_m.z << " " << ave_m0 << '\n'; out_file.close(); cudaFree(ave_sx_ptr_d); cudaFree(ave_sy_ptr_d); cudaFree(ave_sz_ptr_d); } void check_spin(int current_step){ check_spin_GPU(current_step); } /**************************************************************************** * GPU codes ****************************************************************************/ __global__ void 
LP1ChSp_part1(struct varGPU *var_ptr_d, struct atom_struct *first_atom_ptr_d, double *ave_sx_ptr_d, double *ave_sy_ptr_d, double *ave_sz_ptr_d){ int i = blockIdx.x*blockDim.x + threadIdx.x; *(ave_sx_ptr_d + i) = 0.0; *(ave_sy_ptr_d + i) = 0.0; *(ave_sz_ptr_d + i) = 0.0; int area = blockDim.x*gridDim.x; int k = (var_ptr_d->natom - 1)/area + 1; for (int j = 0; j < k; ++j){ int m = i + j*area; if (m < var_ptr_d->natom) { struct atom_struct *atom_ptr; atom_ptr = first_atom_ptr_d + m; *(ave_sx_ptr_d + i) += atom_ptr->s.x; *(ave_sy_ptr_d + i) += atom_ptr->s.y; *(ave_sz_ptr_d + i) += atom_ptr->s.z; } } __syncthreads(); } __global__ void LP1ChSp_part2(double *ave_sx_ptr_d, double *ave_sy_ptr_d, double *ave_sz_ptr_d){ int depth = blockIdx.x*blockDim.x; if (threadIdx.x == 0){ for (int j = 1; j < blockDim.x; ++j) *(ave_sx_ptr_d + depth) += *(ave_sx_ptr_d + depth + j); } if (threadIdx.x == 1){ for (int j = 1; j < blockDim.x; ++j) *(ave_sy_ptr_d + depth) += *(ave_sy_ptr_d + depth + j); } if (threadIdx.x == 2){ for (int j = 1; j < blockDim.x; ++j) *(ave_sz_ptr_d + depth) += *(ave_sz_ptr_d + depth + j); } __threadfence(); if (blockIdx.x == 0 && threadIdx.x == 0){ for (int j = 1; j < gridDim.x; ++j) *ave_sx_ptr_d += *(ave_sx_ptr_d + j*blockDim.x); } if (blockIdx.x == 0 && threadIdx.x == 1){ for (int j = 1; j < gridDim.x; ++j) *ave_sy_ptr_d += *(ave_sy_ptr_d + j*blockDim.x); } if (blockIdx.x == 0 && threadIdx.x == 2){ for (int j = 1; j < gridDim.x; ++j) *ave_sz_ptr_d += *(ave_sz_ptr_d + j*blockDim.x); } } #endif
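LP1ChSp_part2 above sums the per-thread partials serially from threads 0 to 2 of each block. A common alternative, sketched here and plainly not what SPILADY itself does, is a shared-memory tree reduction per block (block size assumed to be a power of two); the per-block totals would then still need a final pass or an atomicAdd into a single accumulator.

#include <cuda_runtime.h>

// Shared-memory tree reduction over one array of per-thread partial sums.
// Launch as block_sum<<<grid, block, block * sizeof(double)>>>(partials, totals, n).
__global__ void block_sum(const double *partials, double *block_totals, int n)
{
    extern __shared__ double sdata[];
    int tid = threadIdx.x;
    int i = blockIdx.x * blockDim.x + tid;
    sdata[tid] = (i < n) ? partials[i] : 0.0;
    __syncthreads();
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s) sdata[tid] += sdata[tid + s];
        __syncthreads();
    }
    if (tid == 0) block_totals[blockIdx.x] = sdata[0];
}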
4d6b308ef931caeffc912f8eacc129298a09dfdb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2020 Naval Postgraduate School * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include <nps_uw_multibeam_sonar/sonar_calculation_cuda.cuh> // #include <math.h> #include <assert.h> // For complex numbers #include <thrust/complex.h> #include <hip/hip_complex.h> // For rand() function #include <unistd.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> // For FFT #include <hipfft.h> #include <cufftw.h> #include <thrust/device_vector.h> #include <list> #include <chrono> #define BLOCK_SIZE 32 static inline void _safe_cuda_call(hipError_t err, const char *msg, const char *file_name, const int line_number) { if (err != hipSuccess) { fprintf(stderr, "%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n", msg, file_name, line_number, hipGetErrorString(err)); std::cin.get(); exit(EXIT_FAILURE); } } #define SAFE_CALL(call, msg) _safe_cuda_call((call), (msg), __FILE__, __LINE__) /////////////////////////////////////////////////////////////////////////// // Incident Angle Calculation Function // incidence angle is target's normal angle accounting for the ray's azimuth // and elevation __device__ float compute_incidence(float azimuth, float elevation, float *normal) { // ray normal from camera azimuth and elevation float camera_x = cosf(-azimuth) * cosf(elevation); float camera_y = sinf(-azimuth) * cosf(elevation); float camera_z = sinf(elevation); float ray_normal[3] = {camera_x, camera_y, camera_z}; // target normal with axes compensated to camera axes float target_normal[3] = {normal[2], -normal[0], -normal[1]}; // dot product float dot_product = ray_normal[0] * target_normal[0] + ray_normal[1] * target_normal[1] + ray_normal[2] * target_normal[2]; return M_PI - acosf(dot_product); } /////////////////////////////////////////////////////////////////////////// __device__ __host__ float unnormalized_sinc(float t) { if (abs(t) < 1E-8) return 1.0; else return sin(t) / t; } /////////////////////////////////////////////////////////////////////////// template <typename T> __global__ void column_sums_reduce(const T *__restrict__ in, T *__restrict__ out, size_t width, size_t height) { __shared__ T sdata[BLOCK_SIZE][BLOCK_SIZE + 1]; size_t idx = threadIdx.x + blockDim.x * blockIdx.x; size_t width_stride = gridDim.x * blockDim.x; size_t full_width = (width & (~((unsigned long long)(BLOCK_SIZE - 1)))) + ((width & (BLOCK_SIZE - 1)) ? BLOCK_SIZE : 0); // round up to next block for (size_t w = idx; w < full_width; w += width_stride) { // grid-stride loop across matrix width sdata[threadIdx.y][threadIdx.x] = 0; size_t in_ptr = w + threadIdx.y * width; for (size_t h = threadIdx.y; h < height; h += BLOCK_SIZE) { // block-stride loop across matrix height sdata[threadIdx.y][threadIdx.x] += (w < width) ? 
in[in_ptr] : 0; in_ptr += width * BLOCK_SIZE; } __syncthreads(); T my_val = sdata[threadIdx.x][threadIdx.y]; for (int i = warpSize >> 1; i > 0; i >>= 1) // warp-wise parallel sum reduction my_val += __shfl_xor_sync(0xFFFFFFFFU, my_val, i); __syncthreads(); if (threadIdx.x == 0) sdata[0][threadIdx.y] = my_val; __syncthreads(); if ((threadIdx.y == 0) && ((w) < width)) out[w] = sdata[0][threadIdx.x]; } } __global__ void gpu_matrix_mult(float *a, float *b, float *c, int m, int n, int k) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; float sum = 0; if (col < k && row < m) { for (int i = 0; i < n; i++) { sum += a[row * n + i] * b[i * k + col]; } c[row * k + col] = sum; } } __global__ void gpu_diag_matrix_mult(float *Val, int *RowPtr, float *diagVals, int total_rows) { const int row = threadIdx.x + blockIdx.x * blockDim.x; if (row < total_rows) { for (int i = RowPtr[row]; i < RowPtr[row + 1]; i++) { Val[i] = diagVals[row] * Val[i]; } } } /////////////////////////////////////////////////////////////////////////// // Sonar Claculation Function __global__ void sonar_calculation(thrust::complex<float> *P_Beams, float *depth_image, float *normal_image, int width, int height, int depth_image_step, int normal_image_step, float *rand_image, int rand_image_step, float *reflectivity_image, int reflectivity_image_step, float hPixelSize, float vPixelSize, float hFOV, float vFOV, float beam_azimuthAngleWidth, float beam_elevationAngleWidth, float ray_azimuthAngleWidth, float *ray_elevationAngles, float ray_elevationAngleWidth, float soundSpeed, float sourceTerm, int nBeams, int nRays, int raySkips, float sonarFreq, float delta_f, int nFreq, float bandwidth, float maxDistance, float attenuation, float area_scaler) { // 2D Index of current thread const int beam = blockIdx.x * blockDim.x + threadIdx.x; const int ray = blockIdx.y * blockDim.y + threadIdx.y; //Only valid threads perform memory I/O if ((beam < width) && (ray < height) && (ray % raySkips == 0)) { // Location of the image pixel const int depth_index = ray * depth_image_step / sizeof(float) + beam; const int normal_index = ray * normal_image_step / sizeof(float) + (3 * beam); const int rand_index = ray * rand_image_step / sizeof(float) + (2 * beam); const int reflectivity_index = ray * reflectivity_image_step / sizeof(float) + beam; // Input parameters for ray processing float distance = depth_image[depth_index] * 1.0f; float normal[3] = {normal_image[normal_index], normal_image[normal_index + 1], normal_image[normal_index + 2]}; // Beam pattern // only one column of rays for each beam at beam center, interference calculated later float azimuthBeamPattern = 1.0; float elevationBeamPattern = 1.0; // float elevationBeamPattern = abs(unnormalized_sinc(M_PI * 0.884 // / (beam_elevationAngleWidth) * sin(ray_elevationAngles[ray]))); // printf("angles %f", ray_elevationAngles[ray]); // incidence angle (taking that of normal_image) float incidence = acos(normal[2]); // compute_incidence(ray_azimuthAngle, ray_elevationAngle, normal); // ----- Point scattering model ------ // // Gaussian noise generated using opencv RNG float xi_z = rand_image[rand_index]; float xi_y = rand_image[rand_index + 1]; // Calculate amplitude thrust::complex<float> randomAmps = thrust::complex<float>(xi_z / sqrt(2.0), xi_y / sqrt(2.0)); thrust::complex<float> lambert_sqrt = thrust::complex<float>(sqrt(reflectivity_image[reflectivity_index]) * cos(incidence), 0.0); thrust::complex<float> beamPattern = 
thrust::complex<float>(azimuthBeamPattern * elevationBeamPattern, 0.0); thrust::complex<float> targetArea_sqrt = thrust::complex<float>(sqrt(distance * area_scaler), 0.0); thrust::complex<float> propagationTerm = thrust::complex<float>(1.0 / pow(distance, 2.0) * exp(-2.0 * attenuation * distance), 0.0); thrust::complex<float> amplitude = randomAmps * thrust::complex<float>(sourceTerm, 0.0) * propagationTerm * beamPattern * lambert_sqrt * targetArea_sqrt; // Max distance cut-off if (distance > maxDistance) amplitude = thrust::complex<float>(0.0, 0.0); // Summation of Echo returned from a signal (frequency domain) for (size_t f = 0; f < nFreq; f++) { float freq; if (nFreq % 2 == 0) freq = delta_f * (-nFreq / 2.0 + f*1.0f + 1.0); else freq = delta_f * (-(nFreq - 1) / 2.0 + f*1.0f + 1.0); float kw = 2.0 * M_PI * freq / soundSpeed; // wave vector // Transmit spectrum, frequency domain thrust::complex<float> kernel = exp(thrust::complex<float>(0.0f, 2.0f * distance * kw)) * amplitude; P_Beams[beam * nFreq * (int)(nRays / raySkips) + (int)(ray / raySkips) * nFreq + f] = thrust::complex<float>(kernel.real() , kernel.imag()); } } } /////////////////////////////////////////////////////////////////////////// namespace NpsGazeboSonar { // CUDA Device Checker Wrapper void check_cuda_init_wrapper(void) { // Check CUDA device hipDeviceSynchronize(); hipError_t error = hipGetLastError(); if (error != hipSuccess) { fprintf(stderr, "ERROR: %s\n", hipGetErrorString(error)); exit(-1); } } // Sonar Claculation Function Wrapper CArray2D sonar_calculation_wrapper(const cv::Mat &depth_image, const cv::Mat &normal_image, const cv::Mat &rand_image, double _hPixelSize, double _vPixelSize, double _hFOV, double _vFOV, double _beam_azimuthAngleWidth, double _beam_elevationAngleWidth, double _ray_azimuthAngleWidth, float *_ray_elevationAngles, double _ray_elevationAngleWidth, double _soundSpeed, double _maxDistance, double _sourceLevel, int _nBeams, int _nRays, int _raySkips, double _sonarFreq, double _bandwidth, int _nFreq, const cv::Mat &reflectivity_image, double _attenuation, float *window, float **beamCorrector, float beamCorrectorSum, bool debugFlag) { auto start = std::chrono::high_resolution_clock::now(); auto stop = std::chrono::high_resolution_clock::now(); auto duration = std::chrono::duration_cast<std::chrono::microseconds>(stop - start); if (debugFlag) start = std::chrono::high_resolution_clock::now(); // ---- Allocation of properties parameters ---- // const float hPixelSize = (float)_hPixelSize; const float vPixelSize = (float)_vPixelSize; const float hFOV = (float)_hFOV; const float vFOV = (float)_vFOV; const float beam_elevationAngleWidth = (float)_beam_elevationAngleWidth; const float beam_azimuthAngleWidth = (float)_beam_azimuthAngleWidth; const float ray_elevationAngleWidth = (float)_ray_elevationAngleWidth; const float ray_azimuthAngleWidth = (float)_ray_azimuthAngleWidth; const float soundSpeed = (float)_soundSpeed; const float maxDistance = (float)_maxDistance; const float sonarFreq = (float)_sonarFreq; const float bandwidth = (float)_bandwidth; const float attenuation = (float)_attenuation; const int nBeams = _nBeams; const int nRays = _nRays; const int nFreq = _nFreq; const int raySkips = _raySkips; //#######################################################// //############### Sonar Calculation ################// //#######################################################// // --------- Calculation parameters --------- // const float max_distance = maxDistance; // Signal const float delta_f = 
bandwidth/nFreq; // Precalculation const float area_scaler = ray_azimuthAngleWidth * ray_elevationAngleWidth; const float sourceLevel = (float)_sourceLevel; // db re 1 muPa; const float pref = 1e-6; // 1 micro pascal (muPa); const float sourceTerm = sqrt(pow(10, (sourceLevel / 10))) * pref; // source term // --------- Allocate GPU memory for image --------- // //Calculate total number of bytes of input and output image const int depth_image_Bytes = depth_image.step * depth_image.rows; const int normal_image_Bytes = normal_image.step * normal_image.rows; const int rand_image_Bytes = rand_image.step * rand_image.rows; const int reflectivity_image_Bytes = reflectivity_image.step * reflectivity_image.rows; const int ray_elevationAngles_Bytes = sizeof(float) * nRays; //Allocate device memory float *d_depth_image, *d_normal_image, *d_rand_image, *d_reflectivity_image, *ray_elevationAngles, *d_ray_elevationAngles; SAFE_CALL(hipMalloc((void **)&d_depth_image, depth_image_Bytes), "CUDA Malloc Failed"); SAFE_CALL(hipMalloc((void **)&d_normal_image, normal_image_Bytes), "CUDA Malloc Failed"); SAFE_CALL(hipMalloc((void **)&d_rand_image, rand_image_Bytes), "CUDA Malloc Failed"); SAFE_CALL(hipMalloc((void **)&d_reflectivity_image, reflectivity_image_Bytes), "CUDA Malloc Failed"); hipHostMalloc((void **)&ray_elevationAngles, ray_elevationAngles_Bytes); SAFE_CALL(hipMalloc((void **)&d_ray_elevationAngles, ray_elevationAngles_Bytes), "CUDA Malloc Failed"); for (size_t ray = 0; ray < nRays; ray ++) ray_elevationAngles[ray] = _ray_elevationAngles[ray]; //Copy data from OpenCV input image to device memory SAFE_CALL(hipMemcpy(d_depth_image, depth_image.ptr(), depth_image_Bytes, hipMemcpyHostToDevice), "CUDA Memcpy Failed"); SAFE_CALL(hipMemcpy(d_normal_image, normal_image.ptr(), normal_image_Bytes, hipMemcpyHostToDevice),"CUDA Memcpy Failed"); SAFE_CALL(hipMemcpy(d_rand_image, rand_image.ptr(), rand_image_Bytes, hipMemcpyHostToDevice),"CUDA Memcpy Failed"); SAFE_CALL(hipMemcpy(d_reflectivity_image, reflectivity_image.ptr(), reflectivity_image_Bytes, hipMemcpyHostToDevice), "CUDA Memcpy Failed"); SAFE_CALL(hipMemcpy(d_ray_elevationAngles, ray_elevationAngles, ray_elevationAngles_Bytes, hipMemcpyHostToDevice), "CUDA Memcpy Failed"); //Specify a reasonable block size const dim3 block(BLOCK_SIZE, BLOCK_SIZE); //Calculate grid size to cover the whole image const dim3 grid((depth_image.cols + block.x - 1) / block.x, (depth_image.rows + block.y - 1) / block.y); // Beam data array thrust::complex<float> *P_Beams; thrust::complex<float> *d_P_Beams; const int P_Beams_N = nBeams * (int)(nRays / raySkips) * (nFreq + 1); const int P_Beams_Bytes = sizeof(thrust::complex<float>) * P_Beams_N; SAFE_CALL(hipHostMalloc((void **)&P_Beams, P_Beams_Bytes), "CUDA Malloc Failed"); SAFE_CALL(hipMalloc((void **)&d_P_Beams, P_Beams_Bytes), "CUDA Malloc Failed"); //Launch the beamor conversion kernel hipLaunchKernelGGL(( sonar_calculation), dim3(grid), dim3(block), 0, 0, d_P_Beams, d_depth_image, d_normal_image, normal_image.cols, normal_image.rows, depth_image.step, normal_image.step, d_rand_image, rand_image.step, d_reflectivity_image, reflectivity_image.step, hPixelSize, vPixelSize, hFOV, vFOV, beam_azimuthAngleWidth, beam_elevationAngleWidth, ray_azimuthAngleWidth, d_ray_elevationAngles, ray_elevationAngleWidth, soundSpeed, sourceTerm, nBeams, nRays, raySkips, sonarFreq, delta_f, nFreq, bandwidth, max_distance, attenuation, area_scaler); //Synchronize to check for any kernel launch errors SAFE_CALL(hipDeviceSynchronize(), "Kernel 
Launch Failed"); //Copy back data from destination device meory to OpenCV output image SAFE_CALL(hipMemcpy(P_Beams, d_P_Beams, P_Beams_Bytes, hipMemcpyDeviceToHost), "CUDA Memcpy Failed"); // Free GPU memory hipFree(d_depth_image); hipFree(d_normal_image); hipFree(d_rand_image); hipFree(d_reflectivity_image); hipFree(d_P_Beams); hipFree(d_ray_elevationAngles); hipHostFree(ray_elevationAngles); // For calc time measure if (debugFlag) { stop = std::chrono::high_resolution_clock::now(); duration = std::chrono::duration_cast<std::chrono::microseconds>(stop - start); printf("GPU Sonar Computation Time %lld/100 [s]\n", static_cast<long long int>(duration.count() / 10000)); start = std::chrono::high_resolution_clock::now(); } //########################################################// //######### Summation, Culling and windowing #########// //########################################################// // Preallocate an array for return CArray2D P_Beams_F(CArray(nFreq), nBeams); // GPU grids and rows unsigned int grid_rows, grid_cols; dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); // GPU Ray summation using column sum float *P_Ray_real, *P_Ray_imag; float *d_P_Ray_real, *d_P_Ray_imag; const int P_Ray_N = (int)(nRays / raySkips) * (nFreq); const int P_Ray_Bytes = sizeof(float) * P_Ray_N; float *P_Ray_F_real, *P_Ray_F_imag; float *d_P_Ray_F_real, *d_P_Ray_F_imag; const int P_Ray_F_N = (nFreq)*1; const int P_Ray_F_Bytes = sizeof(float) * P_Ray_F_N; hipHostMalloc((void **)&P_Ray_real, P_Ray_Bytes); hipHostMalloc((void **)&P_Ray_imag, P_Ray_Bytes); hipHostMalloc((void **)&P_Ray_F_real, P_Ray_F_Bytes); hipHostMalloc((void **)&P_Ray_F_imag, P_Ray_F_Bytes); SAFE_CALL(hipMalloc((void **)&d_P_Ray_real, P_Ray_Bytes), "CUDA Malloc Failed"); SAFE_CALL(hipMalloc((void **)&d_P_Ray_imag, P_Ray_Bytes), "CUDA Malloc Failed"); SAFE_CALL(hipMalloc((void **)&d_P_Ray_F_real, P_Ray_F_Bytes), "CUDA Malloc Failed"); SAFE_CALL(hipMalloc((void **)&d_P_Ray_F_imag, P_Ray_F_Bytes), "CUDA Malloc Failed"); dim3 dimGrid_Ray((nFreq + BLOCK_SIZE - 1) / BLOCK_SIZE); for (size_t beam = 0; beam < nBeams; beam ++) { for (size_t ray = 0; ray < (int)(nRays / raySkips); ray++) { for (size_t f = 0; f < nFreq; f++) { P_Ray_real[ray * nFreq + f] = P_Beams[beam * nFreq * (int)(nRays / raySkips) + ray * nFreq + f].real(); P_Ray_imag[ray * nFreq + f] = P_Beams[beam * nFreq * (int)(nRays / raySkips) + ray * nFreq + f].imag(); } } SAFE_CALL(hipMemcpy(d_P_Ray_real, P_Ray_real, P_Ray_Bytes, hipMemcpyHostToDevice), "CUDA Memcpy Failed"); SAFE_CALL(hipMemcpy(d_P_Ray_imag, P_Ray_imag, P_Ray_Bytes, hipMemcpyHostToDevice), "CUDA Memcpy Failed"); hipLaunchKernelGGL(( column_sums_reduce), dim3(dimGrid_Ray), dim3(dimBlock), 0, 0, d_P_Ray_real, d_P_Ray_F_real, nFreq, (int)(nRays / raySkips)); hipLaunchKernelGGL(( column_sums_reduce), dim3(dimGrid_Ray), dim3(dimBlock), 0, 0, d_P_Ray_imag, d_P_Ray_F_imag, nFreq, (int)(nRays / raySkips)); SAFE_CALL(hipMemcpy(P_Ray_F_real, d_P_Ray_F_real, P_Ray_F_Bytes, hipMemcpyDeviceToHost), "CUDA Memcpy Failed"); SAFE_CALL(hipMemcpy(P_Ray_F_imag, d_P_Ray_F_imag, P_Ray_F_Bytes, hipMemcpyDeviceToHost), "CUDA Memcpy Failed"); SAFE_CALL(hipDeviceSynchronize(), "Kernel Launch Failed"); for (size_t f = 0; f < nFreq; f++) P_Beams_F[beam][f] = Complex(P_Ray_F_real[f], P_Ray_F_imag[f]); } // free memory hipHostFree(P_Beams); hipHostFree(P_Ray_real); hipHostFree(P_Ray_imag); hipHostFree(P_Ray_F_real); hipHostFree(P_Ray_F_imag); hipFree(d_P_Ray_real); hipFree(d_P_Ray_imag); hipFree(d_P_Ray_F_real); hipFree(d_P_Ray_F_imag); if (debugFlag) { 
stop = std::chrono::high_resolution_clock::now(); duration = std::chrono::duration_cast<std::chrono::microseconds>(stop - start); printf("Sonar Ray Summation %lld/100 [s]\n", static_cast<long long int>(duration.count() / 10000)); start = std::chrono::high_resolution_clock::now(); } // -------------- Beam culling correction -----------------// // beamCorrector and beamCorrectorSum is precalculated at parent cpp float *P_Beams_Cor_real, *P_Beams_Cor_imag; // float *P_Beams_Cor_F_real, *P_Beams_Cor_F_imag; float *P_Beams_Cor_real_tmp, *P_Beams_Cor_imag_tmp; float *d_P_Beams_Cor_real, *d_P_Beams_Cor_imag; float *d_P_Beams_Cor_F_real, *d_P_Beams_Cor_F_imag; const int P_Beams_Cor_N = nBeams * nFreq; const int P_Beams_Cor_Bytes = sizeof(float) * P_Beams_Cor_N; hipHostMalloc((void **)&P_Beams_Cor_real, P_Beams_Cor_Bytes); hipHostMalloc((void **)&P_Beams_Cor_imag, P_Beams_Cor_Bytes); hipHostMalloc((void **)&P_Beams_Cor_real_tmp, P_Beams_Cor_Bytes); hipHostMalloc((void **)&P_Beams_Cor_imag_tmp, P_Beams_Cor_Bytes); // hipHostMalloc((void **)&P_Beams_Cor_F_real, P_Beams_Cor_Bytes); // hipHostMalloc((void **)&P_Beams_Cor_F_imag, P_Beams_Cor_Bytes); SAFE_CALL(hipMalloc((void **)&d_P_Beams_Cor_real, P_Beams_Cor_Bytes), "CUDA Malloc Failed"); SAFE_CALL(hipMalloc((void **)&d_P_Beams_Cor_imag, P_Beams_Cor_Bytes), "CUDA Malloc Failed"); SAFE_CALL(hipMalloc((void **)&d_P_Beams_Cor_F_real, P_Beams_Cor_Bytes), "CUDA Malloc Failed"); SAFE_CALL(hipMalloc((void **)&d_P_Beams_Cor_F_imag, P_Beams_Cor_Bytes), "CUDA Malloc Failed"); float *beamCorrector_lin, *d_beamCorrector_lin; const int beamCorrector_lin_N = nBeams * nBeams; const int beamCorrector_lin_Bytes = sizeof(float) * beamCorrector_lin_N; hipHostMalloc((void **)&beamCorrector_lin, beamCorrector_lin_Bytes); SAFE_CALL(hipMalloc((void **)&d_beamCorrector_lin, beamCorrector_lin_Bytes), "CUDA Malloc Failed"); // (nfreq x nBeams) * (nBeams x nBeams) = (nfreq x nBeams) for (size_t beam = 0; beam < nBeams; beam ++) { for (size_t f = 0; f < nFreq; f++) { P_Beams_Cor_real[f * nBeams + beam] = P_Beams_F[beam][f].real() * 1.0f; P_Beams_Cor_imag[f * nBeams + beam] = P_Beams_F[beam][f].imag() * 1.0f; } for (size_t beam_other = 0; beam_other < nBeams; beam_other ++) beamCorrector_lin[beam_other * nBeams + beam] = beamCorrector[beam][beam_other]; } SAFE_CALL(hipMemcpy(d_P_Beams_Cor_real, P_Beams_Cor_real, P_Beams_Cor_Bytes, hipMemcpyHostToDevice), "CUDA Memcpy Failed"); SAFE_CALL(hipMemcpy(d_P_Beams_Cor_imag, P_Beams_Cor_imag, P_Beams_Cor_Bytes, hipMemcpyHostToDevice), "CUDA Memcpy Failed"); SAFE_CALL(hipMemcpy(d_beamCorrector_lin, beamCorrector_lin, beamCorrector_lin_Bytes, hipMemcpyHostToDevice), "CUDA Memcpy Failed"); grid_rows = (nFreq + BLOCK_SIZE - 1) / BLOCK_SIZE; grid_cols = (nBeams + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid_Beam(grid_cols, grid_rows); hipLaunchKernelGGL(( gpu_matrix_mult), dim3(dimGrid_Beam), dim3(dimBlock), 0, 0, d_P_Beams_Cor_real, d_beamCorrector_lin, d_P_Beams_Cor_F_real, nFreq, nBeams, nBeams); SAFE_CALL(hipDeviceSynchronize(), "Kernel Launch Failed"); hipLaunchKernelGGL(( gpu_matrix_mult), dim3(dimGrid_Beam), dim3(dimBlock), 0, 0, d_P_Beams_Cor_imag, d_beamCorrector_lin, d_P_Beams_Cor_F_imag, nFreq, nBeams, nBeams); SAFE_CALL(hipDeviceSynchronize(), "Kernel Launch Failed"); //Copy back data from destination device meory SAFE_CALL(hipMemcpy(P_Beams_Cor_real_tmp, d_P_Beams_Cor_F_real, P_Beams_Cor_Bytes, hipMemcpyDeviceToHost), "CUDA Memcpy Failed"); SAFE_CALL(hipMemcpy(P_Beams_Cor_imag_tmp, d_P_Beams_Cor_F_imag, P_Beams_Cor_Bytes, 
hipMemcpyDeviceToHost), "CUDA Memcpy Failed"); SAFE_CALL(hipDeviceSynchronize(), "Kernel Launch Failed"); // Return for (size_t beam = 0; beam < nBeams; beam ++) for (size_t f = 0; f < nFreq; f++) P_Beams_F[beam][f] = Complex(P_Beams_Cor_real_tmp[f * nBeams + beam] / beamCorrectorSum, P_Beams_Cor_imag_tmp[f * nBeams + beam] / beamCorrectorSum); // Free memory hipFree(d_P_Beams_Cor_imag); hipFree(d_P_Beams_Cor_real); hipFree(d_P_Beams_Cor_F_imag); hipFree(d_P_Beams_Cor_F_real); hipFree(d_beamCorrector_lin); hipHostFree(P_Beams_Cor_real); hipHostFree(P_Beams_Cor_imag); hipHostFree(P_Beams_Cor_real_tmp); hipHostFree(P_Beams_Cor_imag_tmp); hipHostFree(beamCorrector_lin); // For calc time measure if (debugFlag) { stop = std::chrono::high_resolution_clock::now(); duration = std::chrono::duration_cast<std::chrono::microseconds>(stop - start); printf("GPU Window & Correction %lld/100 [s]\n", static_cast<long long int>(duration.count() / 10000)); start = std::chrono::high_resolution_clock::now(); } //#################################################// //################### FFT #####################// //#################################################// SAFE_CALL(hipDeviceSynchronize(), "Kernel Launch Failed"); const int DATASIZE = nFreq; const int BATCH = nBeams; // --- Host side input data allocation and initialization hipfftComplex *hostInputData = (hipfftComplex *)malloc( DATASIZE * BATCH * sizeof(hipfftComplex)); for (int beam = 0; beam < BATCH; beam++) { for (int f = 0; f < DATASIZE; f++) { if (f < nFreq) hostInputData[beam * DATASIZE + f] = make_cuComplex(P_Beams_F[beam][f].real() * 1.0f, P_Beams_F[beam][f].imag() * 1.0f); else hostInputData[beam * DATASIZE + f] = (make_cuComplex(0.f, 0.f)); // zero padding } } // --- Device side input data allocation and initialization hipfftComplex *deviceInputData; SAFE_CALL(hipMalloc((void **)&deviceInputData, DATASIZE * BATCH * sizeof(hipfftComplex)), "FFT CUDA Malloc Failed"); SAFE_CALL(hipMemcpy(deviceInputData, hostInputData, DATASIZE * BATCH * sizeof(hipfftComplex), hipMemcpyHostToDevice), "FFT CUDA Memcopy Failed"); // --- Host side output data allocation hipfftComplex *hostOutputData = (hipfftComplex *)malloc(DATASIZE * BATCH * sizeof(hipfftComplex)); // --- Device side output data allocation hipfftComplex *deviceOutputData; hipMalloc((void **)&deviceOutputData, DATASIZE * BATCH * sizeof(hipfftComplex)); // --- Batched 1D FFTs hipfftHandle handle; int rank = 1; // --- 1D FFTs int n[] = {DATASIZE}; // --- Size of the Fourier transform // --- Distance between two successive input/output elements int istride = 1, ostride = 1; int idist = DATASIZE, odist = DATASIZE; // --- Distance between batches // --- Input/Output size with pitch (ignored for 1D transforms) int inembed[] = {0}; int onembed[] = {0}; int batch = BATCH; // --- Number of batched executions hipfftPlanMany(&handle, rank, n, inembed, istride, idist, onembed, ostride, odist, HIPFFT_C2C, batch); hipfftExecC2C(handle, deviceInputData, deviceOutputData, HIPFFT_FORWARD); // --- Device->Host copy of the results SAFE_CALL(hipMemcpy(hostOutputData, deviceOutputData, DATASIZE * BATCH * sizeof(hipfftComplex), hipMemcpyDeviceToHost), "FFT CUDA Memcopy Failed"); hipfftDestroy(handle); hipFree(deviceOutputData); hipFree(deviceInputData); free(hostInputData); free(hostOutputData); for (int beam = 0; beam < BATCH; beam++) { for (int f = 0; f < nFreq; f++) { P_Beams_F[beam][f] = Complex(hostOutputData[beam * DATASIZE + f].x * delta_f, hostOutputData[beam * DATASIZE + f].y * delta_f); } } // For calc time 
measure if (debugFlag) { stop = std::chrono::high_resolution_clock::now(); duration = std::chrono::duration_cast<std::chrono::microseconds>(stop - start); printf("GPU FFT Calc Time %lld/100 [s]\n", static_cast<long long int>(duration.count() / 10000)); } return P_Beams_F; } } // namespace NpsGazeboSonar
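///////////////////////////////////////////////////////////////////////////
// Reference sketch: per beam, the ray-summation step that column_sums_reduce
// performs on the GPU is a column-wise sum over the (nRays/raySkips) x nFreq
// block of per-ray spectra. The plain CPU restatement below uses the same
// flattened indexing as the sonar_calculation kernel output; function and
// buffer names are illustrative only, not code from the plugin itself.
#include <complex>
#include <vector>

std::vector<std::complex<float>> sum_rays_for_beam(
    const std::vector<std::complex<float>> &P_Beams, // size nBeams * nRaysUsed * nFreq
    int beam, int nRaysUsed, int nFreq)
{
  std::vector<std::complex<float>> beam_spectrum(nFreq, std::complex<float>(0.f, 0.f));
  for (int ray = 0; ray < nRaysUsed; ++ray)
    for (int f = 0; f < nFreq; ++f)
      // beam-major, then ray, then frequency bin (matches the kernel's layout)
      beam_spectrum[f] += P_Beams[beam * nRaysUsed * nFreq + ray * nFreq + f];
  return beam_spectrum;
}
///////////////////////////////////////////////////////////////////////////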
4d6b308ef931caeffc912f8eacc129298a09dfdb.cu
/* * Copyright 2020 Naval Postgraduate School * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include <nps_uw_multibeam_sonar/sonar_calculation_cuda.cuh> // #include <math.h> #include <assert.h> // For complex numbers #include <thrust/complex.h> #include <cuComplex.h> // For rand() function #include <unistd.h> #include <curand.h> #include <curand_kernel.h> // For FFT #include <cufft.h> #include <cufftw.h> #include <thrust/device_vector.h> #include <list> #include <chrono> #define BLOCK_SIZE 32 static inline void _safe_cuda_call(cudaError err, const char *msg, const char *file_name, const int line_number) { if (err != cudaSuccess) { fprintf(stderr, "%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n", msg, file_name, line_number, cudaGetErrorString(err)); std::cin.get(); exit(EXIT_FAILURE); } } #define SAFE_CALL(call, msg) _safe_cuda_call((call), (msg), __FILE__, __LINE__) /////////////////////////////////////////////////////////////////////////// // Incident Angle Calculation Function // incidence angle is target's normal angle accounting for the ray's azimuth // and elevation __device__ float compute_incidence(float azimuth, float elevation, float *normal) { // ray normal from camera azimuth and elevation float camera_x = cosf(-azimuth) * cosf(elevation); float camera_y = sinf(-azimuth) * cosf(elevation); float camera_z = sinf(elevation); float ray_normal[3] = {camera_x, camera_y, camera_z}; // target normal with axes compensated to camera axes float target_normal[3] = {normal[2], -normal[0], -normal[1]}; // dot product float dot_product = ray_normal[0] * target_normal[0] + ray_normal[1] * target_normal[1] + ray_normal[2] * target_normal[2]; return M_PI - acosf(dot_product); } /////////////////////////////////////////////////////////////////////////// __device__ __host__ float unnormalized_sinc(float t) { if (abs(t) < 1E-8) return 1.0; else return sin(t) / t; } /////////////////////////////////////////////////////////////////////////// template <typename T> __global__ void column_sums_reduce(const T *__restrict__ in, T *__restrict__ out, size_t width, size_t height) { __shared__ T sdata[BLOCK_SIZE][BLOCK_SIZE + 1]; size_t idx = threadIdx.x + blockDim.x * blockIdx.x; size_t width_stride = gridDim.x * blockDim.x; size_t full_width = (width & (~((unsigned long long)(BLOCK_SIZE - 1)))) + ((width & (BLOCK_SIZE - 1)) ? BLOCK_SIZE : 0); // round up to next block for (size_t w = idx; w < full_width; w += width_stride) { // grid-stride loop across matrix width sdata[threadIdx.y][threadIdx.x] = 0; size_t in_ptr = w + threadIdx.y * width; for (size_t h = threadIdx.y; h < height; h += BLOCK_SIZE) { // block-stride loop across matrix height sdata[threadIdx.y][threadIdx.x] += (w < width) ? 
in[in_ptr] : 0; in_ptr += width * BLOCK_SIZE; } __syncthreads(); T my_val = sdata[threadIdx.x][threadIdx.y]; for (int i = warpSize >> 1; i > 0; i >>= 1) // warp-wise parallel sum reduction my_val += __shfl_xor_sync(0xFFFFFFFFU, my_val, i); __syncthreads(); if (threadIdx.x == 0) sdata[0][threadIdx.y] = my_val; __syncthreads(); if ((threadIdx.y == 0) && ((w) < width)) out[w] = sdata[0][threadIdx.x]; } } __global__ void gpu_matrix_mult(float *a, float *b, float *c, int m, int n, int k) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; float sum = 0; if (col < k && row < m) { for (int i = 0; i < n; i++) { sum += a[row * n + i] * b[i * k + col]; } c[row * k + col] = sum; } } __global__ void gpu_diag_matrix_mult(float *Val, int *RowPtr, float *diagVals, int total_rows) { const int row = threadIdx.x + blockIdx.x * blockDim.x; if (row < total_rows) { for (int i = RowPtr[row]; i < RowPtr[row + 1]; i++) { Val[i] = diagVals[row] * Val[i]; } } } /////////////////////////////////////////////////////////////////////////// // Sonar Claculation Function __global__ void sonar_calculation(thrust::complex<float> *P_Beams, float *depth_image, float *normal_image, int width, int height, int depth_image_step, int normal_image_step, float *rand_image, int rand_image_step, float *reflectivity_image, int reflectivity_image_step, float hPixelSize, float vPixelSize, float hFOV, float vFOV, float beam_azimuthAngleWidth, float beam_elevationAngleWidth, float ray_azimuthAngleWidth, float *ray_elevationAngles, float ray_elevationAngleWidth, float soundSpeed, float sourceTerm, int nBeams, int nRays, int raySkips, float sonarFreq, float delta_f, int nFreq, float bandwidth, float maxDistance, float attenuation, float area_scaler) { // 2D Index of current thread const int beam = blockIdx.x * blockDim.x + threadIdx.x; const int ray = blockIdx.y * blockDim.y + threadIdx.y; //Only valid threads perform memory I/O if ((beam < width) && (ray < height) && (ray % raySkips == 0)) { // Location of the image pixel const int depth_index = ray * depth_image_step / sizeof(float) + beam; const int normal_index = ray * normal_image_step / sizeof(float) + (3 * beam); const int rand_index = ray * rand_image_step / sizeof(float) + (2 * beam); const int reflectivity_index = ray * reflectivity_image_step / sizeof(float) + beam; // Input parameters for ray processing float distance = depth_image[depth_index] * 1.0f; float normal[3] = {normal_image[normal_index], normal_image[normal_index + 1], normal_image[normal_index + 2]}; // Beam pattern // only one column of rays for each beam at beam center, interference calculated later float azimuthBeamPattern = 1.0; float elevationBeamPattern = 1.0; // float elevationBeamPattern = abs(unnormalized_sinc(M_PI * 0.884 // / (beam_elevationAngleWidth) * sin(ray_elevationAngles[ray]))); // printf("angles %f", ray_elevationAngles[ray]); // incidence angle (taking that of normal_image) float incidence = acos(normal[2]); // compute_incidence(ray_azimuthAngle, ray_elevationAngle, normal); // ----- Point scattering model ------ // // Gaussian noise generated using opencv RNG float xi_z = rand_image[rand_index]; float xi_y = rand_image[rand_index + 1]; // Calculate amplitude thrust::complex<float> randomAmps = thrust::complex<float>(xi_z / sqrt(2.0), xi_y / sqrt(2.0)); thrust::complex<float> lambert_sqrt = thrust::complex<float>(sqrt(reflectivity_image[reflectivity_index]) * cos(incidence), 0.0); thrust::complex<float> beamPattern = 
thrust::complex<float>(azimuthBeamPattern * elevationBeamPattern, 0.0); thrust::complex<float> targetArea_sqrt = thrust::complex<float>(sqrt(distance * area_scaler), 0.0); thrust::complex<float> propagationTerm = thrust::complex<float>(1.0 / pow(distance, 2.0) * exp(-2.0 * attenuation * distance), 0.0); thrust::complex<float> amplitude = randomAmps * thrust::complex<float>(sourceTerm, 0.0) * propagationTerm * beamPattern * lambert_sqrt * targetArea_sqrt; // Max distance cut-off if (distance > maxDistance) amplitude = thrust::complex<float>(0.0, 0.0); // Summation of Echo returned from a signal (frequency domain) for (size_t f = 0; f < nFreq; f++) { float freq; if (nFreq % 2 == 0) freq = delta_f * (-nFreq / 2.0 + f*1.0f + 1.0); else freq = delta_f * (-(nFreq - 1) / 2.0 + f*1.0f + 1.0); float kw = 2.0 * M_PI * freq / soundSpeed; // wave vector // Transmit spectrum, frequency domain thrust::complex<float> kernel = exp(thrust::complex<float>(0.0f, 2.0f * distance * kw)) * amplitude; P_Beams[beam * nFreq * (int)(nRays / raySkips) + (int)(ray / raySkips) * nFreq + f] = thrust::complex<float>(kernel.real() , kernel.imag()); } } } /////////////////////////////////////////////////////////////////////////// namespace NpsGazeboSonar { // CUDA Device Checker Wrapper void check_cuda_init_wrapper(void) { // Check CUDA device cudaDeviceSynchronize(); cudaError_t error = cudaGetLastError(); if (error != cudaSuccess) { fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(error)); exit(-1); } } // Sonar Claculation Function Wrapper CArray2D sonar_calculation_wrapper(const cv::Mat &depth_image, const cv::Mat &normal_image, const cv::Mat &rand_image, double _hPixelSize, double _vPixelSize, double _hFOV, double _vFOV, double _beam_azimuthAngleWidth, double _beam_elevationAngleWidth, double _ray_azimuthAngleWidth, float *_ray_elevationAngles, double _ray_elevationAngleWidth, double _soundSpeed, double _maxDistance, double _sourceLevel, int _nBeams, int _nRays, int _raySkips, double _sonarFreq, double _bandwidth, int _nFreq, const cv::Mat &reflectivity_image, double _attenuation, float *window, float **beamCorrector, float beamCorrectorSum, bool debugFlag) { auto start = std::chrono::high_resolution_clock::now(); auto stop = std::chrono::high_resolution_clock::now(); auto duration = std::chrono::duration_cast<std::chrono::microseconds>(stop - start); if (debugFlag) start = std::chrono::high_resolution_clock::now(); // ---- Allocation of properties parameters ---- // const float hPixelSize = (float)_hPixelSize; const float vPixelSize = (float)_vPixelSize; const float hFOV = (float)_hFOV; const float vFOV = (float)_vFOV; const float beam_elevationAngleWidth = (float)_beam_elevationAngleWidth; const float beam_azimuthAngleWidth = (float)_beam_azimuthAngleWidth; const float ray_elevationAngleWidth = (float)_ray_elevationAngleWidth; const float ray_azimuthAngleWidth = (float)_ray_azimuthAngleWidth; const float soundSpeed = (float)_soundSpeed; const float maxDistance = (float)_maxDistance; const float sonarFreq = (float)_sonarFreq; const float bandwidth = (float)_bandwidth; const float attenuation = (float)_attenuation; const int nBeams = _nBeams; const int nRays = _nRays; const int nFreq = _nFreq; const int raySkips = _raySkips; //#######################################################// //############### Sonar Calculation ################// //#######################################################// // --------- Calculation parameters --------- // const float max_distance = maxDistance; // Signal const float delta_f = 
bandwidth/nFreq; // Precalculation const float area_scaler = ray_azimuthAngleWidth * ray_elevationAngleWidth; const float sourceLevel = (float)_sourceLevel; // db re 1 muPa; const float pref = 1e-6; // 1 micro pascal (muPa); const float sourceTerm = sqrt(pow(10, (sourceLevel / 10))) * pref; // source term // --------- Allocate GPU memory for image --------- // //Calculate total number of bytes of input and output image const int depth_image_Bytes = depth_image.step * depth_image.rows; const int normal_image_Bytes = normal_image.step * normal_image.rows; const int rand_image_Bytes = rand_image.step * rand_image.rows; const int reflectivity_image_Bytes = reflectivity_image.step * reflectivity_image.rows; const int ray_elevationAngles_Bytes = sizeof(float) * nRays; //Allocate device memory float *d_depth_image, *d_normal_image, *d_rand_image, *d_reflectivity_image, *ray_elevationAngles, *d_ray_elevationAngles; SAFE_CALL(cudaMalloc((void **)&d_depth_image, depth_image_Bytes), "CUDA Malloc Failed"); SAFE_CALL(cudaMalloc((void **)&d_normal_image, normal_image_Bytes), "CUDA Malloc Failed"); SAFE_CALL(cudaMalloc((void **)&d_rand_image, rand_image_Bytes), "CUDA Malloc Failed"); SAFE_CALL(cudaMalloc((void **)&d_reflectivity_image, reflectivity_image_Bytes), "CUDA Malloc Failed"); cudaMallocHost((void **)&ray_elevationAngles, ray_elevationAngles_Bytes); SAFE_CALL(cudaMalloc((void **)&d_ray_elevationAngles, ray_elevationAngles_Bytes), "CUDA Malloc Failed"); for (size_t ray = 0; ray < nRays; ray ++) ray_elevationAngles[ray] = _ray_elevationAngles[ray]; //Copy data from OpenCV input image to device memory SAFE_CALL(cudaMemcpy(d_depth_image, depth_image.ptr(), depth_image_Bytes, cudaMemcpyHostToDevice), "CUDA Memcpy Failed"); SAFE_CALL(cudaMemcpy(d_normal_image, normal_image.ptr(), normal_image_Bytes, cudaMemcpyHostToDevice),"CUDA Memcpy Failed"); SAFE_CALL(cudaMemcpy(d_rand_image, rand_image.ptr(), rand_image_Bytes, cudaMemcpyHostToDevice),"CUDA Memcpy Failed"); SAFE_CALL(cudaMemcpy(d_reflectivity_image, reflectivity_image.ptr(), reflectivity_image_Bytes, cudaMemcpyHostToDevice), "CUDA Memcpy Failed"); SAFE_CALL(cudaMemcpy(d_ray_elevationAngles, ray_elevationAngles, ray_elevationAngles_Bytes, cudaMemcpyHostToDevice), "CUDA Memcpy Failed"); //Specify a reasonable block size const dim3 block(BLOCK_SIZE, BLOCK_SIZE); //Calculate grid size to cover the whole image const dim3 grid((depth_image.cols + block.x - 1) / block.x, (depth_image.rows + block.y - 1) / block.y); // Beam data array thrust::complex<float> *P_Beams; thrust::complex<float> *d_P_Beams; const int P_Beams_N = nBeams * (int)(nRays / raySkips) * (nFreq + 1); const int P_Beams_Bytes = sizeof(thrust::complex<float>) * P_Beams_N; SAFE_CALL(cudaMallocHost((void **)&P_Beams, P_Beams_Bytes), "CUDA Malloc Failed"); SAFE_CALL(cudaMalloc((void **)&d_P_Beams, P_Beams_Bytes), "CUDA Malloc Failed"); //Launch the beamor conversion kernel sonar_calculation<<<grid, block>>>(d_P_Beams, d_depth_image, d_normal_image, normal_image.cols, normal_image.rows, depth_image.step, normal_image.step, d_rand_image, rand_image.step, d_reflectivity_image, reflectivity_image.step, hPixelSize, vPixelSize, hFOV, vFOV, beam_azimuthAngleWidth, beam_elevationAngleWidth, ray_azimuthAngleWidth, d_ray_elevationAngles, ray_elevationAngleWidth, soundSpeed, sourceTerm, nBeams, nRays, raySkips, sonarFreq, delta_f, nFreq, bandwidth, max_distance, attenuation, area_scaler); //Synchronize to check for any kernel launch errors SAFE_CALL(cudaDeviceSynchronize(), "Kernel Launch Failed"); 
//Copy back data from destination device meory to OpenCV output image SAFE_CALL(cudaMemcpy(P_Beams, d_P_Beams, P_Beams_Bytes, cudaMemcpyDeviceToHost), "CUDA Memcpy Failed"); // Free GPU memory cudaFree(d_depth_image); cudaFree(d_normal_image); cudaFree(d_rand_image); cudaFree(d_reflectivity_image); cudaFree(d_P_Beams); cudaFree(d_ray_elevationAngles); cudaFreeHost(ray_elevationAngles); // For calc time measure if (debugFlag) { stop = std::chrono::high_resolution_clock::now(); duration = std::chrono::duration_cast<std::chrono::microseconds>(stop - start); printf("GPU Sonar Computation Time %lld/100 [s]\n", static_cast<long long int>(duration.count() / 10000)); start = std::chrono::high_resolution_clock::now(); } //########################################################// //######### Summation, Culling and windowing #########// //########################################################// // Preallocate an array for return CArray2D P_Beams_F(CArray(nFreq), nBeams); // GPU grids and rows unsigned int grid_rows, grid_cols; dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); // GPU Ray summation using column sum float *P_Ray_real, *P_Ray_imag; float *d_P_Ray_real, *d_P_Ray_imag; const int P_Ray_N = (int)(nRays / raySkips) * (nFreq); const int P_Ray_Bytes = sizeof(float) * P_Ray_N; float *P_Ray_F_real, *P_Ray_F_imag; float *d_P_Ray_F_real, *d_P_Ray_F_imag; const int P_Ray_F_N = (nFreq)*1; const int P_Ray_F_Bytes = sizeof(float) * P_Ray_F_N; cudaMallocHost((void **)&P_Ray_real, P_Ray_Bytes); cudaMallocHost((void **)&P_Ray_imag, P_Ray_Bytes); cudaMallocHost((void **)&P_Ray_F_real, P_Ray_F_Bytes); cudaMallocHost((void **)&P_Ray_F_imag, P_Ray_F_Bytes); SAFE_CALL(cudaMalloc((void **)&d_P_Ray_real, P_Ray_Bytes), "CUDA Malloc Failed"); SAFE_CALL(cudaMalloc((void **)&d_P_Ray_imag, P_Ray_Bytes), "CUDA Malloc Failed"); SAFE_CALL(cudaMalloc((void **)&d_P_Ray_F_real, P_Ray_F_Bytes), "CUDA Malloc Failed"); SAFE_CALL(cudaMalloc((void **)&d_P_Ray_F_imag, P_Ray_F_Bytes), "CUDA Malloc Failed"); dim3 dimGrid_Ray((nFreq + BLOCK_SIZE - 1) / BLOCK_SIZE); for (size_t beam = 0; beam < nBeams; beam ++) { for (size_t ray = 0; ray < (int)(nRays / raySkips); ray++) { for (size_t f = 0; f < nFreq; f++) { P_Ray_real[ray * nFreq + f] = P_Beams[beam * nFreq * (int)(nRays / raySkips) + ray * nFreq + f].real(); P_Ray_imag[ray * nFreq + f] = P_Beams[beam * nFreq * (int)(nRays / raySkips) + ray * nFreq + f].imag(); } } SAFE_CALL(cudaMemcpy(d_P_Ray_real, P_Ray_real, P_Ray_Bytes, cudaMemcpyHostToDevice), "CUDA Memcpy Failed"); SAFE_CALL(cudaMemcpy(d_P_Ray_imag, P_Ray_imag, P_Ray_Bytes, cudaMemcpyHostToDevice), "CUDA Memcpy Failed"); column_sums_reduce<<<dimGrid_Ray, dimBlock>>>(d_P_Ray_real, d_P_Ray_F_real, nFreq, (int)(nRays / raySkips)); column_sums_reduce<<<dimGrid_Ray, dimBlock>>>(d_P_Ray_imag, d_P_Ray_F_imag, nFreq, (int)(nRays / raySkips)); SAFE_CALL(cudaMemcpy(P_Ray_F_real, d_P_Ray_F_real, P_Ray_F_Bytes, cudaMemcpyDeviceToHost), "CUDA Memcpy Failed"); SAFE_CALL(cudaMemcpy(P_Ray_F_imag, d_P_Ray_F_imag, P_Ray_F_Bytes, cudaMemcpyDeviceToHost), "CUDA Memcpy Failed"); SAFE_CALL(cudaDeviceSynchronize(), "Kernel Launch Failed"); for (size_t f = 0; f < nFreq; f++) P_Beams_F[beam][f] = Complex(P_Ray_F_real[f], P_Ray_F_imag[f]); } // free memory cudaFreeHost(P_Beams); cudaFreeHost(P_Ray_real); cudaFreeHost(P_Ray_imag); cudaFreeHost(P_Ray_F_real); cudaFreeHost(P_Ray_F_imag); cudaFree(d_P_Ray_real); cudaFree(d_P_Ray_imag); cudaFree(d_P_Ray_F_real); cudaFree(d_P_Ray_F_imag); if (debugFlag) { stop = std::chrono::high_resolution_clock::now(); duration 
= std::chrono::duration_cast<std::chrono::microseconds>(stop - start); printf("Sonar Ray Summation %lld/100 [s]\n", static_cast<long long int>(duration.count() / 10000)); start = std::chrono::high_resolution_clock::now(); } // -------------- Beam culling correction -----------------// // beamCorrector and beamCorrectorSum is precalculated at parent cpp float *P_Beams_Cor_real, *P_Beams_Cor_imag; // float *P_Beams_Cor_F_real, *P_Beams_Cor_F_imag; float *P_Beams_Cor_real_tmp, *P_Beams_Cor_imag_tmp; float *d_P_Beams_Cor_real, *d_P_Beams_Cor_imag; float *d_P_Beams_Cor_F_real, *d_P_Beams_Cor_F_imag; const int P_Beams_Cor_N = nBeams * nFreq; const int P_Beams_Cor_Bytes = sizeof(float) * P_Beams_Cor_N; cudaMallocHost((void **)&P_Beams_Cor_real, P_Beams_Cor_Bytes); cudaMallocHost((void **)&P_Beams_Cor_imag, P_Beams_Cor_Bytes); cudaMallocHost((void **)&P_Beams_Cor_real_tmp, P_Beams_Cor_Bytes); cudaMallocHost((void **)&P_Beams_Cor_imag_tmp, P_Beams_Cor_Bytes); // cudaMallocHost((void **)&P_Beams_Cor_F_real, P_Beams_Cor_Bytes); // cudaMallocHost((void **)&P_Beams_Cor_F_imag, P_Beams_Cor_Bytes); SAFE_CALL(cudaMalloc((void **)&d_P_Beams_Cor_real, P_Beams_Cor_Bytes), "CUDA Malloc Failed"); SAFE_CALL(cudaMalloc((void **)&d_P_Beams_Cor_imag, P_Beams_Cor_Bytes), "CUDA Malloc Failed"); SAFE_CALL(cudaMalloc((void **)&d_P_Beams_Cor_F_real, P_Beams_Cor_Bytes), "CUDA Malloc Failed"); SAFE_CALL(cudaMalloc((void **)&d_P_Beams_Cor_F_imag, P_Beams_Cor_Bytes), "CUDA Malloc Failed"); float *beamCorrector_lin, *d_beamCorrector_lin; const int beamCorrector_lin_N = nBeams * nBeams; const int beamCorrector_lin_Bytes = sizeof(float) * beamCorrector_lin_N; cudaMallocHost((void **)&beamCorrector_lin, beamCorrector_lin_Bytes); SAFE_CALL(cudaMalloc((void **)&d_beamCorrector_lin, beamCorrector_lin_Bytes), "CUDA Malloc Failed"); // (nfreq x nBeams) * (nBeams x nBeams) = (nfreq x nBeams) for (size_t beam = 0; beam < nBeams; beam ++) { for (size_t f = 0; f < nFreq; f++) { P_Beams_Cor_real[f * nBeams + beam] = P_Beams_F[beam][f].real() * 1.0f; P_Beams_Cor_imag[f * nBeams + beam] = P_Beams_F[beam][f].imag() * 1.0f; } for (size_t beam_other = 0; beam_other < nBeams; beam_other ++) beamCorrector_lin[beam_other * nBeams + beam] = beamCorrector[beam][beam_other]; } SAFE_CALL(cudaMemcpy(d_P_Beams_Cor_real, P_Beams_Cor_real, P_Beams_Cor_Bytes, cudaMemcpyHostToDevice), "CUDA Memcpy Failed"); SAFE_CALL(cudaMemcpy(d_P_Beams_Cor_imag, P_Beams_Cor_imag, P_Beams_Cor_Bytes, cudaMemcpyHostToDevice), "CUDA Memcpy Failed"); SAFE_CALL(cudaMemcpy(d_beamCorrector_lin, beamCorrector_lin, beamCorrector_lin_Bytes, cudaMemcpyHostToDevice), "CUDA Memcpy Failed"); grid_rows = (nFreq + BLOCK_SIZE - 1) / BLOCK_SIZE; grid_cols = (nBeams + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid_Beam(grid_cols, grid_rows); gpu_matrix_mult<<<dimGrid_Beam, dimBlock>>>(d_P_Beams_Cor_real, d_beamCorrector_lin, d_P_Beams_Cor_F_real, nFreq, nBeams, nBeams); SAFE_CALL(cudaDeviceSynchronize(), "Kernel Launch Failed"); gpu_matrix_mult<<<dimGrid_Beam, dimBlock>>>(d_P_Beams_Cor_imag, d_beamCorrector_lin, d_P_Beams_Cor_F_imag, nFreq, nBeams, nBeams); SAFE_CALL(cudaDeviceSynchronize(), "Kernel Launch Failed"); //Copy back data from destination device meory SAFE_CALL(cudaMemcpy(P_Beams_Cor_real_tmp, d_P_Beams_Cor_F_real, P_Beams_Cor_Bytes, cudaMemcpyDeviceToHost), "CUDA Memcpy Failed"); SAFE_CALL(cudaMemcpy(P_Beams_Cor_imag_tmp, d_P_Beams_Cor_F_imag, P_Beams_Cor_Bytes, cudaMemcpyDeviceToHost), "CUDA Memcpy Failed"); SAFE_CALL(cudaDeviceSynchronize(), "Kernel Launch Failed"); // Return 
for (size_t beam = 0; beam < nBeams; beam ++) for (size_t f = 0; f < nFreq; f++) P_Beams_F[beam][f] = Complex(P_Beams_Cor_real_tmp[f * nBeams + beam] / beamCorrectorSum, P_Beams_Cor_imag_tmp[f * nBeams + beam] / beamCorrectorSum); // Free memory cudaFree(d_P_Beams_Cor_imag); cudaFree(d_P_Beams_Cor_real); cudaFree(d_P_Beams_Cor_F_imag); cudaFree(d_P_Beams_Cor_F_real); cudaFree(d_beamCorrector_lin); cudaFreeHost(P_Beams_Cor_real); cudaFreeHost(P_Beams_Cor_imag); cudaFreeHost(P_Beams_Cor_real_tmp); cudaFreeHost(P_Beams_Cor_imag_tmp); cudaFreeHost(beamCorrector_lin); // For calc time measure if (debugFlag) { stop = std::chrono::high_resolution_clock::now(); duration = std::chrono::duration_cast<std::chrono::microseconds>(stop - start); printf("GPU Window & Correction %lld/100 [s]\n", static_cast<long long int>(duration.count() / 10000)); start = std::chrono::high_resolution_clock::now(); } //#################################################// //################### FFT #####################// //#################################################// SAFE_CALL(cudaDeviceSynchronize(), "Kernel Launch Failed"); const int DATASIZE = nFreq; const int BATCH = nBeams; // --- Host side input data allocation and initialization cufftComplex *hostInputData = (cufftComplex *)malloc( DATASIZE * BATCH * sizeof(cufftComplex)); for (int beam = 0; beam < BATCH; beam++) { for (int f = 0; f < DATASIZE; f++) { if (f < nFreq) hostInputData[beam * DATASIZE + f] = make_cuComplex(P_Beams_F[beam][f].real() * 1.0f, P_Beams_F[beam][f].imag() * 1.0f); else hostInputData[beam * DATASIZE + f] = (make_cuComplex(0.f, 0.f)); // zero padding } } // --- Device side input data allocation and initialization cufftComplex *deviceInputData; SAFE_CALL(cudaMalloc((void **)&deviceInputData, DATASIZE * BATCH * sizeof(cufftComplex)), "FFT CUDA Malloc Failed"); SAFE_CALL(cudaMemcpy(deviceInputData, hostInputData, DATASIZE * BATCH * sizeof(cufftComplex), cudaMemcpyHostToDevice), "FFT CUDA Memcopy Failed"); // --- Host side output data allocation cufftComplex *hostOutputData = (cufftComplex *)malloc(DATASIZE * BATCH * sizeof(cufftComplex)); // --- Device side output data allocation cufftComplex *deviceOutputData; cudaMalloc((void **)&deviceOutputData, DATASIZE * BATCH * sizeof(cufftComplex)); // --- Batched 1D FFTs cufftHandle handle; int rank = 1; // --- 1D FFTs int n[] = {DATASIZE}; // --- Size of the Fourier transform // --- Distance between two successive input/output elements int istride = 1, ostride = 1; int idist = DATASIZE, odist = DATASIZE; // --- Distance between batches // --- Input/Output size with pitch (ignored for 1D transforms) int inembed[] = {0}; int onembed[] = {0}; int batch = BATCH; // --- Number of batched executions cufftPlanMany(&handle, rank, n, inembed, istride, idist, onembed, ostride, odist, CUFFT_C2C, batch); cufftExecC2C(handle, deviceInputData, deviceOutputData, CUFFT_FORWARD); // --- Device->Host copy of the results SAFE_CALL(cudaMemcpy(hostOutputData, deviceOutputData, DATASIZE * BATCH * sizeof(cufftComplex), cudaMemcpyDeviceToHost), "FFT CUDA Memcopy Failed"); cufftDestroy(handle); cudaFree(deviceOutputData); cudaFree(deviceInputData); free(hostInputData); free(hostOutputData); for (int beam = 0; beam < BATCH; beam++) { for (int f = 0; f < nFreq; f++) { P_Beams_F[beam][f] = Complex(hostOutputData[beam * DATASIZE + f].x * delta_f, hostOutputData[beam * DATASIZE + f].y * delta_f); } } // For calc time measure if (debugFlag) { stop = std::chrono::high_resolution_clock::now(); duration = 
std::chrono::duration_cast<std::chrono::microseconds>(stop - start); printf("GPU FFT Calc Time %lld/100 [s]\n", static_cast<long long int>(duration.count() / 10000)); } return P_Beams_F; } } // namespace NpsGazeboSonar
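///////////////////////////////////////////////////////////////////////////
// The FFT stage above relies on cuFFT's batched-plan interface: one 1D
// complex-to-complex transform of length nFreq per beam, with all beams stored
// contiguously. A stripped-down sketch of that pattern follows; error checking
// is omitted and the function/parameter names are illustrative, not part of
// the original wrapper.
#include <cufft.h>

void batched_forward_fft(cufftComplex *d_data, int signal_length, int batch)
{
  cufftHandle plan;
  int n[] = {signal_length};
  // Passing NULL for inembed/onembed tells cuFFT to assume the basic
  // contiguous layout, which matches idist = odist = signal_length here.
  cufftPlanMany(&plan, /*rank=*/1, n,
                /*inembed=*/NULL, /*istride=*/1, /*idist=*/signal_length,
                /*onembed=*/NULL, /*ostride=*/1, /*odist=*/signal_length,
                CUFFT_C2C, batch);
  cufftExecC2C(plan, d_data, d_data, CUFFT_FORWARD); // in-place transform
  cufftDestroy(plan);
}
///////////////////////////////////////////////////////////////////////////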
14f00c86d077781a1865358b27be677b65cc972d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../include/cuda_bordering.h" __global__ void bordering_rect(uchar *d_input, int height, int width, int top, int bottom, int left, int right, uchar color, uchar *d_output) { int row = blockDim.y*blockIdx.y + threadIdx.y; int col = blockDim.x*blockIdx.x + threadIdx.x; // each thread handle 4 pixels for (int i = row; i < height; i += blockDim.y*gridDim.y) // stride by 4 byte for (int j = col; j < width; j += blockDim.x*gridDim.x) { if (i < top || height - i < bottom || j < left || width - j < right) d_output[i*width + j] = color; // top or bottom else d_output[i*width + j] = d_input[i*width + j]; } } void cudaBordering(const cv::Mat & input, int top, int bottom, int left, int right, uchar color, cv::Mat & output) { output = cv::Mat(input.size(), CV_8U, cv::Scalar(0)); // define block size and dim3 block_size(THREAD_MULTIPLE, 6); // divide the image into 16 grids, smaller grid do more things, improve performance a lot. dim3 grid_size(input.cols / (4 * block_size.x), input.rows / (4 * block_size.y)); uchar *d_input, *d_output; hipStream_t stream; CUDA_CALL(hipStreamCreate(&stream)); CUDA_CALL(hipMalloc(&d_input, sizeof(uchar)*input.cols*input.rows)); CUDA_CALL(hipMemcpyAsync(d_input, input.data, sizeof(uchar)*input.cols*input.rows, hipMemcpyHostToDevice, stream)); CUDA_CALL(hipMalloc(&d_output, sizeof(uchar)*input.cols*input.rows)); // calling kernel hipLaunchKernelGGL(( bordering_rect) , dim3(grid_size), dim3(block_size), 0, stream, d_input, input.rows, input.cols, top, bottom, left, right, color, d_output); CUDA_CALL(hipDeviceSynchronize()); CUDA_CALL(hipMemcpy(output.data, d_output, sizeof(uchar)*output.cols*output.rows, hipMemcpyDeviceToHost)); // resources releasing CUDA_CALL(hipStreamDestroy(stream)); CUDA_CALL(hipFree(d_input)); CUDA_CALL(hipFree(d_output)); } __global__ void bordering_circle(uchar *d_input, int height, int width, float radius, uchar color, uchar *d_output) { int row = blockDim.y*blockIdx.y + threadIdx.y; int col = blockDim.x*blockIdx.x + threadIdx.x; // each thread handle 4 pixels for (int i = row; i < height; i += blockDim.y*gridDim.y) // stride by 4 byte for (int j = col; j < width; j += blockDim.x*gridDim.x) { float x = j - width / 2, y = height / 2 - i; float circle = powf(x, 2.0f) + powf(y, 2.0f); float r2 = powf(radius, 2.0f); if (circle <= r2) d_output[i*width + j] = d_input[i*width + j]; else d_output[i*width + j] = color; } } void cudaBorderingCircle(const cv::Mat & input, float radius, uchar color, cv::Mat & output) { output = cv::Mat(input.size(), CV_8U, cv::Scalar(0)); // define block size and dim3 block_size(THREAD_MULTIPLE, 6); // divide the image into 16 grids, smaller grid do more things, improve performance a lot. 
dim3 grid_size(input.cols / (4 * block_size.x), input.rows / (4 * block_size.y)); uchar *d_input, *d_output; hipStream_t stream; CUDA_CALL(hipStreamCreate(&stream)); CUDA_CALL(hipMalloc(&d_input, sizeof(uchar)*input.cols*input.rows)); CUDA_CALL(hipMemcpyAsync(d_input, input.data, sizeof(uchar)*input.cols*input.rows, hipMemcpyHostToDevice, stream)); CUDA_CALL(hipMalloc(&d_output, sizeof(uchar)*input.cols*input.rows)); // calling kernel hipLaunchKernelGGL(( bordering_circle) , dim3(grid_size), dim3(block_size), 0, stream , d_input, input.rows, input.cols, radius, color, d_output); CUDA_CALL(hipDeviceSynchronize()); CUDA_CALL(hipMemcpy(output.data, d_output, sizeof(uchar)*output.cols*output.rows, hipMemcpyDeviceToHost)); // resources releasing CUDA_CALL(hipStreamDestroy(stream)); CUDA_CALL(hipFree(d_input)); CUDA_CALL(hipFree(d_output)); } __global__ void bordering_ellipse(uchar *d_input, int height, int width, float radius_x, float radius_y, uchar color, uchar *d_output) { int row = blockDim.y*blockIdx.y + threadIdx.y; int col = blockDim.x*blockIdx.x + threadIdx.x; // each thread handle 4 pixels for (int i = row; i < height; i += blockDim.y*gridDim.y) // stride by 4 byte for (int j = col; j < width; j += blockDim.x*gridDim.x) { float x = j - width / 2, y = height / 2 - i; float x_2 = powf(x, 2.0f); float y_2 = powf(y, 2.0f); float rx_2 = powf(radius_x, 2.0f); float ry_2 = powf(radius_y, 2.0f); float ellipse = x_2 / rx_2 + y_2 / ry_2; if (ellipse <= 1.0f) d_output[i*width + j] = d_input[i*width + j]; else d_output[i*width + j] = color; } } void cudaBorderingEllipse(const cv::Mat & input, float radius_x, float radius_y, uchar color, cv::Mat & output) { output = cv::Mat(input.size(), CV_8U, cv::Scalar(0)); // define block size and dim3 block_size(THREAD_MULTIPLE, 6); // divide the image into 16 grids, smaller grid do more things, improve performance a lot. dim3 grid_size(input.cols / (4 * block_size.x), input.rows / (4 * block_size.y)); uchar *d_input, *d_output; hipStream_t stream; CUDA_CALL(hipStreamCreate(&stream)); CUDA_CALL(hipMalloc(&d_input, sizeof(uchar)*input.cols*input.rows)); CUDA_CALL(hipMemcpyAsync(d_input, input.data, sizeof(uchar)*input.cols*input.rows, hipMemcpyHostToDevice, stream)); CUDA_CALL(hipMalloc(&d_output, sizeof(uchar)*input.cols*input.rows)); // calling kernel hipLaunchKernelGGL(( bordering_ellipse) , dim3(grid_size), dim3(block_size), 0, stream, d_input, input.rows, input.cols, radius_x, radius_y, color, d_output); CUDA_CALL(hipDeviceSynchronize()); CUDA_CALL(hipMemcpy(output.data, d_output, sizeof(uchar)*output.cols*output.rows, hipMemcpyDeviceToHost)); // resources releasing CUDA_CALL(hipStreamDestroy(stream)); CUDA_CALL(hipFree(d_input)); CUDA_CALL(hipFree(d_output)); }
14f00c86d077781a1865358b27be677b65cc972d.cu
#include "../include/cuda_bordering.h" __global__ void bordering_rect(uchar *d_input, int height, int width, int top, int bottom, int left, int right, uchar color, uchar *d_output) { int row = blockDim.y*blockIdx.y + threadIdx.y; int col = blockDim.x*blockIdx.x + threadIdx.x; // each thread handle 4 pixels for (int i = row; i < height; i += blockDim.y*gridDim.y) // stride by 4 byte for (int j = col; j < width; j += blockDim.x*gridDim.x) { if (i < top || height - i < bottom || j < left || width - j < right) d_output[i*width + j] = color; // top or bottom else d_output[i*width + j] = d_input[i*width + j]; } } void cudaBordering(const cv::Mat & input, int top, int bottom, int left, int right, uchar color, cv::Mat & output) { output = cv::Mat(input.size(), CV_8U, cv::Scalar(0)); // define block size and dim3 block_size(THREAD_MULTIPLE, 6); // divide the image into 16 grids, smaller grid do more things, improve performance a lot. dim3 grid_size(input.cols / (4 * block_size.x), input.rows / (4 * block_size.y)); uchar *d_input, *d_output; cudaStream_t stream; CUDA_CALL(cudaStreamCreate(&stream)); CUDA_CALL(cudaMalloc(&d_input, sizeof(uchar)*input.cols*input.rows)); CUDA_CALL(cudaMemcpyAsync(d_input, input.data, sizeof(uchar)*input.cols*input.rows, cudaMemcpyHostToDevice, stream)); CUDA_CALL(cudaMalloc(&d_output, sizeof(uchar)*input.cols*input.rows)); // calling kernel bordering_rect <<<grid_size, block_size, 0, stream>>> (d_input, input.rows, input.cols, top, bottom, left, right, color, d_output); CUDA_CALL(cudaDeviceSynchronize()); CUDA_CALL(cudaMemcpy(output.data, d_output, sizeof(uchar)*output.cols*output.rows, cudaMemcpyDeviceToHost)); // resources releasing CUDA_CALL(cudaStreamDestroy(stream)); CUDA_CALL(cudaFree(d_input)); CUDA_CALL(cudaFree(d_output)); } __global__ void bordering_circle(uchar *d_input, int height, int width, float radius, uchar color, uchar *d_output) { int row = blockDim.y*blockIdx.y + threadIdx.y; int col = blockDim.x*blockIdx.x + threadIdx.x; // each thread handle 4 pixels for (int i = row; i < height; i += blockDim.y*gridDim.y) // stride by 4 byte for (int j = col; j < width; j += blockDim.x*gridDim.x) { float x = j - width / 2, y = height / 2 - i; float circle = powf(x, 2.0f) + powf(y, 2.0f); float r2 = powf(radius, 2.0f); if (circle <= r2) d_output[i*width + j] = d_input[i*width + j]; else d_output[i*width + j] = color; } } void cudaBorderingCircle(const cv::Mat & input, float radius, uchar color, cv::Mat & output) { output = cv::Mat(input.size(), CV_8U, cv::Scalar(0)); // define block size and dim3 block_size(THREAD_MULTIPLE, 6); // divide the image into 16 grids, smaller grid do more things, improve performance a lot. 
dim3 grid_size(input.cols / (4 * block_size.x), input.rows / (4 * block_size.y)); uchar *d_input, *d_output; cudaStream_t stream; CUDA_CALL(cudaStreamCreate(&stream)); CUDA_CALL(cudaMalloc(&d_input, sizeof(uchar)*input.cols*input.rows)); CUDA_CALL(cudaMemcpyAsync(d_input, input.data, sizeof(uchar)*input.cols*input.rows, cudaMemcpyHostToDevice, stream)); CUDA_CALL(cudaMalloc(&d_output, sizeof(uchar)*input.cols*input.rows)); // calling kernel bordering_circle <<<grid_size, block_size, 0, stream >>> (d_input, input.rows, input.cols, radius, color, d_output); CUDA_CALL(cudaDeviceSynchronize()); CUDA_CALL(cudaMemcpy(output.data, d_output, sizeof(uchar)*output.cols*output.rows, cudaMemcpyDeviceToHost)); // resources releasing CUDA_CALL(cudaStreamDestroy(stream)); CUDA_CALL(cudaFree(d_input)); CUDA_CALL(cudaFree(d_output)); } __global__ void bordering_ellipse(uchar *d_input, int height, int width, float radius_x, float radius_y, uchar color, uchar *d_output) { int row = blockDim.y*blockIdx.y + threadIdx.y; int col = blockDim.x*blockIdx.x + threadIdx.x; // each thread handle 4 pixels for (int i = row; i < height; i += blockDim.y*gridDim.y) // stride by 4 byte for (int j = col; j < width; j += blockDim.x*gridDim.x) { float x = j - width / 2, y = height / 2 - i; float x_2 = powf(x, 2.0f); float y_2 = powf(y, 2.0f); float rx_2 = powf(radius_x, 2.0f); float ry_2 = powf(radius_y, 2.0f); float ellipse = x_2 / rx_2 + y_2 / ry_2; if (ellipse <= 1.0f) d_output[i*width + j] = d_input[i*width + j]; else d_output[i*width + j] = color; } } void cudaBorderingEllipse(const cv::Mat & input, float radius_x, float radius_y, uchar color, cv::Mat & output) { output = cv::Mat(input.size(), CV_8U, cv::Scalar(0)); // define block size and dim3 block_size(THREAD_MULTIPLE, 6); // divide the image into 16 grids, smaller grid do more things, improve performance a lot. dim3 grid_size(input.cols / (4 * block_size.x), input.rows / (4 * block_size.y)); uchar *d_input, *d_output; cudaStream_t stream; CUDA_CALL(cudaStreamCreate(&stream)); CUDA_CALL(cudaMalloc(&d_input, sizeof(uchar)*input.cols*input.rows)); CUDA_CALL(cudaMemcpyAsync(d_input, input.data, sizeof(uchar)*input.cols*input.rows, cudaMemcpyHostToDevice, stream)); CUDA_CALL(cudaMalloc(&d_output, sizeof(uchar)*input.cols*input.rows)); // calling kernel bordering_ellipse <<<grid_size, block_size, 0, stream>>> (d_input, input.rows, input.cols, radius_x, radius_y, color, d_output); CUDA_CALL(cudaDeviceSynchronize()); CUDA_CALL(cudaMemcpy(output.data, d_output, sizeof(uchar)*output.cols*output.rows, cudaMemcpyDeviceToHost)); // resources releasing CUDA_CALL(cudaStreamDestroy(stream)); CUDA_CALL(cudaFree(d_input)); CUDA_CALL(cudaFree(d_output)); }
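///////////////////////////////////////////////////////////////////////////
// Minimal host-side usage sketch for the bordering helpers above. The image
// path, border sizes and radii are arbitrary examples; note that the fixed
// grid computation in these functions assumes the image width is a multiple of
// 4*THREAD_MULTIPLE and the height a multiple of 4*6 (no remainder handling).
#include <opencv2/opencv.hpp>
#include "../include/cuda_bordering.h" // header path as used in the original sources

int main()
{
  cv::Mat input = cv::imread("input.png", cv::IMREAD_GRAYSCALE); // placeholder path
  if (input.empty()) return 1;

  cv::Mat rect_out, circle_out, ellipse_out;
  cudaBordering(input, /*top=*/20, /*bottom=*/20, /*left=*/40, /*right=*/40,
                /*color=*/0, rect_out);
  cudaBorderingCircle(input, /*radius=*/input.rows / 2.0f, /*color=*/255, circle_out);
  cudaBorderingEllipse(input, /*radius_x=*/input.cols / 3.0f,
                       /*radius_y=*/input.rows / 4.0f, /*color=*/255, ellipse_out);

  cv::imwrite("bordered_rect.png", rect_out);
  cv::imwrite("bordered_circle.png", circle_out);
  cv::imwrite("bordered_ellipse.png", ellipse_out);
  return 0;
}
///////////////////////////////////////////////////////////////////////////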
7c0e2c5341e9e4f9355013018da248037c2a3b7c.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <math.h> #include <hip/hip_runtime_api.h> #include <hiprand/hiprand.h> #include "hiprand/hiprand_kernel.h" #include <assert.h> // L should be (multiple of (THR_NUMBER - 2) ) + 2 #define L 114 const int AREA = L*L; const int NTOT = (L-2)*(L-2); // #define T 6. // #define T 0.1 // #define T 2.26918531421 #define T_CYCLE_START 1.5 #define T_CYCLE_END 3.0 #define T_CYCLE_STEP 0.04 #define SINGLETEMP 3.5 int n_temps = ( T_CYCLE_END - T_CYCLE_START )/ (T_CYCLE_STEP); #define J 1. #define SEED 1000 struct measure_plan { int steps_repeat; int t_max_sim; int t_measure_wait; int t_measure_interval; } static PLAN = { .steps_repeat = 1, .t_max_sim = 251, .t_measure_wait = 50, .t_measure_interval = 20 }; // print history true/false #define HISTORY 1 const int THR_NUMBER = 30; const int BLOCK_NUMBER = ( L-2)/( THR_NUMBER - 2 ); const dim3 BLOCKS( BLOCK_NUMBER, BLOCK_NUMBER ); const dim3 THREADS( THR_NUMBER, THR_NUMBER ); // average tracker struct struct avg_tr { float sum; float sum_squares; int n; }; struct avg_tr new_avg_tr(int locn) { struct avg_tr a = { .sum = 0, .sum_squares = 0, .n = locn}; return a; } void update_avg(struct avg_tr * tr_p, float newval) { tr_p->sum += newval; tr_p->sum_squares += (newval*newval); } // __device__ static inline void dev_update_avg(struct avg_tr * tr_p, float newval) { // tr_p->sum += newval; // tr_p->sum_squares += (newval*newval); // } float average( struct avg_tr tr) { return (tr.sum)/((float) tr.n) ; } float stdev( struct avg_tr tr) { return sqrt( ( tr.sum_squares)/((float) tr.n) - pow(( (tr.sum)/((float) tr.n) ),2) ); } float variance( struct avg_tr tr) { return ( ( tr.sum_squares)/((float) tr.n) - pow(( (tr.sum)/((float) tr.n) ),2) ); } // RNG init kernel __global__ void initRNG(hiprandState_t * const rngStates, const unsigned int seed) { // Determine thread ID int blockId = blockIdx.x+ blockIdx.y * gridDim.x; int tid = blockId * (blockDim.x * blockDim.y)+ (threadIdx.y * blockDim.x)+ threadIdx.x; // Initialise the RNG hiprand_init(seed, tid, 0, &rngStates[tid]); } // static inline float unitrand(){ // return (float)rand() / (float)RAND_MAX; // } __device__ static inline float dev_unitrand( hiprandState_t * const rngStates, unsigned int tid ){ hiprandState_t localState = rngStates[tid]; float val = hiprand_uniform(&localState); rngStates[tid] = localState; return val; } void init_random(char grid[L*L]) { for(int x = 0; x<L; x++) { for(int y = 0; y<L; y++) { grid[x+y*L] = rand() & 1; } } } void init_t0(char grid[L*L]) { for(int x = 0; x<L; x++) { for(int y = 0; y<L; y++) { grid[x+y*L] = 0; } } } void dump(char grid[L*L]) { for(int x = 0; x<L; x++) { for(int y = 0; y<L; y++) { // if(grid[x+y*L] == 0) printf(""); // else printf(""); if(grid[x+y*L] == 0) printf(" "); else printf(""); // printf("%i", grid[x+y*L]); } printf("\n"); } printf("\n"); } __device__ void dev_dump(char grid[L*L]) { for(int x = 0; x<L; x++) { for(int y = 0; y<L; y++) { // if(grid[x+y*L] == 0) printf(""); // else printf(""); if(grid[x+y*L] == 0) printf(" "); else printf(""); // printf("%i", grid[x+y*L]); } printf("\n"); } printf("\n"); } struct coords { int x; int y; }; __device__ static inline coords dev_get_thread_coords() { struct coords thread_coords; thread_coords.x = blockIdx.x*( THR_NUMBER - 2 ) + ( threadIdx.x ) ; thread_coords.y = blockIdx.y*( THR_NUMBER - 2 ) + ( threadIdx.y ) ; return thread_coords; } // can segfault __device__ static inline char dev_shared_grid_step(char 
shared_grid[THR_NUMBER*THR_NUMBER], int x, int y, int xstep, int ystep) { return shared_grid[(x+xstep) + (y+ystep)*THR_NUMBER]; } // segfault if applied to an edge spin, call only on the inner THR_NUMBER-1 grid __device__ void dev_update_spin_shared(char dev_shared_grid[ THR_NUMBER*THR_NUMBER ], int x, int y , hiprandState_t * const rngStates, unsigned int tid, double temperature ) { char s0 = dev_shared_grid[x+y*THR_NUMBER]; char j1 = s0 ^ dev_shared_grid_step(dev_shared_grid, x, y, 1, 0); char j2 = s0 ^ dev_shared_grid_step(dev_shared_grid, x, y, -1, 0); char j3 = s0 ^ dev_shared_grid_step(dev_shared_grid, x, y, 0, 1); char j4 = s0 ^ dev_shared_grid_step(dev_shared_grid, x, y, 0, -1); float dh = (float) ( -((j1 + j2 + j3 + j4) *2 -4)*2*J ); float p = exp( -dh / temperature); float ur = dev_unitrand(rngStates, tid); if(ur < p ) { dev_shared_grid[x+y*THR_NUMBER] = !dev_shared_grid[x+y*THR_NUMBER]; } } __device__ void dev_update_grid_shared(char grid[L*L], hiprandState_t * const rngStates, double temperature ) { // the first argument here is the GLOBAL grid // thread coords relative to the GLOBAL grid struct coords glob_coords = dev_get_thread_coords(); int glob_x = glob_coords.x; int glob_y = glob_coords.y; // Determine thread ID (for RNG) int blockId = blockIdx.x+ blockIdx.y * gridDim.x; int tid = blockId * (blockDim.x * blockDim.y)+ (threadIdx.y * blockDim.x)+ threadIdx.x; __shared__ char shared_grid[ THR_NUMBER*THR_NUMBER ]; shared_grid[ threadIdx.x + threadIdx.y*THR_NUMBER ] = grid[(glob_x )+ (glob_y )*L ]; // check formulas __syncthreads(); // thread coords relative to the shared grid int shared_x = threadIdx.x; int shared_y = threadIdx.y; // macro-checkboards // macro-white if( (blockIdx.x + (blockIdx.y)%2)%2 == 0 ) { ///////////// // checkboards // update only in the inner 30x30 block of threads, because the edge threads aren't mapped to any grid spins if ( threadIdx.x > 0 && threadIdx.x != THR_NUMBER-1 && threadIdx.y > 0 && threadIdx.y != THR_NUMBER-1 ) { // white if( (glob_x + glob_y%2)%2 == 0 ) { dev_update_spin_shared( shared_grid, shared_x, shared_y, rngStates, tid, temperature ); } } __syncthreads(); if ( threadIdx.x > 0 && threadIdx.x != THR_NUMBER-1 && threadIdx.y > 0 && threadIdx.y != THR_NUMBER-1 ) { // black if( (glob_x + glob_y%2)%2 == 1 ) { dev_update_spin_shared( shared_grid, shared_x, shared_y, rngStates, tid, temperature ); } } __syncthreads(); if ( threadIdx.x > 0 && threadIdx.x != THR_NUMBER-1 && threadIdx.y > 0 && threadIdx.y != THR_NUMBER-1 ) { grid[(glob_x )+ (glob_y )*L ] = shared_grid[ threadIdx.x + threadIdx.y*THR_NUMBER ] ; } ////////// } __syncthreads(); // macro-black if( (blockIdx.x + (blockIdx.y)%2)%2 == 1 ) { ////////// // checkboards // update only in the inner 30x30 block of threads, because the edge threads aren't mapped to any grid spins if ( threadIdx.x > 0 && threadIdx.x != THR_NUMBER-1 && threadIdx.y > 0 && threadIdx.y != THR_NUMBER-1 ) { // white if( (glob_x + glob_y%2)%2 == 0 ) { dev_update_spin_shared( shared_grid, shared_x, shared_y, rngStates, tid, temperature ); } } __syncthreads(); if ( threadIdx.x > 0 && threadIdx.x != THR_NUMBER-1 && threadIdx.y > 0 && threadIdx.y != THR_NUMBER-1 ) { // black if( (glob_x + glob_y%2)%2 == 1 ) { dev_update_spin_shared( shared_grid, shared_x, shared_y, rngStates, tid, temperature ); } } __syncthreads(); if ( threadIdx.x > 0 && threadIdx.x != THR_NUMBER-1 && threadIdx.y > 0 && threadIdx.y != THR_NUMBER-1 ) { grid[(glob_x )+ (glob_y )*L ] = shared_grid[ threadIdx.x + threadIdx.y*THR_NUMBER ] ; } ////////// } 
} __device__ void dev_update_magnetization_tracker(char dev_grid[L*L], float * dev_single_run_avg, int * dev_partial_res ) { struct coords glob_coords = dev_get_thread_coords(); int glob_x = glob_coords.x; int glob_y = glob_coords.y; if ( threadIdx.x != 0 && threadIdx.x != THR_NUMBER-1 && threadIdx.y != 0 && threadIdx.y != THR_NUMBER-1 ) { int spin = (int) dev_grid[glob_x+glob_y*L]; atomicAdd(dev_partial_res, spin ); } __syncthreads(); if ( blockIdx.x == 0 && blockIdx.y == 0 && threadIdx.x == 0 && threadIdx.y == 0) { float val = ( ((float) (*dev_partial_res) *2 ) - NTOT ) / (float) NTOT; /*fl__*/ *dev_single_run_avg += val; *dev_partial_res = 0; } } __global__ /*fl__*/ void dev_measure_cycle_kernel(struct measure_plan pl, char * dev_grid, hiprandState_t * const rngStates, float * dev_single_run_avg, int * dev_partial_res , double temperature ) { // INNER SIM LOOPS int ksim=0; for( ; ksim<pl.t_measure_wait; ksim++) { dev_update_grid_shared(dev_grid, rngStates, temperature); } // end thermalization for( ; ksim<pl.t_max_sim; ksim++) { dev_update_grid_shared(dev_grid, rngStates, temperature); ////////////measures if( ksim % pl.t_measure_interval == 0) { dev_update_magnetization_tracker(dev_grid, dev_single_run_avg, dev_partial_res ); } } // END INNER SIM LOOPS } void parall_measure_cycle(char startgrid[L*L], struct measure_plan pl, char * dev_grid, hiprandState_t * const rngStates, FILE *resf, double temperature ) { //OUTER REP LOOP ////////////measures float n_measures_per_sim = (float) ((pl.t_max_sim - pl.t_measure_wait)/pl.t_measure_interval); struct avg_tr outer_avg_tr = new_avg_tr(pl.steps_repeat); // extra space needed by dev_update_magnetization_tracker int * dev_partial_res; hipMalloc(&dev_partial_res, sizeof(int)); for( int krep=0; krep< pl.steps_repeat; krep++) { /*fl__*/ float single_run_avg = 0.; /*fl__*/ float * dev_single_run_avg; /*fl__*/ hipMalloc(&dev_single_run_avg, sizeof(float)); /*fl__*/ hipMemcpy(dev_single_run_avg, &single_run_avg, sizeof(float), hipMemcpyHostToDevice); // printf("seeding with %i\n", SEED+krep); // initialize starting grid on the device for this sim hipMemcpy(dev_grid, startgrid, L*L*sizeof(char), hipMemcpyHostToDevice); /*fl__*/hipLaunchKernelGGL(( dev_measure_cycle_kernel), dim3(BLOCKS), dim3(THREADS), 0, 0, pl, dev_grid, rngStates, dev_single_run_avg, dev_partial_res, temperature ); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("ERROR: %s\n", hipGetErrorString(err)); } // bring back results to CPU /*fl__*/ hipMemcpy(&single_run_avg, dev_single_run_avg, sizeof(float), hipMemcpyDeviceToHost); /*fl__*/ float lres = single_run_avg/(n_measures_per_sim); // /*fl__*/ float lstdev = stdev(single_run_avg); if (HISTORY) printf(" temperature: %f\n", temperature); if (HISTORY) printf("# average of simulation %i:\n %f\n", krep+1, lres); update_avg(&outer_avg_tr, lres); char endgrid[L*L]; hipMemcpy(endgrid, dev_grid, L*L*sizeof(char), hipMemcpyDeviceToHost); if (HISTORY) dump(endgrid); /*fl__*/ hipFree(dev_single_run_avg); } // END OUTER REP LOOP ////////////measures fprintf(resf, "%f ", temperature); fprintf(resf, "%f ", average(outer_avg_tr)); fprintf(resf, "%f\n", stdev(outer_avg_tr)); hipFree(dev_partial_res); } int main() { // L should be (multiple of THR_NUMBER -2) + 2 assert( ((L-2)% (THR_NUMBER-2) )== 0 ); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); FILE *resf = fopen("results.txt", "w"); fprintf(resf, "# gpu1\n"); fprintf(resf, "# parameters:\n# linear_size: %i\n", L); fprintf(resf, "# 
coupling: %f\n# repetitions: %i\n", J, PLAN.steps_repeat); fprintf(resf, "# simulation_t_max: %i\n# thermalization_time: %i\n# time_between_measurements: %i\n# base_random_seed: %i\n", PLAN.t_max_sim, PLAN.t_measure_wait, PLAN.t_measure_interval, SEED); fprintf(resf, "# extra:\n# area: %i\n# active_spins_excluding_boundaries:%i\n", AREA, NTOT); fprintf(resf, "\n"); fprintf(resf, "# columns: temperature - average magnetization - uncertainty \n"); srand(SEED); // hiprand init // Allocate memory for RNG states hiprandState_t *d_rngStates = 0; hipMalloc((void **)&d_rngStates, THR_NUMBER*THR_NUMBER*BLOCK_NUMBER*BLOCK_NUMBER*sizeof(hiprandState_t)); // Initialise RNG hipLaunchKernelGGL(( initRNG), dim3(BLOCKS), dim3(THREADS), 0, 0, d_rngStates, SEED); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("ERROR: %s\n", hipGetErrorString(err)); } // device grid char * dev_grid; hipMalloc(&dev_grid, L*L*sizeof(char)); char startgrid[L*L]; init_t0(startgrid); // if (HISTORY) printf("starting grid:\n"); // if (HISTORY) dump(startgrid); // // // temp cycle: // for( double kt=T_CYCLE_START; kt<T_CYCLE_END; kt+=T_CYCLE_STEP ) { // parall_measure_cycle(startgrid, PLAN, dev_grid, d_rngStates, resf, kt); // } // only 1: parall_measure_cycle(startgrid, PLAN, dev_grid, d_rngStates, resf, SINGLETEMP); hipFree(d_rngStates); hipFree(dev_grid); hipEventRecord(stop); hipEventSynchronize(stop); float total_time = 0; hipEventElapsedTime(&total_time, start, stop); FILE *timef = fopen("time.txt", "w"); long int total_flips = ((long int)(n_temps))* ((long int)((PLAN.steps_repeat))) * ((long int)(PLAN.t_max_sim)) * ((long int)(NTOT)); fprintf(timef, "# gpu1\n"); fprintf(timef, "# total execution time (milliseconds):\n"); fprintf(timef, "%f\n", total_time); fprintf(timef, "# total spin flips performed:\n"); fprintf(timef, "%li\n", total_flips); fprintf(timef, "# average spin flips per millisecond:\n"); fprintf(timef, "%Lf\n", ((long double) total_flips )/( (long double) total_time ) ); fclose(timef); fclose(resf); return 0; }
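The magnetization tracker above accumulates a per-measurement spin count with atomicAdd into one global counter and then rescales it to a value in [-1, 1] via (2 * count - NTOT) / NTOT. Below is a minimal, self-contained sketch of that reduction pattern; the grid size, kernel name and variable names are illustrative assumptions, not taken from the file above.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Each thread adds its 0/1 spin into one global counter; the host then maps
// the count of "up" spins onto a magnetization in [-1, 1].
__global__ void count_up_spins(const char *grid, int n, int *partial)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) atomicAdd(partial, (int)grid[i]);
}

int main()
{
    const int n = 64 * 64;                        // toy grid, not the L*L above
    char *h = (char *)malloc(n);
    for (int i = 0; i < n; i++) h[i] = (i % 3 == 0);

    char *d_grid;
    int *d_partial;
    cudaMalloc(&d_grid, n);
    cudaMalloc(&d_partial, sizeof(int));
    cudaMemset(d_partial, 0, sizeof(int));
    cudaMemcpy(d_grid, h, n, cudaMemcpyHostToDevice);

    count_up_spins<<<(n + 255) / 256, 256>>>(d_grid, n, d_partial);

    int up = 0;
    cudaMemcpy(&up, d_partial, sizeof(int), cudaMemcpyDeviceToHost);
    printf("magnetization = %f\n", (2.0f * up - n) / n);   // same normalization as the tracker

    cudaFree(d_grid);
    cudaFree(d_partial);
    free(h);
    return 0;
}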
7c0e2c5341e9e4f9355013018da248037c2a3b7c.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <cuda_runtime_api.h> #include <curand.h> #include "curand_kernel.h" #include <assert.h> // L should be (multiple of (THR_NUMBER - 2) ) + 2 #define L 114 const int AREA = L*L; const int NTOT = (L-2)*(L-2); // #define T 6. // #define T 0.1 // #define T 2.26918531421 #define T_CYCLE_START 1.5 #define T_CYCLE_END 3.0 #define T_CYCLE_STEP 0.04 #define SINGLETEMP 3.5 int n_temps = ( T_CYCLE_END - T_CYCLE_START )/ (T_CYCLE_STEP); #define J 1. #define SEED 1000 struct measure_plan { int steps_repeat; int t_max_sim; int t_measure_wait; int t_measure_interval; } static PLAN = { .steps_repeat = 1, .t_max_sim = 251, .t_measure_wait = 50, .t_measure_interval = 20 }; // print history true/false #define HISTORY 1 const int THR_NUMBER = 30; const int BLOCK_NUMBER = ( L-2)/( THR_NUMBER - 2 ); const dim3 BLOCKS( BLOCK_NUMBER, BLOCK_NUMBER ); const dim3 THREADS( THR_NUMBER, THR_NUMBER ); // average tracker struct struct avg_tr { float sum; float sum_squares; int n; }; struct avg_tr new_avg_tr(int locn) { struct avg_tr a = { .sum = 0, .sum_squares = 0, .n = locn}; return a; } void update_avg(struct avg_tr * tr_p, float newval) { tr_p->sum += newval; tr_p->sum_squares += (newval*newval); } // __device__ static inline void dev_update_avg(struct avg_tr * tr_p, float newval) { // tr_p->sum += newval; // tr_p->sum_squares += (newval*newval); // } float average( struct avg_tr tr) { return (tr.sum)/((float) tr.n) ; } float stdev( struct avg_tr tr) { return sqrt( ( tr.sum_squares)/((float) tr.n) - pow(( (tr.sum)/((float) tr.n) ),2) ); } float variance( struct avg_tr tr) { return ( ( tr.sum_squares)/((float) tr.n) - pow(( (tr.sum)/((float) tr.n) ),2) ); } // RNG init kernel __global__ void initRNG(curandState * const rngStates, const unsigned int seed) { // Determine thread ID int blockId = blockIdx.x+ blockIdx.y * gridDim.x; int tid = blockId * (blockDim.x * blockDim.y)+ (threadIdx.y * blockDim.x)+ threadIdx.x; // Initialise the RNG curand_init(seed, tid, 0, &rngStates[tid]); } // static inline float unitrand(){ // return (float)rand() / (float)RAND_MAX; // } __device__ static inline float dev_unitrand( curandState * const rngStates, unsigned int tid ){ curandState localState = rngStates[tid]; float val = curand_uniform(&localState); rngStates[tid] = localState; return val; } void init_random(char grid[L*L]) { for(int x = 0; x<L; x++) { for(int y = 0; y<L; y++) { grid[x+y*L] = rand() & 1; } } } void init_t0(char grid[L*L]) { for(int x = 0; x<L; x++) { for(int y = 0; y<L; y++) { grid[x+y*L] = 0; } } } void dump(char grid[L*L]) { for(int x = 0; x<L; x++) { for(int y = 0; y<L; y++) { // if(grid[x+y*L] == 0) printf("•"); // else printf("◘"); if(grid[x+y*L] == 0) printf(" "); else printf("█"); // printf("%i", grid[x+y*L]); } printf("\n"); } printf("\n"); } __device__ void dev_dump(char grid[L*L]) { for(int x = 0; x<L; x++) { for(int y = 0; y<L; y++) { // if(grid[x+y*L] == 0) printf("•"); // else printf("◘"); if(grid[x+y*L] == 0) printf(" "); else printf("█"); // printf("%i", grid[x+y*L]); } printf("\n"); } printf("\n"); } struct coords { int x; int y; }; __device__ static inline coords dev_get_thread_coords() { struct coords thread_coords; thread_coords.x = blockIdx.x*( THR_NUMBER - 2 ) + ( threadIdx.x ) ; thread_coords.y = blockIdx.y*( THR_NUMBER - 2 ) + ( threadIdx.y ) ; return thread_coords; } // can segfault __device__ static inline char dev_shared_grid_step(char shared_grid[THR_NUMBER*THR_NUMBER], int x, int y, int xstep, int ystep) { return 
shared_grid[(x+xstep) + (y+ystep)*THR_NUMBER]; } // segfault if applied to an edge spin, call only on the inner THR_NUMBER-1 grid __device__ void dev_update_spin_shared(char dev_shared_grid[ THR_NUMBER*THR_NUMBER ], int x, int y , curandState * const rngStates, unsigned int tid, double temperature ) { char s0 = dev_shared_grid[x+y*THR_NUMBER]; char j1 = s0 ^ dev_shared_grid_step(dev_shared_grid, x, y, 1, 0); char j2 = s0 ^ dev_shared_grid_step(dev_shared_grid, x, y, -1, 0); char j3 = s0 ^ dev_shared_grid_step(dev_shared_grid, x, y, 0, 1); char j4 = s0 ^ dev_shared_grid_step(dev_shared_grid, x, y, 0, -1); float dh = (float) ( -((j1 + j2 + j3 + j4) *2 -4)*2*J ); float p = exp( -dh / temperature); float ur = dev_unitrand(rngStates, tid); if(ur < p ) { dev_shared_grid[x+y*THR_NUMBER] = !dev_shared_grid[x+y*THR_NUMBER]; } } __device__ void dev_update_grid_shared(char grid[L*L], curandState * const rngStates, double temperature ) { // the first argument here is the GLOBAL grid // thread coords relative to the GLOBAL grid struct coords glob_coords = dev_get_thread_coords(); int glob_x = glob_coords.x; int glob_y = glob_coords.y; // Determine thread ID (for RNG) int blockId = blockIdx.x+ blockIdx.y * gridDim.x; int tid = blockId * (blockDim.x * blockDim.y)+ (threadIdx.y * blockDim.x)+ threadIdx.x; __shared__ char shared_grid[ THR_NUMBER*THR_NUMBER ]; shared_grid[ threadIdx.x + threadIdx.y*THR_NUMBER ] = grid[(glob_x )+ (glob_y )*L ]; // check formulas __syncthreads(); // thread coords relative to the shared grid int shared_x = threadIdx.x; int shared_y = threadIdx.y; // macro-checkboards // macro-white if( (blockIdx.x + (blockIdx.y)%2)%2 == 0 ) { ///////////// // checkboards // update only in the inner 30x30 block of threads, because the edge threads aren't mapped to any grid spins if ( threadIdx.x > 0 && threadIdx.x != THR_NUMBER-1 && threadIdx.y > 0 && threadIdx.y != THR_NUMBER-1 ) { // white if( (glob_x + glob_y%2)%2 == 0 ) { dev_update_spin_shared( shared_grid, shared_x, shared_y, rngStates, tid, temperature ); } } __syncthreads(); if ( threadIdx.x > 0 && threadIdx.x != THR_NUMBER-1 && threadIdx.y > 0 && threadIdx.y != THR_NUMBER-1 ) { // black if( (glob_x + glob_y%2)%2 == 1 ) { dev_update_spin_shared( shared_grid, shared_x, shared_y, rngStates, tid, temperature ); } } __syncthreads(); if ( threadIdx.x > 0 && threadIdx.x != THR_NUMBER-1 && threadIdx.y > 0 && threadIdx.y != THR_NUMBER-1 ) { grid[(glob_x )+ (glob_y )*L ] = shared_grid[ threadIdx.x + threadIdx.y*THR_NUMBER ] ; } ////////// } __syncthreads(); // macro-black if( (blockIdx.x + (blockIdx.y)%2)%2 == 1 ) { ////////// // checkboards // update only in the inner 30x30 block of threads, because the edge threads aren't mapped to any grid spins if ( threadIdx.x > 0 && threadIdx.x != THR_NUMBER-1 && threadIdx.y > 0 && threadIdx.y != THR_NUMBER-1 ) { // white if( (glob_x + glob_y%2)%2 == 0 ) { dev_update_spin_shared( shared_grid, shared_x, shared_y, rngStates, tid, temperature ); } } __syncthreads(); if ( threadIdx.x > 0 && threadIdx.x != THR_NUMBER-1 && threadIdx.y > 0 && threadIdx.y != THR_NUMBER-1 ) { // black if( (glob_x + glob_y%2)%2 == 1 ) { dev_update_spin_shared( shared_grid, shared_x, shared_y, rngStates, tid, temperature ); } } __syncthreads(); if ( threadIdx.x > 0 && threadIdx.x != THR_NUMBER-1 && threadIdx.y > 0 && threadIdx.y != THR_NUMBER-1 ) { grid[(glob_x )+ (glob_y )*L ] = shared_grid[ threadIdx.x + threadIdx.y*THR_NUMBER ] ; } ////////// } } __device__ void dev_update_magnetization_tracker(char dev_grid[L*L], float * 
dev_single_run_avg, int * dev_partial_res ) { struct coords glob_coords = dev_get_thread_coords(); int glob_x = glob_coords.x; int glob_y = glob_coords.y; if ( threadIdx.x != 0 && threadIdx.x != THR_NUMBER-1 && threadIdx.y != 0 && threadIdx.y != THR_NUMBER-1 ) { int spin = (int) dev_grid[glob_x+glob_y*L]; atomicAdd(dev_partial_res, spin ); } __syncthreads(); if ( blockIdx.x == 0 && blockIdx.y == 0 && threadIdx.x == 0 && threadIdx.y == 0) { float val = ( ((float) (*dev_partial_res) *2 ) - NTOT ) / (float) NTOT; /*fl__*/ *dev_single_run_avg += val; *dev_partial_res = 0; } } __global__ /*fl__*/ void dev_measure_cycle_kernel(struct measure_plan pl, char * dev_grid, curandState * const rngStates, float * dev_single_run_avg, int * dev_partial_res , double temperature ) { // INNER SIM LOOPS int ksim=0; for( ; ksim<pl.t_measure_wait; ksim++) { dev_update_grid_shared(dev_grid, rngStates, temperature); } // end thermalization for( ; ksim<pl.t_max_sim; ksim++) { dev_update_grid_shared(dev_grid, rngStates, temperature); ////////////measures if( ksim % pl.t_measure_interval == 0) { dev_update_magnetization_tracker(dev_grid, dev_single_run_avg, dev_partial_res ); } } // END INNER SIM LOOPS } void parall_measure_cycle(char startgrid[L*L], struct measure_plan pl, char * dev_grid, curandState * const rngStates, FILE *resf, double temperature ) { //OUTER REP LOOP ////////////measures float n_measures_per_sim = (float) ((pl.t_max_sim - pl.t_measure_wait)/pl.t_measure_interval); struct avg_tr outer_avg_tr = new_avg_tr(pl.steps_repeat); // extra space needed by dev_update_magnetization_tracker int * dev_partial_res; cudaMalloc(&dev_partial_res, sizeof(int)); for( int krep=0; krep< pl.steps_repeat; krep++) { /*fl__*/ float single_run_avg = 0.; /*fl__*/ float * dev_single_run_avg; /*fl__*/ cudaMalloc(&dev_single_run_avg, sizeof(float)); /*fl__*/ cudaMemcpy(dev_single_run_avg, &single_run_avg, sizeof(float), cudaMemcpyHostToDevice); // printf("seeding with %i\n", SEED+krep); // initialize starting grid on the device for this sim cudaMemcpy(dev_grid, startgrid, L*L*sizeof(char), cudaMemcpyHostToDevice); /*fl__*/ dev_measure_cycle_kernel<<<BLOCKS, THREADS>>>(pl, dev_grid, rngStates, dev_single_run_avg, dev_partial_res, temperature ); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("ERROR: %s\n", cudaGetErrorString(err)); } // bring back results to CPU /*fl__*/ cudaMemcpy(&single_run_avg, dev_single_run_avg, sizeof(float), cudaMemcpyDeviceToHost); /*fl__*/ float lres = single_run_avg/(n_measures_per_sim); // /*fl__*/ float lstdev = stdev(single_run_avg); if (HISTORY) printf(" temperature: %f\n", temperature); if (HISTORY) printf("# average of simulation %i:\n %f\n", krep+1, lres); update_avg(&outer_avg_tr, lres); char endgrid[L*L]; cudaMemcpy(endgrid, dev_grid, L*L*sizeof(char), cudaMemcpyDeviceToHost); if (HISTORY) dump(endgrid); /*fl__*/ cudaFree(dev_single_run_avg); } // END OUTER REP LOOP ////////////measures fprintf(resf, "%f ", temperature); fprintf(resf, "%f ", average(outer_avg_tr)); fprintf(resf, "%f\n", stdev(outer_avg_tr)); cudaFree(dev_partial_res); } int main() { // L should be (multiple of THR_NUMBER -2) + 2 assert( ((L-2)% (THR_NUMBER-2) )== 0 ); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); FILE *resf = fopen("results.txt", "w"); fprintf(resf, "# gpu1\n"); fprintf(resf, "# parameters:\n# linear_size: %i\n", L); fprintf(resf, "# coupling: %f\n# repetitions: %i\n", J, PLAN.steps_repeat); fprintf(resf, "# simulation_t_max: %i\n# 
thermalization_time: %i\n# time_between_measurements: %i\n# base_random_seed: %i\n", PLAN.t_max_sim, PLAN.t_measure_wait, PLAN.t_measure_interval, SEED); fprintf(resf, "# extra:\n# area: %i\n# active_spins_excluding_boundaries:%i\n", AREA, NTOT); fprintf(resf, "\n"); fprintf(resf, "# columns: temperature - average magnetization - uncertainty \n"); srand(SEED); // curand init // Allocate memory for RNG states curandState *d_rngStates = 0; cudaMalloc((void **)&d_rngStates, THR_NUMBER*THR_NUMBER*BLOCK_NUMBER*BLOCK_NUMBER*sizeof(curandState)); // Initialise RNG initRNG<<<BLOCKS, THREADS>>>(d_rngStates, SEED); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("ERROR: %s\n", cudaGetErrorString(err)); } // device grid char * dev_grid; cudaMalloc(&dev_grid, L*L*sizeof(char)); char startgrid[L*L]; init_t0(startgrid); // if (HISTORY) printf("starting grid:\n"); // if (HISTORY) dump(startgrid); // // // temp cycle: // for( double kt=T_CYCLE_START; kt<T_CYCLE_END; kt+=T_CYCLE_STEP ) { // parall_measure_cycle(startgrid, PLAN, dev_grid, d_rngStates, resf, kt); // } // only 1: parall_measure_cycle(startgrid, PLAN, dev_grid, d_rngStates, resf, SINGLETEMP); cudaFree(d_rngStates); cudaFree(dev_grid); cudaEventRecord(stop); cudaEventSynchronize(stop); float total_time = 0; cudaEventElapsedTime(&total_time, start, stop); FILE *timef = fopen("time.txt", "w"); long int total_flips = ((long int)(n_temps))* ((long int)((PLAN.steps_repeat))) * ((long int)(PLAN.t_max_sim)) * ((long int)(NTOT)); fprintf(timef, "# gpu1\n"); fprintf(timef, "# total execution time (milliseconds):\n"); fprintf(timef, "%f\n", total_time); fprintf(timef, "# total spin flips performed:\n"); fprintf(timef, "%li\n", total_flips); fprintf(timef, "# average spin flips per millisecond:\n"); fprintf(timef, "%Lf\n", ((long double) total_flips )/( (long double) total_time ) ); fclose(timef); fclose(resf); return 0; }
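Both versions of this simulation avoid updating neighbouring spins concurrently by sweeping the two checkerboard sub-lattices in separate passes. The sketch below shows only that colouring idea with a placeholder flip rule (align with the neighbour majority); the Metropolis acceptance, RNG and shared-memory tiling of the original kernels are deliberately omitted, and all names and sizes are illustrative.

#include <cstdio>
#include <cuda_runtime.h>

// One half-sweep: only sites whose colour (x + y) % 2 matches `color` are
// touched, so no spin ever reads a neighbour that is being written in the
// same launch. The flip rule here is NOT the Metropolis step used above.
__global__ void half_sweep(char *g, int n, int color)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x <= 0 || y <= 0 || x >= n - 1 || y >= n - 1) return;   // frozen boundary
    if ((x + y) % 2 != color) return;

    int s  = 2 * g[x + y * n] - 1;                              // 0/1 -> -1/+1
    int nb = (2 * g[(x + 1) + y * n] - 1) + (2 * g[(x - 1) + y * n] - 1)
           + (2 * g[x + (y + 1) * n] - 1) + (2 * g[x + (y - 1) * n] - 1);
    if (s * nb < 0) g[x + y * n] = !g[x + y * n];               // flip against the majority
}

int main()
{
    const int n = 32;
    char h[n * n] = {0};
    char *d;
    cudaMalloc(&d, n * n);
    cudaMemcpy(d, h, n * n, cudaMemcpyHostToDevice);

    dim3 thr(16, 16), blk((n + 15) / 16, (n + 15) / 16);
    half_sweep<<<blk, thr>>>(d, n, 0);   // "white" sub-lattice
    half_sweep<<<blk, thr>>>(d, n, 1);   // "black" sub-lattice
    cudaDeviceSynchronize();

    cudaFree(d);
    printf("done\n");
    return 0;
}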
67f565dc7d39b454aa338740c10b5afa7058e259.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "modulated_deform_conv_cuda_kernel.cuh" #include "pytorch_cuda_helper.hpp" void modulated_deformable_im2col_cuda( const Tensor data_im, const Tensor data_offset, const Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kenerl_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, Tensor data_col) { // num_axes should be smaller than block size const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * batch_size * height_col * width_col; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_im.scalar_type(), "modulated_deformable_im2col_gpu", ([&] { const scalar_t *data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>(); const scalar_t *data_mask_ = data_mask.data_ptr<scalar_t>(); scalar_t *data_col_ = data_col.data_ptr<scalar_t>(); hipLaunchKernelGGL(( modulated_deformable_im2col_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(THREADS_PER_BLOCK), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, kernel_h, kenerl_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, channels, deformable_group, height_col, width_col, data_col_); })); AT_CUDA_CHECK(hipGetLastError()); } void modulated_deformable_col2im_cuda( const Tensor data_col, const Tensor data_offset, const Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, Tensor grad_im) { const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.scalar_type(), "modulated_deformable_col2im_gpu", ([&] { const scalar_t *data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>(); const scalar_t *data_mask_ = data_mask.data_ptr<scalar_t>(); scalar_t *grad_im_ = grad_im.data_ptr<scalar_t>(); hipLaunchKernelGGL(( modulated_deformable_col2im_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(THREADS_PER_BLOCK), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), num_kernels, data_col_, data_offset_, data_mask_, channels, height_im, width_im, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, deformable_group, height_col, width_col, grad_im_); })); AT_CUDA_CHECK(hipGetLastError()); } void modulated_deformable_col2im_coord_cuda( const Tensor data_col, const Tensor data_im, const Tensor data_offset, const Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, Tensor grad_offset, Tensor grad_mask) { const int num_kernels = batch_size * height_col 
* width_col * 2 * kernel_h * kernel_w * deformable_group; const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.scalar_type(), "modulated_deformable_col2im_coord_gpu", ([&] { const scalar_t *data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t *data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>(); const scalar_t *data_mask_ = data_mask.data_ptr<scalar_t>(); scalar_t *grad_offset_ = grad_offset.data_ptr<scalar_t>(); scalar_t *grad_mask_ = grad_mask.data_ptr<scalar_t>(); hipLaunchKernelGGL(( modulated_deformable_col2im_coord_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(THREADS_PER_BLOCK), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), num_kernels, data_col_, data_im_, data_offset_, data_mask_, channels, height_im, width_im, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col, grad_offset_, grad_mask_); })); AT_CUDA_CHECK(hipGetLastError()); } void ModulatedDeformConvForwardCUDAKernelLauncher( Tensor input, Tensor weight, Tensor bias, Tensor ones, Tensor offset, Tensor mask, Tensor output, Tensor columns, int kernel_h, int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const int group, const int deformable_group, const bool with_bias) { at::DeviceGuard guard(input.device()); const int batch = input.size(0); const int channels = input.size(1); const int height = input.size(2); const int width = input.size(3); const int channels_out = weight.size(0); const int channels_kernel = weight.size(1); const int kernel_h_ = weight.size(2); const int kernel_w_ = weight.size(3); if (kernel_h_ != kernel_h || kernel_w_ != kernel_w) AT_ERROR("Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_); if (channels != channels_kernel * group) AT_ERROR("Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel * group); const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; if (ones.ndimension() != 2 || ones.size(0) * ones.size(1) < height_out * width_out) { // Resize plane and fill with ones... 
ones = at::ones({height_out, width_out}, input.options()); } // resize output output = output.view({batch, channels_out, height_out, width_out}).zero_(); // resize temporary columns columns = at::zeros({channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options()); output = output.view({output.size(0), group, output.size(1) / group, output.size(2), output.size(3)}); for (int b = 0; b < batch; b++) { modulated_deformable_im2col_cuda( input[b], offset[b], mask[b], 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, columns); // divide into group weight = weight.view({group, weight.size(0) / group, weight.size(1), weight.size(2), weight.size(3)}); columns = columns.view({group, columns.size(0) / group, columns.size(1)}); for (int g = 0; g < group; g++) { output[b][g] = output[b][g] .flatten(1) .addmm_(weight[g].flatten(1), columns[g]) .view_as(output[b][g]); } weight = weight.view({weight.size(0) * weight.size(1), weight.size(2), weight.size(3), weight.size(4)}); columns = columns.view({columns.size(0) * columns.size(1), columns.size(2)}); } output = output.view({output.size(0), output.size(1) * output.size(2), output.size(3), output.size(4)}); if (with_bias) { output += bias.view({1, bias.size(0), 1, 1}); } } void ModulatedDeformConvBackwardCUDAKernelLauncher( Tensor input, Tensor weight, Tensor bias, Tensor ones, Tensor offset, Tensor mask, Tensor columns, Tensor grad_input, Tensor grad_weight, Tensor grad_bias, Tensor grad_offset, Tensor grad_mask, Tensor grad_output, int kernel_h, int kernel_w, int stride_h, int stride_w, int pad_h, int pad_w, int dilation_h, int dilation_w, int group, int deformable_group, const bool with_bias) { at::DeviceGuard guard(input.device()); const int batch = input.size(0); const int channels = input.size(1); const int height = input.size(2); const int width = input.size(3); const int channels_kernel = weight.size(1); const int kernel_h_ = weight.size(2); const int kernel_w_ = weight.size(3); if (kernel_h_ != kernel_h || kernel_w_ != kernel_w) AT_ERROR("Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_); if (channels != channels_kernel * group) AT_ERROR("Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel * group); const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; if (ones.ndimension() != 2 || ones.size(0) * ones.size(1) < height_out * width_out) { // Resize plane and fill with ones... 
ones = at::ones({height_out, width_out}, input.options()); } grad_input = grad_input.view({batch, channels, height, width}); columns = at::zeros({channels * kernel_h * kernel_w, height_out * width_out}, input.options()); grad_output = grad_output.view({grad_output.size(0), group, grad_output.size(1) / group, grad_output.size(2), grad_output.size(3)}); for (int b = 0; b < batch; b++) { // divide int group columns = columns.view({group, columns.size(0) / group, columns.size(1)}); weight = weight.view({group, weight.size(0) / group, weight.size(1), weight.size(2), weight.size(3)}); for (int g = 0; g < group; g++) { columns[g].addmm_(weight[g].flatten(1).transpose(0, 1), grad_output[b][g].flatten(1), 0.0f, 1.0f); } columns = columns.view({columns.size(0) * columns.size(1), columns.size(2)}); weight = weight.view({weight.size(0) * weight.size(1), weight.size(2), weight.size(3), weight.size(4)}); // gradient w.r.t. input coordinate data modulated_deformable_col2im_coord_cuda( columns, input[b], offset[b], mask[b], 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, grad_offset[b], grad_mask[b]); // gradient w.r.t. input data modulated_deformable_col2im_cuda( columns, offset[b], mask[b], 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, grad_input[b]); // gradient w.r.t. weight, dWeight should accumulate across the batch and // group modulated_deformable_im2col_cuda( input[b], offset[b], mask[b], 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, columns); columns = columns.view({group, columns.size(0) / group, columns.size(1)}); grad_weight = grad_weight.view({group, grad_weight.size(0) / group, grad_weight.size(1), grad_weight.size(2), grad_weight.size(3)}); if (with_bias) grad_bias = grad_bias.view({group, grad_bias.size(0) / group}); for (int g = 0; g < group; g++) { grad_weight[g] = grad_weight[g] .flatten(1) .addmm_(grad_output[b][g].flatten(1), columns[g].transpose(0, 1)) .view_as(grad_weight[g]); if (with_bias) { grad_bias[g] = grad_bias[g] .view({-1, 1}) .addmm_(grad_output[b][g].flatten(1), ones.view({-1, 1})) .view(-1); } } columns = columns.view({columns.size(0) * columns.size(1), columns.size(2)}); grad_weight = grad_weight.view({grad_weight.size(0) * grad_weight.size(1), grad_weight.size(2), grad_weight.size(3), grad_weight.size(4)}); if (with_bias) grad_bias = grad_bias.view({grad_bias.size(0) * grad_bias.size(1)}); } grad_output = grad_output.view({grad_output.size(0) * grad_output.size(1), grad_output.size(2), grad_output.size(3), grad_output.size(4)}); }
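Both launchers above size their outputs with height_out = (H + 2*pad_h - (dilation_h*(kernel_h - 1) + 1)) / stride_h + 1 (and the analogous width formula). A small host-only check of that formula with made-up example values:

#include <cstdio>

// Output-size formula used repeatedly by the launchers above.
static int conv_out_size(int in, int kernel, int pad, int stride, int dilation)
{
    return (in + 2 * pad - (dilation * (kernel - 1) + 1)) / stride + 1;
}

int main()
{
    // 64x64 input, 3x3 kernel, pad 1, stride 1, dilation 1 -> stays 64
    printf("%d\n", conv_out_size(64, 3, 1, 1, 1));   // 64
    // same kernel with stride 2 roughly halves the spatial size -> 32
    printf("%d\n", conv_out_size(64, 3, 1, 2, 1));   // 32
    // dilation 2 enlarges the effective kernel to 5 -> 60 without padding
    printf("%d\n", conv_out_size(64, 3, 0, 1, 2));   // 60
    return 0;
}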
67f565dc7d39b454aa338740c10b5afa7058e259.cu
#include "modulated_deform_conv_cuda_kernel.cuh" #include "pytorch_cuda_helper.hpp" void modulated_deformable_im2col_cuda( const Tensor data_im, const Tensor data_offset, const Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kenerl_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, Tensor data_col) { // num_axes should be smaller than block size const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * batch_size * height_col * width_col; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_im.scalar_type(), "modulated_deformable_im2col_gpu", ([&] { const scalar_t *data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>(); const scalar_t *data_mask_ = data_mask.data_ptr<scalar_t>(); scalar_t *data_col_ = data_col.data_ptr<scalar_t>(); modulated_deformable_im2col_gpu_kernel<<< GET_BLOCKS(num_kernels), THREADS_PER_BLOCK, 0, at::cuda::getCurrentCUDAStream()>>>( num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, kernel_h, kenerl_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, channels, deformable_group, height_col, width_col, data_col_); })); AT_CUDA_CHECK(cudaGetLastError()); } void modulated_deformable_col2im_cuda( const Tensor data_col, const Tensor data_offset, const Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, Tensor grad_im) { const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.scalar_type(), "modulated_deformable_col2im_gpu", ([&] { const scalar_t *data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>(); const scalar_t *data_mask_ = data_mask.data_ptr<scalar_t>(); scalar_t *grad_im_ = grad_im.data_ptr<scalar_t>(); modulated_deformable_col2im_gpu_kernel<<< GET_BLOCKS(num_kernels), THREADS_PER_BLOCK, 0, at::cuda::getCurrentCUDAStream()>>>( num_kernels, data_col_, data_offset_, data_mask_, channels, height_im, width_im, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, deformable_group, height_col, width_col, grad_im_); })); AT_CUDA_CHECK(cudaGetLastError()); } void modulated_deformable_col2im_coord_cuda( const Tensor data_col, const Tensor data_im, const Tensor data_offset, const Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, Tensor grad_offset, Tensor grad_mask) { const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group; const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group; 
AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.scalar_type(), "modulated_deformable_col2im_coord_gpu", ([&] { const scalar_t *data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t *data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>(); const scalar_t *data_mask_ = data_mask.data_ptr<scalar_t>(); scalar_t *grad_offset_ = grad_offset.data_ptr<scalar_t>(); scalar_t *grad_mask_ = grad_mask.data_ptr<scalar_t>(); modulated_deformable_col2im_coord_gpu_kernel<<< GET_BLOCKS(num_kernels), THREADS_PER_BLOCK, 0, at::cuda::getCurrentCUDAStream()>>>( num_kernels, data_col_, data_im_, data_offset_, data_mask_, channels, height_im, width_im, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col, grad_offset_, grad_mask_); })); AT_CUDA_CHECK(cudaGetLastError()); } void ModulatedDeformConvForwardCUDAKernelLauncher( Tensor input, Tensor weight, Tensor bias, Tensor ones, Tensor offset, Tensor mask, Tensor output, Tensor columns, int kernel_h, int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const int group, const int deformable_group, const bool with_bias) { at::DeviceGuard guard(input.device()); const int batch = input.size(0); const int channels = input.size(1); const int height = input.size(2); const int width = input.size(3); const int channels_out = weight.size(0); const int channels_kernel = weight.size(1); const int kernel_h_ = weight.size(2); const int kernel_w_ = weight.size(3); if (kernel_h_ != kernel_h || kernel_w_ != kernel_w) AT_ERROR("Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_); if (channels != channels_kernel * group) AT_ERROR("Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel * group); const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; if (ones.ndimension() != 2 || ones.size(0) * ones.size(1) < height_out * width_out) { // Resize plane and fill with ones... 
ones = at::ones({height_out, width_out}, input.options()); } // resize output output = output.view({batch, channels_out, height_out, width_out}).zero_(); // resize temporary columns columns = at::zeros({channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options()); output = output.view({output.size(0), group, output.size(1) / group, output.size(2), output.size(3)}); for (int b = 0; b < batch; b++) { modulated_deformable_im2col_cuda( input[b], offset[b], mask[b], 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, columns); // divide into group weight = weight.view({group, weight.size(0) / group, weight.size(1), weight.size(2), weight.size(3)}); columns = columns.view({group, columns.size(0) / group, columns.size(1)}); for (int g = 0; g < group; g++) { output[b][g] = output[b][g] .flatten(1) .addmm_(weight[g].flatten(1), columns[g]) .view_as(output[b][g]); } weight = weight.view({weight.size(0) * weight.size(1), weight.size(2), weight.size(3), weight.size(4)}); columns = columns.view({columns.size(0) * columns.size(1), columns.size(2)}); } output = output.view({output.size(0), output.size(1) * output.size(2), output.size(3), output.size(4)}); if (with_bias) { output += bias.view({1, bias.size(0), 1, 1}); } } void ModulatedDeformConvBackwardCUDAKernelLauncher( Tensor input, Tensor weight, Tensor bias, Tensor ones, Tensor offset, Tensor mask, Tensor columns, Tensor grad_input, Tensor grad_weight, Tensor grad_bias, Tensor grad_offset, Tensor grad_mask, Tensor grad_output, int kernel_h, int kernel_w, int stride_h, int stride_w, int pad_h, int pad_w, int dilation_h, int dilation_w, int group, int deformable_group, const bool with_bias) { at::DeviceGuard guard(input.device()); const int batch = input.size(0); const int channels = input.size(1); const int height = input.size(2); const int width = input.size(3); const int channels_kernel = weight.size(1); const int kernel_h_ = weight.size(2); const int kernel_w_ = weight.size(3); if (kernel_h_ != kernel_h || kernel_w_ != kernel_w) AT_ERROR("Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_); if (channels != channels_kernel * group) AT_ERROR("Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel * group); const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; if (ones.ndimension() != 2 || ones.size(0) * ones.size(1) < height_out * width_out) { // Resize plane and fill with ones... 
ones = at::ones({height_out, width_out}, input.options()); } grad_input = grad_input.view({batch, channels, height, width}); columns = at::zeros({channels * kernel_h * kernel_w, height_out * width_out}, input.options()); grad_output = grad_output.view({grad_output.size(0), group, grad_output.size(1) / group, grad_output.size(2), grad_output.size(3)}); for (int b = 0; b < batch; b++) { // divide int group columns = columns.view({group, columns.size(0) / group, columns.size(1)}); weight = weight.view({group, weight.size(0) / group, weight.size(1), weight.size(2), weight.size(3)}); for (int g = 0; g < group; g++) { columns[g].addmm_(weight[g].flatten(1).transpose(0, 1), grad_output[b][g].flatten(1), 0.0f, 1.0f); } columns = columns.view({columns.size(0) * columns.size(1), columns.size(2)}); weight = weight.view({weight.size(0) * weight.size(1), weight.size(2), weight.size(3), weight.size(4)}); // gradient w.r.t. input coordinate data modulated_deformable_col2im_coord_cuda( columns, input[b], offset[b], mask[b], 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, grad_offset[b], grad_mask[b]); // gradient w.r.t. input data modulated_deformable_col2im_cuda( columns, offset[b], mask[b], 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, grad_input[b]); // gradient w.r.t. weight, dWeight should accumulate across the batch and // group modulated_deformable_im2col_cuda( input[b], offset[b], mask[b], 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, columns); columns = columns.view({group, columns.size(0) / group, columns.size(1)}); grad_weight = grad_weight.view({group, grad_weight.size(0) / group, grad_weight.size(1), grad_weight.size(2), grad_weight.size(3)}); if (with_bias) grad_bias = grad_bias.view({group, grad_bias.size(0) / group}); for (int g = 0; g < group; g++) { grad_weight[g] = grad_weight[g] .flatten(1) .addmm_(grad_output[b][g].flatten(1), columns[g].transpose(0, 1)) .view_as(grad_weight[g]); if (with_bias) { grad_bias[g] = grad_bias[g] .view({-1, 1}) .addmm_(grad_output[b][g].flatten(1), ones.view({-1, 1})) .view(-1); } } columns = columns.view({columns.size(0) * columns.size(1), columns.size(2)}); grad_weight = grad_weight.view({grad_weight.size(0) * grad_weight.size(1), grad_weight.size(2), grad_weight.size(3), grad_weight.size(4)}); if (with_bias) grad_bias = grad_bias.view({grad_bias.size(0) * grad_bias.size(1)}); } grad_output = grad_output.view({grad_output.size(0) * grad_output.size(1), grad_output.size(2), grad_output.size(3), grad_output.size(4)}); }
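The im2col/col2im helpers above launch GET_BLOCKS(num_kernels) blocks of THREADS_PER_BLOCK threads and walk one flat index per output element; the usual way such kernels stay correct when the grid is capped is a grid-stride loop. A generic sketch of that pattern follows; the constants and helper names are assumptions, not the actual definitions from pytorch_cuda_helper.hpp.

#include <cstdio>
#include <cuda_runtime.h>

// Assumed stand-ins for THREADS_PER_BLOCK / GET_BLOCKS.
constexpr int kThreads = 512;
static inline int blocks_for(int n) { return (n + kThreads - 1) / kThreads; }

// One flat index per element, advanced by the whole grid each iteration, so
// the kernel is correct even if the grid is smaller than the problem.
__global__ void fill_index(float *out, int n)
{
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x) {
        out[i] = static_cast<float>(i);
    }
}

int main()
{
    const int n = 1 << 20;
    float *d;
    cudaMalloc(&d, n * sizeof(float));

    fill_index<<<blocks_for(n), kThreads>>>(d, n);
    cudaDeviceSynchronize();

    float first = -1.0f;
    cudaMemcpy(&first, d, sizeof(float), cudaMemcpyDeviceToHost);
    printf("%f\n", first);   // 0.000000

    cudaFree(d);
    return 0;
}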
88d2c49642d92b97dc19ded89f37d66683e89f29.hip
// !!! This is a file automatically generated by hipify!!! #include "common/book.h" #include "common/cpu_anim.h" #include <hip/hip_runtime.h> #include <cuda_device_runtime_api.h> #include <hip/hip_runtime.h> #define DIM 1024 #define PI 3.1415926535897932f #define MAX_TEMP 1.0f #define MIN_TEMP 0.0001f #define SPEED 0.25f struct DataBlock { unsigned char *output_bitmap; float *dev_in_src; float *dev_out_src; float *dev_const_src; CPUAnimBitmap *bitmap; hipEvent_t start, stop; float total_time; float frames; }; __global__ void copy_const_kernel(float *iptr, float const *cptr) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; if (cptr[offset] != 0) { iptr[offset] = cptr[offset]; } } __global__ void blend_kernel(float *out_src, float const *in_src) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; int left = offset - 1; int right = offset + 1; if (x == 0) { ++left; } if (x == DIM - 1) { --right; } int top = offset - DIM; int bottom = offset + DIM; if (y == 0) { top += DIM; } if (y == DIM - 1) { bottom -= DIM; } out_src[offset] = in_src[offset] + SPEED * (in_src[top] + in_src[bottom] + in_src[left] + in_src[right] - 4 * in_src[offset]); } void anim_gpu(DataBlock *d, int ticks) { HANDLE_ERROR(hipEventRecord(d->start, 0)); dim3 blocks(DIM / 16, DIM / 16); dim3 threads(16, 16); for (int i = 0; i < 90; ++i) { hipLaunchKernelGGL(( copy_const_kernel), dim3(blocks), dim3(threads), 0, 0, d->dev_in_src, d->dev_const_src); hipLaunchKernelGGL(( blend_kernel), dim3(blocks), dim3(threads), 0, 0, d->dev_out_src, d->dev_in_src); swap(d->dev_in_src, d->dev_out_src); } hipLaunchKernelGGL(( float_to_color), dim3(blocks), dim3(threads), 0, 0, d->output_bitmap, d->dev_in_src); HANDLE_ERROR(hipMemcpy(d->bitmap->get_ptr(), d->output_bitmap, d->bitmap->image_size(), hipMemcpyDeviceToHost)); HANDLE_ERROR(hipEventRecord(d->stop, 0)); HANDLE_ERROR(hipEventSynchronize(d->stop)); float elapsed_time; HANDLE_ERROR(hipEventElapsedTime(&elapsed_time, d->start, d->stop)); d->total_time += elapsed_time; ++d->frames; printf("Average time per frame: %3.1f ms\n", d->total_time / d->frames); } void anim_exit(DataBlock *d) { hipFree(d->dev_in_src); hipFree(d->dev_out_src); hipFree(d->dev_const_src); HANDLE_ERROR(hipEventDestroy(d->start)); HANDLE_ERROR(hipEventDestroy(d->stop)); } int main(void) { DataBlock data; CPUAnimBitmap bitmap(DIM, DIM, &data); data.bitmap = &bitmap; data.total_time = 0; data.frames = 0; HANDLE_ERROR(hipEventCreate(&data.start)); HANDLE_ERROR(hipEventCreate(&data.stop)); HANDLE_ERROR(hipMalloc((void **)&data.output_bitmap, bitmap.image_size())); HANDLE_ERROR(hipMalloc((void **)&data.dev_in_src, bitmap.image_size())); HANDLE_ERROR(hipMalloc((void **)&data.dev_out_src, bitmap.image_size())); HANDLE_ERROR(hipMalloc((void **)&data.dev_const_src, bitmap.image_size())); float *temp = (float *)malloc(bitmap.image_size()); for (int i = 0; i < DIM * DIM; ++i) { temp[i] = 0; int const x = i % DIM; int const y = i / DIM; if (x > 300 && x < 600 && y > 310 && y < 601) { temp[i] = MAX_TEMP; } } temp[DIM * 100 + 100] = (MAX_TEMP + MIN_TEMP) / 2; temp[DIM * 700 + 100] = MIN_TEMP; temp[DIM * 300 + 300] = MIN_TEMP; temp[DIM * 200 + 700] = MIN_TEMP; for (int y = 800; y < 900; ++y) { for (int x = 400; x < 500; ++x) { temp[x + y * DIM] = MIN_TEMP; } } HANDLE_ERROR(hipMemcpy(data.dev_const_src, temp, bitmap.image_size(), hipMemcpyHostToDevice)); for 
(int y = 800; y < DIM; ++y) { for (int x = 0; x < 200; ++x) { temp[x + y * DIM] = MAX_TEMP; } } HANDLE_ERROR(hipMemcpy(data.dev_in_src, temp, bitmap.image_size(), hipMemcpyHostToDevice)); free(temp); bitmap.anim_and_exit((void (*)(void *, int))anim_gpu, (void (*)(void *))anim_exit); return 0; }
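blend_kernel applies out = in + SPEED * (top + bottom + left + right - 4*in), with edge cells clamped to themselves. A tiny host-side reference of that update rule on a toy grid (illustrative values only), useful for sanity-checking the arithmetic:

#include <algorithm>
#include <cstdio>

int main()
{
    const int n = 8;
    const float speed = 0.25f;   // same role as SPEED above
    float in[n * n] = {0}, out[n * n];
    in[3 + 3 * n] = 1.0f;        // one hot cell

    for (int y = 0; y < n; y++) {
        for (int x = 0; x < n; x++) {
            int l = std::max(x - 1, 0), r = std::min(x + 1, n - 1);
            int t = std::max(y - 1, 0), b = std::min(y + 1, n - 1);
            float c = in[x + y * n];
            out[x + y * n] = c + speed * (in[l + y * n] + in[r + y * n] +
                                          in[x + t * n] + in[x + b * n] - 4 * c);
        }
    }
    printf("centre after one step:    %f\n", out[3 + 3 * n]);   // 0.00
    printf("neighbour after one step: %f\n", out[4 + 3 * n]);   // 0.25
    return 0;
}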
88d2c49642d92b97dc19ded89f37d66683e89f29.cu
#include "common/book.h" #include "common/cpu_anim.h" #include <cuda.h> #include <cuda_device_runtime_api.h> #include <cuda_runtime.h> #define DIM 1024 #define PI 3.1415926535897932f #define MAX_TEMP 1.0f #define MIN_TEMP 0.0001f #define SPEED 0.25f struct DataBlock { unsigned char *output_bitmap; float *dev_in_src; float *dev_out_src; float *dev_const_src; CPUAnimBitmap *bitmap; cudaEvent_t start, stop; float total_time; float frames; }; __global__ void copy_const_kernel(float *iptr, float const *cptr) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; if (cptr[offset] != 0) { iptr[offset] = cptr[offset]; } } __global__ void blend_kernel(float *out_src, float const *in_src) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; int left = offset - 1; int right = offset + 1; if (x == 0) { ++left; } if (x == DIM - 1) { --right; } int top = offset - DIM; int bottom = offset + DIM; if (y == 0) { top += DIM; } if (y == DIM - 1) { bottom -= DIM; } out_src[offset] = in_src[offset] + SPEED * (in_src[top] + in_src[bottom] + in_src[left] + in_src[right] - 4 * in_src[offset]); } void anim_gpu(DataBlock *d, int ticks) { HANDLE_ERROR(cudaEventRecord(d->start, 0)); dim3 blocks(DIM / 16, DIM / 16); dim3 threads(16, 16); for (int i = 0; i < 90; ++i) { copy_const_kernel<<<blocks, threads>>>(d->dev_in_src, d->dev_const_src); blend_kernel<<<blocks, threads>>>(d->dev_out_src, d->dev_in_src); swap(d->dev_in_src, d->dev_out_src); } float_to_color<<<blocks, threads>>>(d->output_bitmap, d->dev_in_src); HANDLE_ERROR(cudaMemcpy(d->bitmap->get_ptr(), d->output_bitmap, d->bitmap->image_size(), cudaMemcpyDeviceToHost)); HANDLE_ERROR(cudaEventRecord(d->stop, 0)); HANDLE_ERROR(cudaEventSynchronize(d->stop)); float elapsed_time; HANDLE_ERROR(cudaEventElapsedTime(&elapsed_time, d->start, d->stop)); d->total_time += elapsed_time; ++d->frames; printf("Average time per frame: %3.1f ms\n", d->total_time / d->frames); } void anim_exit(DataBlock *d) { cudaFree(d->dev_in_src); cudaFree(d->dev_out_src); cudaFree(d->dev_const_src); HANDLE_ERROR(cudaEventDestroy(d->start)); HANDLE_ERROR(cudaEventDestroy(d->stop)); } int main(void) { DataBlock data; CPUAnimBitmap bitmap(DIM, DIM, &data); data.bitmap = &bitmap; data.total_time = 0; data.frames = 0; HANDLE_ERROR(cudaEventCreate(&data.start)); HANDLE_ERROR(cudaEventCreate(&data.stop)); HANDLE_ERROR(cudaMalloc((void **)&data.output_bitmap, bitmap.image_size())); HANDLE_ERROR(cudaMalloc((void **)&data.dev_in_src, bitmap.image_size())); HANDLE_ERROR(cudaMalloc((void **)&data.dev_out_src, bitmap.image_size())); HANDLE_ERROR(cudaMalloc((void **)&data.dev_const_src, bitmap.image_size())); float *temp = (float *)malloc(bitmap.image_size()); for (int i = 0; i < DIM * DIM; ++i) { temp[i] = 0; int const x = i % DIM; int const y = i / DIM; if (x > 300 && x < 600 && y > 310 && y < 601) { temp[i] = MAX_TEMP; } } temp[DIM * 100 + 100] = (MAX_TEMP + MIN_TEMP) / 2; temp[DIM * 700 + 100] = MIN_TEMP; temp[DIM * 300 + 300] = MIN_TEMP; temp[DIM * 200 + 700] = MIN_TEMP; for (int y = 800; y < 900; ++y) { for (int x = 400; x < 500; ++x) { temp[x + y * DIM] = MIN_TEMP; } } HANDLE_ERROR(cudaMemcpy(data.dev_const_src, temp, bitmap.image_size(), cudaMemcpyHostToDevice)); for (int y = 800; y < DIM; ++y) { for (int x = 0; x < 200; ++x) { temp[x + y * DIM] = MAX_TEMP; } } HANDLE_ERROR(cudaMemcpy(data.dev_in_src, temp, bitmap.image_size(), 
cudaMemcpyHostToDevice)); free(temp); bitmap.anim_and_exit((void (*)(void *, int))anim_gpu, (void (*)(void *))anim_exit); return 0; }
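anim_gpu times each frame with the standard CUDA event pattern: record a start event, do the work, record a stop event, synchronize on it, then read the elapsed milliseconds. The same pattern in isolation, with a stand-in kernel and made-up sizes:

#include <cstdio>
#include <cuda_runtime.h>

__global__ void busy_work(float *x, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] = x[i] * 0.5f + 1.0f;
}

int main()
{
    const int n = 1 << 22;
    float *d;
    cudaMalloc(&d, n * sizeof(float));
    cudaMemset(d, 0, n * sizeof(float));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0);                      // mark "before"
    busy_work<<<(n + 255) / 256, 256>>>(d, n);
    cudaEventRecord(stop, 0);                       // mark "after"
    cudaEventSynchronize(stop);                     // wait until `stop` has happened

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);         // milliseconds between the two events
    printf("kernel time: %.3f ms\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d);
    return 0;
}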
a3f45d744bc7ab5e4c97b8bc9add228dec87785f.hip
// !!! This is a file automatically generated by hipify!!! /* Udacity Homework 3 HDR Tone-mapping Background HDR ============== A High Dynamic Range (HDR) image contains a wider variation of intensity and color than is allowed by the RGB format with 1 byte per channel that we have used in the previous assignment. To store this extra information we use single precision floating point for each channel. This allows for an extremely wide range of intensity values. In the image for this assignment, the inside of church with light coming in through stained glass windows, the raw input floating point values for the channels range from 0 to 275. But the mean is .41 and 98% of the values are less than 3! This means that certain areas (the windows) are extremely bright compared to everywhere else. If we linearly map this [0-275] range into the [0-255] range that we have been using then most values will be mapped to zero! The only thing we will be able to see are the very brightest areas - the windows - everything else will appear pitch black. The problem is that although we have cameras capable of recording the wide range of intensity that exists in the real world our monitors are not capable of displaying them. Our eyes are also quite capable of observing a much wider range of intensities than our image formats / monitors are capable of displaying. Tone-mapping is a process that transforms the intensities in the image so that the brightest values aren't nearly so far away from the mean. That way when we transform the values into [0-255] we can actually see the entire image. There are many ways to perform this process and it is as much an art as a science - there is no single "right" answer. In this homework we will implement one possible technique. Background Chrominance-Luminance ================================ The RGB space that we have been using to represent images can be thought of as one possible set of axes spanning a three dimensional space of color. We sometimes choose other axes to represent this space because they make certain operations more convenient. Another possible way of representing a color image is to separate the color information (chromaticity) from the brightness information. There are multiple different methods for doing this - a common one during the analog television days was known as Chrominance-Luminance or YUV. We choose to represent the image in this way so that we can remap only the intensity channel and then recombine the new intensity values with the color information to form the final image. Old TV signals used to be transmitted in this way so that black & white televisions could display the luminance channel while color televisions would display all three of the channels. Tone-mapping ============ In this assignment we are going to transform the luminance channel (actually the log of the luminance, but this is unimportant for the parts of the algorithm that you will be implementing) by compressing its range to [0, 1]. To do this we need the cumulative distribution of the luminance values. Example ------- input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2] min / max / range: 0 / 9 / 9 histo with 3 bins: [4 7 3] cdf : [4 11 14] Your task is to calculate this cumulative distribution by following these steps. 
*/ #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <algorithm> #include <memory> #include <vector> #include "hvr/HW3/utils.h" #include <float.h> #include <limits.h> #include <math.h> #include <stdio.h> // Kernel for calculating minimum and maximum value of given array (output array // from reduction of 1/4 the size of the original array) M3_DLL __global__ void findMinMax(const float* const d_logLuminance, float* d_logLum_Min, float* d_logLum_Max, float* d_min_Lums, float* d_max_Lums, int pxCount) { const int absId = blockDim.x * blockIdx.x + threadIdx.x; const int tId = threadIdx.x; float d_min = d_logLuminance[0]; float d_max = d_logLuminance[0]; for (int i = 0; i < 4; i++) { if (absId < pxCount) { d_min = fminf(d_min, d_logLuminance[absId + i * blockDim.x * gridDim.x]); d_max = fmaxf(d_max, d_logLuminance[absId + i * blockDim.x * gridDim.x]); } } d_logLum_Min[absId] = d_min; d_logLum_Max[absId] = d_max; __syncthreads(); for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tId < s) { d_logLum_Min[absId] = fminf(d_logLum_Min[absId], d_logLum_Min[absId + s]); d_logLum_Max[absId] = fmaxf(d_logLum_Max[absId], d_logLum_Max[absId + s]); } __syncthreads(); } if (tId == 0) { d_min_Lums[blockIdx.x] = d_logLum_Min[absId]; d_max_Lums[blockIdx.x] = d_logLum_Max[absId]; } } // Kernel for calculating the histogram M3_DLL __global__ void calcHisto(unsigned int* d_histo, const float* const d_logLuminance, float min_logLum, float lumRange, const size_t numBins, const unsigned int pxCount) { const int tId = threadIdx.x; extern __shared__ unsigned int sh_bin[]; int bin = 0; int loopCount = (pxCount + blockDim.x - 1) / blockDim.x; // use each block as a single bin and process through the entire array to get // a single bin value for each block for (int i = 0; i < loopCount; i++) { if ((tId + i * blockDim.x) < pxCount) { if (blockIdx.x == int((d_logLuminance[tId + i * blockDim.x] - min_logLum) / lumRange * numBins)) { bin++; } } __syncthreads(); } sh_bin[tId] = bin; for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tId < s) { sh_bin[tId] = sh_bin[tId] + sh_bin[tId + s]; } __syncthreads(); } if (tId == 0) { d_histo[blockIdx.x] = sh_bin[0]; } } // Kernel for calculating the cdf from given histogram M3_DLL __global__ void calcCDF(unsigned int* const d_cdf, unsigned int* d_histo, const size_t numBins, const int cycles) { const int absId = blockDim.x * blockIdx.x + threadIdx.x; const int tId = threadIdx.x; extern __shared__ unsigned int prevSum[]; prevSum[tId] = 0; // calculate the total sum of the values before current block for (int i = 0; i < blockIdx.x; i++) { prevSum[tId] += d_histo[tId + i * blockDim.x]; } __syncthreads(); for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tId < s) { prevSum[tId] += prevSum[tId + s]; } __syncthreads(); } // calculate the inclusive scan with Hillis Steele scan int j = 1; for (int i = 0; i < cycles; i++) { if ((tId - j) >= 0) { d_histo[absId] += d_histo[absId - j]; } j = j * 2; __syncthreads(); } d_histo[absId] += prevSum[0]; __syncthreads(); // copy the result into a excludive scan array. d_cdf[0] = 0; // this piece of code is error prone to gpu code creating race conditions. 
// if (absId > 0) { // d_cdf[absId] = d_histo[absId - 1]; //} if (absId < (blockDim.x * gridDim.x - 1)) { d_cdf[absId + 1] = d_histo[absId]; } } M3_DLL void your_histogram_and_prefixsum(const float* const d_logLuminance, unsigned int* const d_cdf, float& min_logLum, float& max_logLum, const size_t numRows, const size_t numCols, const size_t numBins) { // TODO /*Here are the steps you need to implement 1) find the minimum and maximum value in the input logLuminance channel store in min_logLum and max_logLum 2) subtract them to find the range 3) generate a histogram of all the values in the logLuminance channel using the formula: bin = (lum[i] - lumMin) / lumRange * numBins 4) Perform an exclusive scan (prefix sum) on the histogram to get the cumulative distribution of luminance values (this should go in the incoming d_cdf pointer which already has been allocated for you) */ const unsigned int pxCount = numRows * numCols; const int pxSize = pxCount * sizeof(float); const int blockSize = 512; const int gridSize = (pxCount + blockSize * 4 - 1) / (blockSize * 4); float *d_min_Lums, *d_max_Lums, *d_logLum_Min, *d_logLum_Max; // std::unique_ptr<float[]> h_min_Lums(new // float[gridSize]);//std::make_unique<float[]>(gridSize); checkCudaErrors(hipMalloc((void**)&d_min_Lums, gridSize * sizeof(float))); checkCudaErrors(hipMalloc((void**)&d_max_Lums, gridSize * sizeof(float))); checkCudaErrors( hipMalloc((void**)&d_logLum_Min, pxCount * sizeof(float) / 4)); checkCudaErrors( hipMalloc((void**)&d_logLum_Max, pxCount * sizeof(float) / 4)); // printf("blockSize is %i\n", blockSize); // printf("gridSize is %i\n", gridSize); // printf("pxCount is %i\n", pxCount); // printf("pxSize is %i\n", pxSize); // printf("numBins is %i\n", numBins); hipLaunchKernelGGL(( findMinMax), dim3(gridSize), dim3(blockSize), 0, 0, d_logLuminance, d_logLum_Min, d_logLum_Max, d_min_Lums, d_max_Lums, pxCount); hipDeviceSynchronize(); float min_Lums = 10; float max_Lums = -10; float* h_min_Lums = new float[gridSize]; float* h_max_Lums = new float[gridSize]; // copy back arrays of minimum and maximum values for further reduction on cpu // to a single minimum and maximum checkCudaErrors(hipMemcpy(h_min_Lums, d_min_Lums, gridSize * sizeof(float), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_max_Lums, d_max_Lums, gridSize * sizeof(float), hipMemcpyDeviceToHost)); for (int i = 0; i < gridSize; i++) { min_Lums = std::min<float>(h_min_Lums[i], min_Lums); max_Lums = std::max<float>(h_max_Lums[i], max_Lums); } min_logLum = min_Lums; max_logLum = max_Lums; float lumRange = max_logLum - min_logLum; // printf("min is %f\n", min_logLum); // printf("max is %f\n", max_logLum); // printf("range is %f\n", lumRange); // unsigned int * h_histo = new unsigned int[numBins]; // unsigned int * h_cdf = new unsigned int[numBins]; // for (int i = 0; i < numBins; i++) { // h_histo[i] = 0; // h_cdf[i] = 0; //} unsigned int* d_histo; // std::vector<unsigned int> binCollect = {}; unsigned int sharedMemSize = blockSize * sizeof(unsigned int); checkCudaErrors(hipMalloc((void**)&d_histo, numBins * sizeof(unsigned int))); hipLaunchKernelGGL(( calcHisto), dim3(numBins), dim3(blockSize), sharedMemSize, 0, d_histo, d_logLuminance, min_logLum, lumRange, numBins, pxCount); hipDeviceSynchronize(); // checkCudaErrors(hipMemcpy(h_histo, d_histo, numBins * sizeof(unsigned // int), hipMemcpyDeviceToHost)); // // for (int i = 1; i < numBins; i++) { // h_cdf[i] = h_cdf[i - 1] + h_histo[i - 1]; // printf("cdf_cpu at %i is %i.\n", i, h_cdf[i]); // printf("Histo at %i is 
%i.\n", i, h_histo[i]); //} // this section of code replace the log2 function that is too expensive. // calculate how many loops the Hillis Steele scan is required for given array // size int cycles = 0; for (int i = numBins; i != 0; i >>= 1) { cycles++; } cycles -= 1; int modCheck = 1 << cycles; if (numBins > modCheck) { cycles += 1; } // same as above but much more expensive // int cycles = int(log2(float(numBins))); // if (int(log2(float(numBins))*10) > int(log2(float(numBins))*10)) { // cycles = int(log2(float(numBins)))+1; //} int cdfGridSize = numBins / blockSize; hipLaunchKernelGGL(( calcCDF), dim3(cdfGridSize), dim3(blockSize), sharedMemSize, 0, d_cdf, d_histo, numBins, cycles); hipDeviceSynchronize(); // unsigned int * h_cdf = new unsigned int[numBins]; // checkCudaErrors(hipMemcpy(h_cdf, d_cdf, numBins * sizeof(unsigned int), // hipMemcpyDeviceToHost)); // for (int i = 0; i < numBins; i++) { // printf("cdf_gpu at %i is %i.\n", i, h_cdf[i]); //} // delete[] h_min_lums; // delete[] h_max_lums; // delete[] h_histo; // delete[] h_cdf; }
a3f45d744bc7ab5e4c97b8bc9add228dec87785f.cu
/* Udacity Homework 3 HDR Tone-mapping Background HDR ============== A High Dynamic Range (HDR) image contains a wider variation of intensity and color than is allowed by the RGB format with 1 byte per channel that we have used in the previous assignment. To store this extra information we use single precision floating point for each channel. This allows for an extremely wide range of intensity values. In the image for this assignment, the inside of church with light coming in through stained glass windows, the raw input floating point values for the channels range from 0 to 275. But the mean is .41 and 98% of the values are less than 3! This means that certain areas (the windows) are extremely bright compared to everywhere else. If we linearly map this [0-275] range into the [0-255] range that we have been using then most values will be mapped to zero! The only thing we will be able to see are the very brightest areas - the windows - everything else will appear pitch black. The problem is that although we have cameras capable of recording the wide range of intensity that exists in the real world our monitors are not capable of displaying them. Our eyes are also quite capable of observing a much wider range of intensities than our image formats / monitors are capable of displaying. Tone-mapping is a process that transforms the intensities in the image so that the brightest values aren't nearly so far away from the mean. That way when we transform the values into [0-255] we can actually see the entire image. There are many ways to perform this process and it is as much an art as a science - there is no single "right" answer. In this homework we will implement one possible technique. Background Chrominance-Luminance ================================ The RGB space that we have been using to represent images can be thought of as one possible set of axes spanning a three dimensional space of color. We sometimes choose other axes to represent this space because they make certain operations more convenient. Another possible way of representing a color image is to separate the color information (chromaticity) from the brightness information. There are multiple different methods for doing this - a common one during the analog television days was known as Chrominance-Luminance or YUV. We choose to represent the image in this way so that we can remap only the intensity channel and then recombine the new intensity values with the color information to form the final image. Old TV signals used to be transmitted in this way so that black & white televisions could display the luminance channel while color televisions would display all three of the channels. Tone-mapping ============ In this assignment we are going to transform the luminance channel (actually the log of the luminance, but this is unimportant for the parts of the algorithm that you will be implementing) by compressing its range to [0, 1]. To do this we need the cumulative distribution of the luminance values. Example ------- input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2] min / max / range: 0 / 9 / 9 histo with 3 bins: [4 7 3] cdf : [4 11 14] Your task is to calculate this cumulative distribution by following these steps. 
*/ #include <cuda.h> #include <cuda_runtime.h> #include <cuda_runtime_api.h> #include <algorithm> #include <memory> #include <vector> #include "hvr/HW3/utils.h" #include <float.h> #include <limits.h> #include <math.h> #include <stdio.h> // Kernel for calculating minimum and maximum value of given array (output array // from reduction of 1/4 the size of the original array) M3_DLL __global__ void findMinMax(const float* const d_logLuminance, float* d_logLum_Min, float* d_logLum_Max, float* d_min_Lums, float* d_max_Lums, int pxCount) { const int absId = blockDim.x * blockIdx.x + threadIdx.x; const int tId = threadIdx.x; float d_min = d_logLuminance[0]; float d_max = d_logLuminance[0]; for (int i = 0; i < 4; i++) { if (absId < pxCount) { d_min = fminf(d_min, d_logLuminance[absId + i * blockDim.x * gridDim.x]); d_max = fmaxf(d_max, d_logLuminance[absId + i * blockDim.x * gridDim.x]); } } d_logLum_Min[absId] = d_min; d_logLum_Max[absId] = d_max; __syncthreads(); for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tId < s) { d_logLum_Min[absId] = fminf(d_logLum_Min[absId], d_logLum_Min[absId + s]); d_logLum_Max[absId] = fmaxf(d_logLum_Max[absId], d_logLum_Max[absId + s]); } __syncthreads(); } if (tId == 0) { d_min_Lums[blockIdx.x] = d_logLum_Min[absId]; d_max_Lums[blockIdx.x] = d_logLum_Max[absId]; } } // Kernel for calculating the histogram M3_DLL __global__ void calcHisto(unsigned int* d_histo, const float* const d_logLuminance, float min_logLum, float lumRange, const size_t numBins, const unsigned int pxCount) { const int tId = threadIdx.x; extern __shared__ unsigned int sh_bin[]; int bin = 0; int loopCount = (pxCount + blockDim.x - 1) / blockDim.x; // use each block as a single bin and process through the entire array to get // a single bin value for each block for (int i = 0; i < loopCount; i++) { if ((tId + i * blockDim.x) < pxCount) { if (blockIdx.x == int((d_logLuminance[tId + i * blockDim.x] - min_logLum) / lumRange * numBins)) { bin++; } } __syncthreads(); } sh_bin[tId] = bin; for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tId < s) { sh_bin[tId] = sh_bin[tId] + sh_bin[tId + s]; } __syncthreads(); } if (tId == 0) { d_histo[blockIdx.x] = sh_bin[0]; } } // Kernel for calculating the cdf from given histogram M3_DLL __global__ void calcCDF(unsigned int* const d_cdf, unsigned int* d_histo, const size_t numBins, const int cycles) { const int absId = blockDim.x * blockIdx.x + threadIdx.x; const int tId = threadIdx.x; extern __shared__ unsigned int prevSum[]; prevSum[tId] = 0; // calculate the total sum of the values before current block for (int i = 0; i < blockIdx.x; i++) { prevSum[tId] += d_histo[tId + i * blockDim.x]; } __syncthreads(); for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tId < s) { prevSum[tId] += prevSum[tId + s]; } __syncthreads(); } // calculate the inclusive scan with Hillis Steele scan int j = 1; for (int i = 0; i < cycles; i++) { if ((tId - j) >= 0) { d_histo[absId] += d_histo[absId - j]; } j = j * 2; __syncthreads(); } d_histo[absId] += prevSum[0]; __syncthreads(); // copy the result into a excludive scan array. d_cdf[0] = 0; // this piece of code is error prone to gpu code creating race conditions. 
// if (absId > 0) { // d_cdf[absId] = d_histo[absId - 1]; //} if (absId < (blockDim.x * gridDim.x - 1)) { d_cdf[absId + 1] = d_histo[absId]; } } M3_DLL void your_histogram_and_prefixsum(const float* const d_logLuminance, unsigned int* const d_cdf, float& min_logLum, float& max_logLum, const size_t numRows, const size_t numCols, const size_t numBins) { // TODO /*Here are the steps you need to implement 1) find the minimum and maximum value in the input logLuminance channel store in min_logLum and max_logLum 2) subtract them to find the range 3) generate a histogram of all the values in the logLuminance channel using the formula: bin = (lum[i] - lumMin) / lumRange * numBins 4) Perform an exclusive scan (prefix sum) on the histogram to get the cumulative distribution of luminance values (this should go in the incoming d_cdf pointer which already has been allocated for you) */ const unsigned int pxCount = numRows * numCols; const int pxSize = pxCount * sizeof(float); const int blockSize = 512; const int gridSize = (pxCount + blockSize * 4 - 1) / (blockSize * 4); float *d_min_Lums, *d_max_Lums, *d_logLum_Min, *d_logLum_Max; // std::unique_ptr<float[]> h_min_Lums(new // float[gridSize]);//std::make_unique<float[]>(gridSize); checkCudaErrors(cudaMalloc((void**)&d_min_Lums, gridSize * sizeof(float))); checkCudaErrors(cudaMalloc((void**)&d_max_Lums, gridSize * sizeof(float))); checkCudaErrors( cudaMalloc((void**)&d_logLum_Min, pxCount * sizeof(float) / 4)); checkCudaErrors( cudaMalloc((void**)&d_logLum_Max, pxCount * sizeof(float) / 4)); // printf("blockSize is %i\n", blockSize); // printf("gridSize is %i\n", gridSize); // printf("pxCount is %i\n", pxCount); // printf("pxSize is %i\n", pxSize); // printf("numBins is %i\n", numBins); findMinMax<<<gridSize, blockSize>>>(d_logLuminance, d_logLum_Min, d_logLum_Max, d_min_Lums, d_max_Lums, pxCount); cudaDeviceSynchronize(); float min_Lums = 10; float max_Lums = -10; float* h_min_Lums = new float[gridSize]; float* h_max_Lums = new float[gridSize]; // copy back arrays of minimum and maximum values for further reduction on cpu // to a single minimum and maximum checkCudaErrors(cudaMemcpy(h_min_Lums, d_min_Lums, gridSize * sizeof(float), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_max_Lums, d_max_Lums, gridSize * sizeof(float), cudaMemcpyDeviceToHost)); for (int i = 0; i < gridSize; i++) { min_Lums = std::min<float>(h_min_Lums[i], min_Lums); max_Lums = std::max<float>(h_max_Lums[i], max_Lums); } min_logLum = min_Lums; max_logLum = max_Lums; float lumRange = max_logLum - min_logLum; // printf("min is %f\n", min_logLum); // printf("max is %f\n", max_logLum); // printf("range is %f\n", lumRange); // unsigned int * h_histo = new unsigned int[numBins]; // unsigned int * h_cdf = new unsigned int[numBins]; // for (int i = 0; i < numBins; i++) { // h_histo[i] = 0; // h_cdf[i] = 0; //} unsigned int* d_histo; // std::vector<unsigned int> binCollect = {}; unsigned int sharedMemSize = blockSize * sizeof(unsigned int); checkCudaErrors(cudaMalloc((void**)&d_histo, numBins * sizeof(unsigned int))); calcHisto<<<numBins, blockSize, sharedMemSize>>>( d_histo, d_logLuminance, min_logLum, lumRange, numBins, pxCount); cudaDeviceSynchronize(); // checkCudaErrors(cudaMemcpy(h_histo, d_histo, numBins * sizeof(unsigned // int), cudaMemcpyDeviceToHost)); // // for (int i = 1; i < numBins; i++) { // h_cdf[i] = h_cdf[i - 1] + h_histo[i - 1]; // printf("cdf_cpu at %i is %i.\n", i, h_cdf[i]); // printf("Histo at %i is %i.\n", i, h_histo[i]); //} // this section of code 
replace the log2 function that is too expensive. // calculate how many loops the Hillis Steele scan is required for given array // size int cycles = 0; for (int i = numBins; i != 0; i >>= 1) { cycles++; } cycles -= 1; int modCheck = 1 << cycles; if (numBins > modCheck) { cycles += 1; } // same as above but much more expensive // int cycles = int(log2(float(numBins))); // if (int(log2(float(numBins))*10) > int(log2(float(numBins))*10)) { // cycles = int(log2(float(numBins)))+1; //} int cdfGridSize = numBins / blockSize; calcCDF<<<cdfGridSize, blockSize, sharedMemSize>>>( d_cdf, d_histo, numBins, cycles); cudaDeviceSynchronize(); // unsigned int * h_cdf = new unsigned int[numBins]; // checkCudaErrors(cudaMemcpy(h_cdf, d_cdf, numBins * sizeof(unsigned int), // cudaMemcpyDeviceToHost)); // for (int i = 0; i < numBins; i++) { // printf("cdf_gpu at %i is %i.\n", i, h_cdf[i]); //} // delete[] h_min_lums; // delete[] h_max_lums; // delete[] h_histo; // delete[] h_cdf; }
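One detail worth calling out in calcCDF above: the Hillis-Steele pass leaves an inclusive scan in d_histo, and the kernel turns it into the exclusive CDF by writing each value one slot to the right (d_cdf[absId + 1] = d_histo[absId], with d_cdf[0] = 0) rather than reading d_histo[absId - 1], which the author's comment flags as race-prone. A serial host-side sketch of that shift — hypothetical example, not part of the assignment code:

#include <cstdio>

int main() {
  unsigned int histo[8]     = {4, 7, 3, 0, 2, 5, 1, 6};
  unsigned int inclusive[8] = {0};
  unsigned int cdf[8]       = {0};

  // Serial stand-in for the Hillis-Steele inclusive scan.
  inclusive[0] = histo[0];
  for (int i = 1; i < 8; i++)
    inclusive[i] = inclusive[i - 1] + histo[i];

  // Exclusive CDF: first bin starts at zero, everything else shifts right
  // by one, mirroring d_cdf[absId + 1] = d_histo[absId] in the kernel.
  cdf[0] = 0;
  for (int i = 0; i < 7; i++)
    cdf[i + 1] = inclusive[i];

  for (int i = 0; i < 8; i++)
    printf("bin %d: histo=%u cdf=%u\n", i, histo[i], cdf[i]);
  return 0;
}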
8a20c1168b7b47bbfc9e503b91c7d7163c17f455.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <iostream>
#include "data_manager.h"
#include "gpu_util.h"
#include "gpu_benchmark.h"

#define BLOCK_SIZE 32

__global__ void naiveMulKernel(size_t pitch_A, size_t pitch_B, size_t pitch_C,
                               float* cuda_A, float* cuda_B, float* cuda_C, int k);

void naiveGPU_gemm_execute(GemmRun<float>* run) {
  size_t pitch_A, pitch_B, pitch_C, cuda_lda, cuda_ldb, cuda_ldc;
  float* cuda_A;
  float* cuda_B;
  float* cuda_C;

  init_cuda_matrices(run, &pitch_A, &pitch_B, &pitch_C, &cuda_A, &cuda_B, &cuda_C);

  cuda_lda = pitch_A / sizeof(float);
  cuda_ldb = pitch_B / sizeof(float);
  cuda_ldc = pitch_C / sizeof(float);

  // Invoke kernel
  dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
  dim3 dimGrid(run->n / dimBlock.x, run->m / dimBlock.y);
  hipLaunchKernelGGL(( naiveMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
      cuda_lda, cuda_ldb, cuda_ldc, cuda_A, cuda_B, cuda_C, run->k);
  hipDeviceSynchronize();

  deinit_cuda_matrices(run, pitch_C, cuda_A, cuda_B, cuda_C);
}

__global__ void naiveMulKernel(size_t pitch_A, size_t pitch_B, size_t pitch_C,
                               float* cuda_A, float* cuda_B, float* cuda_C, int k) {
  /*
    Taken from the CUDA Developer Documentation. Section 3.2.3 Shared Memory
  */
  // Each thread computes one element of C
  // by accumulating results into Cvalue
  float Cvalue = 0;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  for (int e = 0; e < k; ++e)
    Cvalue += cuda_A[row * pitch_A + e] * cuda_B[e * pitch_B + col];
  cuda_C[row * pitch_C + col] = Cvalue;
}
8a20c1168b7b47bbfc9e503b91c7d7163c17f455.cu
#include <cstdio>
#include <iostream>
#include "data_manager.h"
#include "gpu_util.h"
#include "gpu_benchmark.h"

#define BLOCK_SIZE 32

__global__ void naiveMulKernel(size_t pitch_A, size_t pitch_B, size_t pitch_C,
                               float* cuda_A, float* cuda_B, float* cuda_C, int k);

void naiveGPU_gemm_execute(GemmRun<float>* run) {
  size_t pitch_A, pitch_B, pitch_C, cuda_lda, cuda_ldb, cuda_ldc;
  float* cuda_A;
  float* cuda_B;
  float* cuda_C;

  init_cuda_matrices(run, &pitch_A, &pitch_B, &pitch_C, &cuda_A, &cuda_B, &cuda_C);

  cuda_lda = pitch_A / sizeof(float);
  cuda_ldb = pitch_B / sizeof(float);
  cuda_ldc = pitch_C / sizeof(float);

  // Invoke kernel
  dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
  dim3 dimGrid(run->n / dimBlock.x, run->m / dimBlock.y);
  naiveMulKernel<<<dimGrid, dimBlock>>>(cuda_lda, cuda_ldb, cuda_ldc,
                                        cuda_A, cuda_B, cuda_C, run->k);
  cudaDeviceSynchronize();

  deinit_cuda_matrices(run, pitch_C, cuda_A, cuda_B, cuda_C);
}

__global__ void naiveMulKernel(size_t pitch_A, size_t pitch_B, size_t pitch_C,
                               float* cuda_A, float* cuda_B, float* cuda_C, int k) {
  /*
    Taken from the CUDA Developer Documentation. Section 3.2.3 Shared Memory
  */
  // Each thread computes one element of C
  // by accumulating results into Cvalue
  float Cvalue = 0;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  for (int e = 0; e < k; ++e)
    Cvalue += cuda_A[row * pitch_A + e] * cuda_B[e * pitch_B + col];
  cuda_C[row * pitch_C + col] = Cvalue;
}
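The launch in naiveGPU_gemm_execute sizes the grid as run->n / dimBlock.x by run->m / dimBlock.y, so it assumes both output dimensions are exact multiples of BLOCK_SIZE; rows or columns past the last full tile would simply never be computed. A sketch of a bounds-checked variant — naiveMulKernelGuarded is hypothetical and not part of this benchmark, and it assumes m and n are the row and column counts of C as implied by the launch above:

__global__ void naiveMulKernelGuarded(size_t pitch_A, size_t pitch_B, size_t pitch_C,
                                      const float* A, const float* B, float* C,
                                      int m, int n, int k) {
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  if (row >= m || col >= n) return;   // guard lets the grid round up safely
  float Cvalue = 0.0f;
  for (int e = 0; e < k; ++e)
    Cvalue += A[row * pitch_A + e] * B[e * pitch_B + col];
  C[row * pitch_C + col] = Cvalue;
}

// Matching launch with a rounded-up grid:
// dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
// dim3 dimGrid((n + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
// naiveMulKernelGuarded<<<dimGrid, dimBlock>>>(lda, ldb, ldc, A, B, C, m, n, k);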
9df4b875fbc047c803944a3411c9e213e60e1d18.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @precisions normal z -> s d c @author Mark Gates */ #include "magma_internal.h" #define NB 64 /* Matrix is m x m, and is divided into block rows, each NB x m. Each block has NB threads. Each thread copies one row, iterating across all columns below diagonal. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. */ __global__ void zsymmetrize_lower( int m, magmaDoubleComplex *dA, int ldda ) { // dA iterates across row i and dAT iterates down column i. int i = blockIdx.x*NB + threadIdx.x; magmaDoubleComplex *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; magmaDoubleComplex *dAend = dA + i*ldda; // end at diagonal dA(i,i) while( dA < dAend ) { *dAT = MAGMA_Z_CONJ(*dA); // upper := lower dA += ldda; dAT += 1; } *dA = MAGMA_Z_MAKE( MAGMA_Z_REAL(*dA), 0 ); // make diagonal real } } // only difference with _lower version is direction dA=dAT instead of dAT=dA. __global__ void zsymmetrize_upper( int m, magmaDoubleComplex *dA, int ldda ) { // dA iterates across row i and dAT iterates down column i. int i = blockIdx.x*NB + threadIdx.x; magmaDoubleComplex *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; magmaDoubleComplex *dAend = dA + i*ldda; // end at diagonal dA(i,i) while( dA < dAend ) { *dA = MAGMA_Z_CONJ(*dAT); // lower := upper dA += ldda; dAT += 1; } *dA = MAGMA_Z_MAKE( MAGMA_Z_REAL(*dA), 0 ); // make diagonal real } } /***************************************************************************//** Purpose ------- ZSYMMETRIZE copies lower triangle to upper triangle, or vice-versa, to make dA a general representation of a symmetric matrix. In Complex, it sets the diagonal to be Real. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA that is valid on input. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in,out] dA COMPLEX_16 array, dimension (LDDA,N) The m by m matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_symmetrize *******************************************************************************/ extern "C" void magmablas_zsymmetrize( magma_uplo_t uplo, magma_int_t m, magmaDoubleComplex_ptr dA, magma_int_t ldda, magma_queue_t queue ) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper ) info = -1; else if ( m < 0 ) info = -2; else if ( ldda < max(1,m) ) info = -4; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 ) return; dim3 threads( NB ); dim3 grid( magma_ceildiv( m, NB ) ); if ( uplo == MagmaUpper ) { hipLaunchKernelGGL(( zsymmetrize_upper), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, dA, ldda ); } else { hipLaunchKernelGGL(( zsymmetrize_lower), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, dA, ldda ); } }
9df4b875fbc047c803944a3411c9e213e60e1d18.cu
/* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @precisions normal z -> s d c @author Mark Gates */ #include "magma_internal.h" #define NB 64 /* Matrix is m x m, and is divided into block rows, each NB x m. Each block has NB threads. Each thread copies one row, iterating across all columns below diagonal. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. */ __global__ void zsymmetrize_lower( int m, magmaDoubleComplex *dA, int ldda ) { // dA iterates across row i and dAT iterates down column i. int i = blockIdx.x*NB + threadIdx.x; magmaDoubleComplex *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; magmaDoubleComplex *dAend = dA + i*ldda; // end at diagonal dA(i,i) while( dA < dAend ) { *dAT = MAGMA_Z_CONJ(*dA); // upper := lower dA += ldda; dAT += 1; } *dA = MAGMA_Z_MAKE( MAGMA_Z_REAL(*dA), 0 ); // make diagonal real } } // only difference with _lower version is direction dA=dAT instead of dAT=dA. __global__ void zsymmetrize_upper( int m, magmaDoubleComplex *dA, int ldda ) { // dA iterates across row i and dAT iterates down column i. int i = blockIdx.x*NB + threadIdx.x; magmaDoubleComplex *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; magmaDoubleComplex *dAend = dA + i*ldda; // end at diagonal dA(i,i) while( dA < dAend ) { *dA = MAGMA_Z_CONJ(*dAT); // lower := upper dA += ldda; dAT += 1; } *dA = MAGMA_Z_MAKE( MAGMA_Z_REAL(*dA), 0 ); // make diagonal real } } /***************************************************************************//** Purpose ------- ZSYMMETRIZE copies lower triangle to upper triangle, or vice-versa, to make dA a general representation of a symmetric matrix. In Complex, it sets the diagonal to be Real. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA that is valid on input. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in,out] dA COMPLEX_16 array, dimension (LDDA,N) The m by m matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_symmetrize *******************************************************************************/ extern "C" void magmablas_zsymmetrize( magma_uplo_t uplo, magma_int_t m, magmaDoubleComplex_ptr dA, magma_int_t ldda, magma_queue_t queue ) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper ) info = -1; else if ( m < 0 ) info = -2; else if ( ldda < max(1,m) ) info = -4; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 ) return; dim3 threads( NB ); dim3 grid( magma_ceildiv( m, NB ) ); if ( uplo == MagmaUpper ) { zsymmetrize_upper<<< grid, threads, 0, queue->cuda_stream() >>>( m, dA, ldda ); } else { zsymmetrize_lower<<< grid, threads, 0, queue->cuda_stream() >>>( m, dA, ldda ); } }
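For readers less used to the column-major indexing above: in zsymmetrize_lower, thread i walks row i to the left of the diagonal and mirrors each element into column i. A stripped-down, real-valued sketch of that idea follows; it is illustrative only, not the MAGMA implementation — symmetrize_lower_real is a hypothetical name, and it drops the conjugation and the real-diagonal fix-up that matter for complex data.

__global__ void symmetrize_lower_real(int m, double* dA, int ldda) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;   // one thread per row index i
  if (i >= m) return;
  for (int j = 0; j < i; ++j)
    dA[j + i * ldda] = dA[i + j * ldda];           // upper (j,i) := lower (i,j), column-major
}

// Example launch, 64 threads per block as in the NB used above:
// symmetrize_lower_real<<<(m + 63) / 64, 64>>>(m, dA, ldda);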
a4ddb6fcfe8e534117ec6b8dc495355bf00c0012.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <math.h> #include <unistd.h> #undef CUDA_IFC4C_BENCH #ifdef _OPENMP #include <omp.h> #endif #ifdef __cplusplus extern "C" { #endif #include "integ/ofmo-integ.h" #include "integ/ofmo-ifc4c.h" extern FILE* fp_prof; // from common/ofmo-prof.h #ifdef __cplusplus } #endif #include <hip/hip_runtime.h> #include "cuda-drv.h" #include "cudalib.h" #include "cuda-ifc4c.h" #include "cuda-ifc4c-calc.h" #include "cuda-fmt-m.h" // Default {#block/#SMX, #threads/WARP_SIZE} set for each 2e-type // This array will be converted to actual {#block, #threads} set // in cuda_Init_Sub() by multiplying #SMX and WARP_SIZE, respectively. int dim_ifc4c[][2] = { #if CUDA_ARCH >= 350 { 14, 7}, // ssss { 14, 8}, // ssps { 14, 8}, // sspp { 14, 6}, // ssds { 13, 8}, // ssdp { 14, 6}, // ssdd { 11, 8}, // psss { 11, 6}, // psps { 9, 8}, // pspp { 8, 7}, // psds { 9, 8}, // psdp { 0, 0}, // psdd { 6, 8}, // ppss { 4, 8}, // ppps { 3, 8}, // pppp { 3, 7}, // ppds { 0, 0}, // ppdp { 0, 0}, // ppdd { 5, 6}, // dsss { 6, 8}, // dsps { 5, 8}, // dspp { 5, 8}, // dsds { 5, 8}, // dsdp { 0, 0}, // dsdd { 4, 8}, // dpss { 3, 8}, // dpps // { 3, 8}, // dppp { 0, 0}, // dppp { 3, 7}, // dpds // { 3, 7}, // dpdp { 0, 0}, // dpdp { 0, 0}, // dpdd { 0, 0}, // ddss { 0, 0}, // ddps { 0, 0}, // ddpp { 0, 0}, // ddds { 0, 0}, // dddp { 0, 0}, // dddd #else /* FERMI */ { 12, 8}, // ssss { 12, 8}, // ssps { 12, 8}, // sspp { 12, 8}, // ssds { 12, 8}, // ssdp { 8, 6}, // ssdd { 6, 8}, // psss { 6, 6}, // psps { 6, 6}, // pspp { 6, 6}, // psds { 6, 6}, // psdp { 0, 0}, // psdd { 3, 7}, // ppss { 3, 9}, // ppps { 3, 6}, // pppp // { 0, 0}, { 3, 6}, // ppds { 0, 0}, // ppdp { 0, 0}, // ppdd { 3, 8}, // dsss { 3, 6}, // dsps { 7, 6}, // dspp { 7, 6}, // dsds { 9, 6}, // dsdp // { 0, 0}, { 0, 0}, // dsdd { 3, 6}, // dpss { 3, 6}, // dpps { 3, 6}, // dppp // { 0, 0}, { 3, 6}, // dpds // { 0, 0}, { 3, 7}, // dpdp // { 0, 0}, { 0, 0}, // dpdd { 0, 0}, // ddss { 0, 0}, // ddps { 0, 0}, // ddpp { 0, 0}, // ddds { 0, 0}, // dddp { 0, 0}, // dddd #endif }; int ifc4c_counter_ini_type[] = { // 0 for 0, 1 for nblk, 2 for nblk*NIJCSW 1, 1, 1, 1, 1, 1, // ssxx 1, 1, 1, 1, 1, 1, // psxx 1, 1, 1, 1, 1, 1, // ppxx 1, 1, 1, 1, 1, 1, // dsxx 1, 1, 1, 1, 1, 1, // dpxx 1, 1, 1, 1, 1, 1, // ddxx }; /* ------------------------------------- */ /* ---- ssss ---- */ __host__ int cuda_ifc4c_os_ssss( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int 
nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; Labcd=0; hipError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #if 0 #ifdef CUDA_FMT_M_SM Ns = cuda_FMT_m_get_size(0)*sizeof(double); Ns += nthb * 2 * sizeof(double); #else Ns = nthb * 3 * sizeof(double); #endif #ifdef DLB_KL_SSSS Ns += nwarps * sizeof(int); #endif #endif Ns += nthb * sizeof(double); // sV Ns += nthb * sizeof(double); // SSSS #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif hipLaunchKernelGGL(( gpu_ifc4c_os_ssss) , dim3(dimGrid), dim3(dimBlock), Ns , 0, nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- ssps ---- */ __host__ int cuda_ifc4c_os_ssps( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; hipError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(1)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * 
NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif hipLaunchKernelGGL(( gpu_ifc4c_os_ssps) , dim3(dimGrid), dim3(dimBlock), Ns , 0, nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- sspp ---- */ __host__ int cuda_ifc4c_os_sspp( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; hipError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(2)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif hipLaunchKernelGGL(( gpu_ifc4c_os_sspp) , dim3(dimGrid), dim3(dimBlock), Ns , 0, nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- ssds ---- */ __host__ int cuda_ifc4c_os_ssds( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int 
leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; hipError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(2)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif hipLaunchKernelGGL(( gpu_ifc4c_os_ssds) , dim3(dimGrid), dim3(dimBlock), Ns , 0, nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- ssdp ---- */ __host__ int cuda_ifc4c_os_ssdp( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; hipError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(3)*sizeof(double); // tbl #endif Ns += nthb * 
sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif hipLaunchKernelGGL(( gpu_ifc4c_os_ssdp) , dim3(dimGrid), dim3(dimBlock), Ns , 0, nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- ssdd ---- */ __host__ int cuda_ifc4c_os_ssdd( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; hipError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(4)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif hipLaunchKernelGGL(( gpu_ifc4c_os_ssdd) , dim3(dimGrid), dim3(dimBlock), Ns , 0, nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- psss ---- */ __host__ int cuda_ifc4c_os_psss( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int 
shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; int Nab = NNAO(La)*NNAO(Lb); hipError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(1)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif Ns += nwarps * Nab * sizeof(double); // sVw if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif hipLaunchKernelGGL(( gpu_ifc4c_os_psss) , dim3(dimGrid), dim3(dimBlock), Ns , 0, nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- psps ---- */ __host__ int cuda_ifc4c_os_psps( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; int Nab = NNAO(La)*NNAO(Lb); hipError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = 
NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(2)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif Ns += nwarps * Nab * sizeof(double); // sVw if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif hipLaunchKernelGGL(( gpu_ifc4c_os_psps) , dim3(dimGrid), dim3(dimBlock), Ns , 0, nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- pspp ---- */ __host__ int cuda_ifc4c_os_pspp( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; int Nab = NNAO(La)*NNAO(Lb); hipError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(3)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif Ns += nwarps * Nab * sizeof(double); // sVw if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif hipLaunchKernelGGL(( gpu_ifc4c_os_pspp) , dim3(dimGrid), dim3(dimBlock), Ns , 0, nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- psds ---- */ __host__ int cuda_ifc4c_os_psds( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const 
double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; int Nab = NNAO(La)*NNAO(Lb); hipError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(3)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif Ns += nwarps * Nab * sizeof(double); // sVw if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif hipLaunchKernelGGL(( gpu_ifc4c_os_psds) , dim3(dimGrid), dim3(dimBlock), Ns , 0, nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- psdp ---- */ __host__ int cuda_ifc4c_os_psdp( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; int Nab = NNAO(La)*NNAO(Lb); hipError_t err; int ret = 0; 
int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(4)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif Ns += nwarps * Nab * sizeof(double); // sVw if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif hipLaunchKernelGGL(( gpu_ifc4c_os_psdp) , dim3(dimGrid), dim3(dimBlock), Ns , 0, nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- ppss ---- */ __host__ int cuda_ifc4c_os_ppss( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; int Nab = NNAO(La)*NNAO(Lb); hipError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(2)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif Ns += Nab * sizeof(double); // sVw if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif hipLaunchKernelGGL(( gpu_ifc4c_os_ppss) , dim3(dimGrid), dim3(dimBlock), Ns , 0, nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* 
------------------------------------- */ /* ---- ppps ---- */ __host__ int cuda_ifc4c_os_ppps( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; int Nab = NNAO(La)*NNAO(Lb); hipError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(3)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif Ns += Nab * sizeof(double); // sVw if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif hipLaunchKernelGGL(( gpu_ifc4c_os_ppps) , dim3(dimGrid), dim3(dimBlock), Ns , 0, nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- pppp ---- */ __host__ int cuda_ifc4c_os_pppp( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // 
density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; int Nab = NNAO(La)*NNAO(Lb); hipError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(4)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif Ns += Nab * sizeof(double); // sVw if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif hipLaunchKernelGGL(( gpu_ifc4c_os_pppp) , dim3(dimGrid), dim3(dimBlock), Ns , 0, nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- ppds ---- */ __host__ int cuda_ifc4c_os_ppds( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; int Nab = NNAO(La)*NNAO(Lb); hipError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(4)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif Ns += Nab * sizeof(double); // sVw if (NDEV<=0) return 2; 
for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif hipLaunchKernelGGL(( gpu_ifc4c_os_ppds) , dim3(dimGrid), dim3(dimBlock), Ns , 0, nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- dsss ---- */ __host__ int cuda_ifc4c_os_dsss( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; int Nab = NNAO(La)*NNAO(Lb); hipError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(2)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif Ns += nwarps * Nab * sizeof(double); // sVw if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif hipLaunchKernelGGL(( gpu_ifc4c_os_dsss) , dim3(dimGrid), dim3(dimBlock), Ns , 0, nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- dsps ---- */ __host__ int cuda_ifc4c_os_dsps( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int 
shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; int Nab = NNAO(La)*NNAO(Lb); hipError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(3)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif //Ns += nwarps * Nab * sizeof(double); // sVw Ns += Nab * sizeof(double); // sVw if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif hipLaunchKernelGGL(( gpu_ifc4c_os_dsps) , dim3(dimGrid), dim3(dimBlock), Ns , 0, nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- dspp ---- */ __host__ int cuda_ifc4c_os_dspp( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; int Nab = NNAO(La)*NNAO(Lb); hipError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = 
NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(4)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif //Ns += nwarps * Nab * sizeof(double); // sVw Ns += Nab * sizeof(double); // sVw if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif hipLaunchKernelGGL(( gpu_ifc4c_os_dspp) , dim3(dimGrid), dim3(dimBlock), Ns , 0, nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- dsds ---- */ __host__ int cuda_ifc4c_os_dsds( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; int Nab = NNAO(La)*NNAO(Lb); hipError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(4)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif //Ns += nwarps * Nab * sizeof(double); // sVw Ns += Nab * sizeof(double); // sVw if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif hipLaunchKernelGGL(( gpu_ifc4c_os_dsds) , dim3(dimGrid), dim3(dimBlock), Ns , 0, nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- dsdp ---- */ __host__ int cuda_ifc4c_os_dsdp( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data 
for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; int Nab = NNAO(La)*NNAO(Lb); hipError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(5)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif //Ns += nwarps * Nab * sizeof(double); // sVw Ns += Nab * sizeof(double); // sVw if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif hipLaunchKernelGGL(( gpu_ifc4c_os_dsdp) , dim3(dimGrid), dim3(dimBlock), Ns , 0, nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- dpss ---- */ __host__ int cuda_ifc4c_os_dpss( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; 
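    /* Added note: Lab is the triangular index of the bra shell-pair type
     * (s,p,d -> 0,1,2, so ss..dd map to 0..5); Lcd below is the analogous ket
     * index, and Labcd = Lab*6 + Lcd selects the row of dim_ifc4c[] and of the
     * 36-entry dispatch/label tables defined later in this file. */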
int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; int Nab = NNAO(La)*NNAO(Lb); hipError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(3)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif //Ns += nwarps * Nab * sizeof(double); // sVw Ns += Nab * sizeof(double); // sVw if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif hipLaunchKernelGGL(( gpu_ifc4c_os_dpss) , dim3(dimGrid), dim3(dimBlock), Ns , 0, nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- dpps ---- */ __host__ int cuda_ifc4c_os_dpps( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; int Nab = NNAO(La)*NNAO(Lb); hipError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(4)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif //Ns += nwarps * Nab * sizeof(double); // sVw Ns += Nab * sizeof(double); // sVw if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef 
GPU_DLB int c0 = nblk; #endif hipLaunchKernelGGL(( gpu_ifc4c_os_dpps) , dim3(dimGrid), dim3(dimBlock), Ns , 0, nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- dppp ---- */ __host__ int cuda_ifc4c_os_dppp( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; int Nab = NNAO(La)*NNAO(Lb); hipError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(5)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif //Ns += nwarps * Nab * sizeof(double); // sVw Ns += Nab * sizeof(double); // sVw if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif hipLaunchKernelGGL(( gpu_ifc4c_os_dppp) , dim3(dimGrid), dim3(dimBlock), Ns , 0, nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- dpds ---- */ __host__ int cuda_ifc4c_os_dpds( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const 
int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; int Nab = NNAO(La)*NNAO(Lb); hipError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(5)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif //Ns += nwarps * Nab * sizeof(double); // sVw Ns += Nab * sizeof(double); // sVw if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif hipLaunchKernelGGL(( gpu_ifc4c_os_dpds) , dim3(dimGrid), dim3(dimBlock), Ns , 0, nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- dpdp ---- */ __host__ int cuda_ifc4c_os_dpdp( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; int Nab = NNAO(La)*NNAO(Lb); hipError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 
dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(6)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif //Ns += nwarps * Nab * sizeof(double); // sVw Ns += Nab * sizeof(double); // sVw if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif hipLaunchKernelGGL(( gpu_ifc4c_os_dpdp) , dim3(dimGrid), dim3(dimBlock), Ns , 0, nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ static int (*cuda_host_ifc4c_calc_a[])( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) = { // original /*ofmo_ifc4c_ssss__, ofmo_ifc4c_ssps__, ofmo_ifc4c_sspp__, ofmo_ifc4c_ssds__, ofmo_ifc4c_ssdp__, ofmo_ifc4c_ssdd__, ofmo_ifc4c_psss__, ofmo_ifc4c_psps__, ofmo_ifc4c_pspp__, ofmo_ifc4c_psds__, ofmo_ifc4c_psdp__, ofmo_ifc4c_psdd__, ofmo_ifc4c_ppss__, ofmo_ifc4c_ppps__, ofmo_ifc4c_pppp__, ofmo_ifc4c_ppds__, ofmo_ifc4c_ppdp__, ofmo_ifc4c_ppdd__, ofmo_ifc4c_dsss__, ofmo_ifc4c_dsps__, ofmo_ifc4c_dspp__, ofmo_ifc4c_dsds__, ofmo_ifc4c_dsdp__, ofmo_ifc4c_dsdd__, ofmo_ifc4c_dpss__, ofmo_ifc4c_dpps__, ofmo_ifc4c_dppp__, ofmo_ifc4c_dpds__, ofmo_ifc4c_dpdp__, ofmo_ifc4c_dpdd__, ofmo_ifc4c_ddss__, ofmo_ifc4c_ddps__, ofmo_ifc4c_ddpp__, ofmo_ifc4c_ddds__, ofmo_ifc4c_dddp__, ofmo_ifc4c_dddd__,*/ // OS ofmo_ifc4c_os_ssss, ofmo_ifc4c_os_ssps, ofmo_ifc4c_os_sspp, ofmo_ifc4c_os_ssds, ofmo_ifc4c_os_ssdp, ofmo_ifc4c_os_ssdd, ofmo_ifc4c_os_psss, ofmo_ifc4c_os_psps, ofmo_ifc4c_os_pspp, ofmo_ifc4c_os_psds, ofmo_ifc4c_os_psdp, ofmo_ifc4c_os_psdd, ofmo_ifc4c_os_ppss, ofmo_ifc4c_os_ppps, ofmo_ifc4c_os_pppp, ofmo_ifc4c_os_ppds, ofmo_ifc4c_os_ppdp, ofmo_ifc4c_os_ppdd, ofmo_ifc4c_os_dsss, ofmo_ifc4c_os_dsps, ofmo_ifc4c_os_dspp, ofmo_ifc4c_os_dsds, ofmo_ifc4c_os_dsdp, ofmo_ifc4c_os_dsdd, ofmo_ifc4c_os_dpss, ofmo_ifc4c_os_dpps, ofmo_ifc4c_os_dppp, ofmo_ifc4c_os_dpds, ofmo_ifc4c_os_dpdp, ofmo_ifc4c_os_dpdd, ofmo_ifc4c_os_ddss, ofmo_ifc4c_os_ddps, ofmo_ifc4c_os_ddpp, ofmo_ifc4c_os_ddds, ofmo_ifc4c_os_dddp, ofmo_ifc4c_os_dddd, // Rys /*ofmo_ifc4c_rys_ssss, ofmo_ifc4c_rys_ssps, ofmo_ifc4c_rys_sspp, ofmo_ifc4c_rys_ssds, ofmo_ifc4c_rys_ssdp, ofmo_ifc4c_rys_ssdd, ofmo_ifc4c_rys_psss, ofmo_ifc4c_rys_psps, ofmo_ifc4c_rys_pspp, ofmo_ifc4c_rys_psds, ofmo_ifc4c_rys_psdp, ofmo_ifc4c_rys_psdd, ofmo_ifc4c_rys_ppss, ofmo_ifc4c_rys_ppps, 
ofmo_ifc4c_rys_pppp, ofmo_ifc4c_rys_ppds, ofmo_ifc4c_rys_ppdp, ofmo_ifc4c_rys_ppdd, ofmo_ifc4c_rys_dsss, ofmo_ifc4c_rys_dsps, ofmo_ifc4c_rys_dspp, ofmo_ifc4c_rys_dsds, ofmo_ifc4c_rys_dsdp, ofmo_ifc4c_rys_dsdd, ofmo_ifc4c_rys_dpss, ofmo_ifc4c_rys_dpps, ofmo_ifc4c_rys_dppp, ofmo_ifc4c_rys_dpds, ofmo_ifc4c_rys_dpdp, ofmo_ifc4c_rys_dpdd, ofmo_ifc4c_rys_ddss, ofmo_ifc4c_rys_ddps, ofmo_ifc4c_rys_ddpp, ofmo_ifc4c_rys_ddds, ofmo_ifc4c_rys_dddp, ofmo_ifc4c_rys_dddd,*/ }; static int (*cuda_ifc4c_calc_a[])( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) = { // OS cuda_ifc4c_os_ssss, cuda_ifc4c_os_ssps, cuda_ifc4c_os_sspp, cuda_ifc4c_os_ssds, cuda_ifc4c_os_ssdp, cuda_ifc4c_os_ssdd, cuda_ifc4c_os_psss, cuda_ifc4c_os_psps, cuda_ifc4c_os_pspp, cuda_ifc4c_os_psds, cuda_ifc4c_os_psdp, ofmo_ifc4c_os_psdd, cuda_ifc4c_os_ppss, cuda_ifc4c_os_ppps, cuda_ifc4c_os_pppp, cuda_ifc4c_os_ppds, ofmo_ifc4c_os_ppdp, ofmo_ifc4c_os_ppdd, cuda_ifc4c_os_dsss, cuda_ifc4c_os_dsps, cuda_ifc4c_os_dspp, cuda_ifc4c_os_dsds, cuda_ifc4c_os_dsdp, ofmo_ifc4c_os_dsdd, cuda_ifc4c_os_dpss, cuda_ifc4c_os_dpps, cuda_ifc4c_os_dppp, cuda_ifc4c_os_dpds, cuda_ifc4c_os_dpdp, ofmo_ifc4c_os_dpdd, ofmo_ifc4c_os_ddss, ofmo_ifc4c_os_ddps, ofmo_ifc4c_os_ddpp, ofmo_ifc4c_os_ddds, ofmo_ifc4c_os_dddp, ofmo_ifc4c_os_dddd, }; static char *sifc4c[] = { #if 0 "ssss", "ssps", "sspp", "ssds", "ssdp", "ssdd", "psss", "psps", "pspp", "psds", "psdp", "psdd", "ppss", "ppps", "pppp", "ppds", "ppdp", "ppdd", "dsss", "dsps", "dspp", "dsds", "dsdp", "dsdd", "dpss", "dpps", "dppp", "dpds", "dpdp", "dpdd", "ddss", "ddps", "ddpp", "ddds", "dddp", "dddd", #else "(ss,ss)", "(ss,ps)", "(ss,pp)", "(ss,ds)", "(ss,dp)", "(ss,dd)", "(ps,ss)", "(ps,ps)", "(ps,pp)", "(ps,ds)", "(ps,dp)", "(ps,dd)", "(pp,ss)", "(pp,ps)", "(pp,pp)", "(pp,ds)", "(pp,dp)", "(pp,dd)", "(ds,ss)", "(ds,ps)", "(ds,pp)", "(ds,ds)", "(ds,dp)", "(ds,dd)", "(dp,ss)", "(dp,ps)", "(dp,pp)", "(dp,ds)", "(dp,dp)", "(dp,dd)", "(dd,ss)", "(dd,ps)", "(dd,pp)", "(dd,ds)", "(dd,dp)", "(dd,dd)", #endif }; static double wifc4c[] = { -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, }; void cuda_print_wifc4c(void) { int i; int n = 6*6; int NDEV = cuda_get_numDevice(); // if (NDEV<=0) return; if (CUDA_ME!=0) return; #ifdef CUDA_IFC4C_BENCH #ifdef _OPENMP #pragma omp master 
#endif { printf("--- wifc4c ---\n"); for (i=0; i<n; i++) { if (wifc4c[i]>=0) { int nb=0, nt=0; if (NDEV>0&&dim_ifc4c[i][0]!=0) { nb = dim_ifc4c[i][0]; nt = dim_ifc4c[i][1]; } // printf("%4s %8.4f (%3d,%3d)\n",sifc4c[i],wifc4c[i],nb,nt); printf("%7s %8.4f (%3d,%3d)\n",sifc4c[i],wifc4c[i],nb,nt); } wifc4c[i]=-1.0e0; } printf("-----------\n"); fflush(stdout); } #endif /* CUDA_IFC4C_BENCH */ } __host__ int cuda_ifc4c_calc( const int idev, // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int NDEV = cuda_get_numDevice(); int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd = Lab*6 + Lcd; #if 0 #pragma omp master { int ijcs0 = leading_cs_pair_frg[Lab]; int ijcs1 = leading_cs_pair_frg[Lab+1]; int klcs0 = leading_cs_pair_mon[Lcd]; int klcs1 = leading_cs_pair_mon[Lcd+1]; int nijcs = ijcs1-ijcs0+1; int nklcs = klcs1-klcs0+1; if (fp_prof) fprintf(fp_prof, "%s: %d-%d\n", sifc4c[Labcd], nijcs, nklcs); } #endif #ifdef CUDA_IFC4C_BENCH #pragma omp master checkCudaErrors(hipDeviceSynchronize()); #pragma omp barrier double w0,w1; #pragma omp master w0 = cuda_Wtime(); #endif /* CUDA_IFC4C_BENCH */ if (NDEV==0||dim_ifc4c[Labcd][0]==0) { if (idev==0) cuda_host_ifc4c_calc_a[Labcd]( pnworkers, pworkerid, pLa, pLb, pLc, pLd, shel_atm_frg, shel_ini_frg, atom_x_frg, atom_y_frg, atom_z_frg, leading_cs_pair_frg, csp_schwarz_frg, csp_ics_frg, csp_jcs_frg, csp_leading_ps_pair_frg, psp_zeta_frg, psp_dkps_frg, psp_xiza_frg, shel_atm_mon, shel_ini_mon, atom_x_mon, atom_y_mon, atom_z_mon, leading_cs_pair_mon, csp_schwarz_mon, csp_ics_mon, csp_jcs_mon, csp_leading_ps_pair_mon, psp_zeta_mon, psp_dkps_mon, psp_xiza_mon, D_mon, V_frg ); } else { if (idev==1) cuda_ifc4c_calc_a[Labcd]( pnworkers, pworkerid, pLa, pLb, pLc, pLd, shel_atm_frg, shel_ini_frg, atom_x_frg, atom_y_frg, atom_z_frg, leading_cs_pair_frg, csp_schwarz_frg, csp_ics_frg, csp_jcs_frg, csp_leading_ps_pair_frg, psp_zeta_frg, psp_dkps_frg, psp_xiza_frg, shel_atm_mon, shel_ini_mon, atom_x_mon, atom_y_mon, atom_z_mon, leading_cs_pair_mon, csp_schwarz_mon, csp_ics_mon, csp_jcs_mon, csp_leading_ps_pair_mon, psp_zeta_mon, psp_dkps_mon, psp_xiza_mon, D_mon, V_frg ); #ifndef CUDA_IFC4C_BENCH } #else checkCudaErrors(hipDeviceSynchronize()); } #pragma omp barrier #pragma omp master { w1 = cuda_Wtime(); if (wifc4c[Labcd]<0) wifc4c[Labcd] = 0.0; wifc4c[Labcd] += w1-w0; } #endif /* CUDA_IFC4C_BENCH */ return 0; }
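/*
 * Added note (not part of the original generated pair): cuda_ifc4c_calc() above is
 * the dispatch entry point. It routes each (Lab,Lcd) shell-pair class either to the
 * host table cuda_host_ifc4c_calc_a[] (when no GPU is present or dim_ifc4c[Labcd]
 * is {0,0}) or to the GPU wrapper table cuda_ifc4c_calc_a[]. The idev argument
 * selects which of the two paths the calling thread actually executes
 * (idev==0 -> host fallback, idev==1 -> GPU launch), presumably so that one caller
 * thread can drive the CPU path while another drives the GPU path concurrently.
 * A minimal, hypothetical call sketch (argument lists abbreviated with "..."):
 *
 *     // GPU-side worker thread
 *     cuda_ifc4c_calc(1, &nworkers, &workerid, &La, &Lb, &Lc, &Ld, ..., D_mon, V_frg);
 *     // CPU-side worker thread
 *     cuda_ifc4c_calc(0, &nworkers, &workerid, &La, &Lb, &Lc, &Ld, ..., D_mon, V_frg);
 *
 * Here "..." stands for the fragment/monomer basis and cutoff tables passed through
 * unchanged; the real upper-layer driver is not shown in this file.
 */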
a4ddb6fcfe8e534117ec6b8dc495355bf00c0012.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <unistd.h> #undef CUDA_IFC4C_BENCH #ifdef _OPENMP #include <omp.h> #endif #ifdef __cplusplus extern "C" { #endif #include "integ/ofmo-integ.h" #include "integ/ofmo-ifc4c.h" extern FILE* fp_prof; // from common/ofmo-prof.h #ifdef __cplusplus } #endif #include <cuda.h> #include "cuda-drv.h" #include "cudalib.h" #include "cuda-ifc4c.h" #include "cuda-ifc4c-calc.h" #include "cuda-fmt-m.h" // Default {#block/#SMX, #threads/WARP_SIZE} set for each 2e-type // This array will be converted to actual {#block, #threads} set // in cuda_Init_Sub() by multiplying #SMX and WARP_SIZE, respectively. int dim_ifc4c[][2] = { #if CUDA_ARCH >= 350 { 14, 7}, // ssss { 14, 8}, // ssps { 14, 8}, // sspp { 14, 6}, // ssds { 13, 8}, // ssdp { 14, 6}, // ssdd { 11, 8}, // psss { 11, 6}, // psps { 9, 8}, // pspp { 8, 7}, // psds { 9, 8}, // psdp { 0, 0}, // psdd { 6, 8}, // ppss { 4, 8}, // ppps { 3, 8}, // pppp { 3, 7}, // ppds { 0, 0}, // ppdp { 0, 0}, // ppdd { 5, 6}, // dsss { 6, 8}, // dsps { 5, 8}, // dspp { 5, 8}, // dsds { 5, 8}, // dsdp { 0, 0}, // dsdd { 4, 8}, // dpss { 3, 8}, // dpps // { 3, 8}, // dppp { 0, 0}, // dppp { 3, 7}, // dpds // { 3, 7}, // dpdp { 0, 0}, // dpdp { 0, 0}, // dpdd { 0, 0}, // ddss { 0, 0}, // ddps { 0, 0}, // ddpp { 0, 0}, // ddds { 0, 0}, // dddp { 0, 0}, // dddd #else /* FERMI */ { 12, 8}, // ssss { 12, 8}, // ssps { 12, 8}, // sspp { 12, 8}, // ssds { 12, 8}, // ssdp { 8, 6}, // ssdd { 6, 8}, // psss { 6, 6}, // psps { 6, 6}, // pspp { 6, 6}, // psds { 6, 6}, // psdp { 0, 0}, // psdd { 3, 7}, // ppss { 3, 9}, // ppps { 3, 6}, // pppp // { 0, 0}, { 3, 6}, // ppds { 0, 0}, // ppdp { 0, 0}, // ppdd { 3, 8}, // dsss { 3, 6}, // dsps { 7, 6}, // dspp { 7, 6}, // dsds { 9, 6}, // dsdp // { 0, 0}, { 0, 0}, // dsdd { 3, 6}, // dpss { 3, 6}, // dpps { 3, 6}, // dppp // { 0, 0}, { 3, 6}, // dpds // { 0, 0}, { 3, 7}, // dpdp // { 0, 0}, { 0, 0}, // dpdd { 0, 0}, // ddss { 0, 0}, // ddps { 0, 0}, // ddpp { 0, 0}, // ddds { 0, 0}, // dddp { 0, 0}, // dddd #endif }; int ifc4c_counter_ini_type[] = { // 0 for 0, 1 for nblk, 2 for nblk*NIJCSW 1, 1, 1, 1, 1, 1, // ssxx 1, 1, 1, 1, 1, 1, // psxx 1, 1, 1, 1, 1, 1, // ppxx 1, 1, 1, 1, 1, 1, // dsxx 1, 1, 1, 1, 1, 1, // dpxx 1, 1, 1, 1, 1, 1, // ddxx }; /* ------------------------------------- */ /* ---- ssss ---- */ __host__ int cuda_ifc4c_os_ssss( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = 
*pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; Labcd=0; cudaError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #if 0 #ifdef CUDA_FMT_M_SM Ns = cuda_FMT_m_get_size(0)*sizeof(double); Ns += nthb * 2 * sizeof(double); #else Ns = nthb * 3 * sizeof(double); #endif #ifdef DLB_KL_SSSS Ns += nwarps * sizeof(int); #endif #endif Ns += nthb * sizeof(double); // sV Ns += nthb * sizeof(double); // SSSS #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif gpu_ifc4c_os_ssss <<< dimGrid, dimBlock, Ns >>> (nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- ssps ---- */ __host__ int cuda_ifc4c_os_ssps( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; cudaError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(1)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = 
nblk; #endif gpu_ifc4c_os_ssps <<< dimGrid, dimBlock, Ns >>> (nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- sspp ---- */ __host__ int cuda_ifc4c_os_sspp( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; cudaError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(2)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif gpu_ifc4c_os_sspp <<< dimGrid, dimBlock, Ns >>> (nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- ssds ---- */ __host__ int cuda_ifc4c_os_ssds( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double 
psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; cudaError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(2)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif gpu_ifc4c_os_ssds <<< dimGrid, dimBlock, Ns >>> (nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- ssdp ---- */ __host__ int cuda_ifc4c_os_ssdp( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; cudaError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(3)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); 
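    /* Added note: dev is fetched for each GPU but never dereferenced in these
     * host wrappers; the per-device SCF data it refers to is presumably consumed
     * inside the kernels, which receive only worker ids and screening thresholds
     * as launch arguments. */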
cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif gpu_ifc4c_os_ssdp <<< dimGrid, dimBlock, Ns >>> (nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- ssdd ---- */ __host__ int cuda_ifc4c_os_ssdd( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; cudaError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(4)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif gpu_ifc4c_os_ssdd <<< dimGrid, dimBlock, Ns >>> (nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- psss ---- */ __host__ int cuda_ifc4c_os_psss( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], 
const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; int Nab = NNAO(La)*NNAO(Lb); cudaError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(1)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif Ns += nwarps * Nab * sizeof(double); // sVw if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif gpu_ifc4c_os_psss <<< dimGrid, dimBlock, Ns >>> (nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- psps ---- */ __host__ int cuda_ifc4c_os_psps( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; int Nab = NNAO(La)*NNAO(Lb); cudaError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(2)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // 
klcsw #endif Ns += nwarps * Nab * sizeof(double); // sVw if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif gpu_ifc4c_os_psps <<< dimGrid, dimBlock, Ns >>> (nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- pspp ---- */ __host__ int cuda_ifc4c_os_pspp( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; int Nab = NNAO(La)*NNAO(Lb); cudaError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(3)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif Ns += nwarps * Nab * sizeof(double); // sVw if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif gpu_ifc4c_os_pspp <<< dimGrid, dimBlock, Ns >>> (nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- psds ---- */ __host__ int cuda_ifc4c_os_psds( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int 
shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; int Nab = NNAO(La)*NNAO(Lb); cudaError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(3)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif Ns += nwarps * Nab * sizeof(double); // sVw if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif gpu_ifc4c_os_psds <<< dimGrid, dimBlock, Ns >>> (nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- psdp ---- */ __host__ int cuda_ifc4c_os_psdp( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; int Nab = NNAO(La)*NNAO(Lb); cudaError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = 
dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(4)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif Ns += nwarps * Nab * sizeof(double); // sVw if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif gpu_ifc4c_os_psdp <<< dimGrid, dimBlock, Ns >>> (nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- ppss ---- */ __host__ int cuda_ifc4c_os_ppss( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; int Nab = NNAO(La)*NNAO(Lb); cudaError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(2)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif Ns += Nab * sizeof(double); // sVw if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif gpu_ifc4c_os_ppss <<< dimGrid, dimBlock, Ns >>> (nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- ppps ---- */ __host__ int cuda_ifc4c_os_ppps( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int 
leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; int Nab = NNAO(La)*NNAO(Lb); cudaError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(3)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif Ns += Nab * sizeof(double); // sVw if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif gpu_ifc4c_os_ppps <<< dimGrid, dimBlock, Ns >>> (nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- pppp ---- */ __host__ int cuda_ifc4c_os_pppp( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; int Nab = NNAO(La)*NNAO(Lb); cudaError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks 
= np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(4)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif Ns += Nab * sizeof(double); // sVw if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif gpu_ifc4c_os_pppp <<< dimGrid, dimBlock, Ns >>> (nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- ppds ---- */ __host__ int cuda_ifc4c_os_ppds( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; int Nab = NNAO(La)*NNAO(Lb); cudaError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(4)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif Ns += Nab * sizeof(double); // sVw if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif gpu_ifc4c_os_ppds <<< dimGrid, dimBlock, Ns >>> (nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- dsss ---- */ __host__ int cuda_ifc4c_os_dsss( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const 
int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; int Nab = NNAO(La)*NNAO(Lb); cudaError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(2)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif Ns += nwarps * Nab * sizeof(double); // sVw if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif gpu_ifc4c_os_dsss <<< dimGrid, dimBlock, Ns >>> (nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- dsps ---- */ __host__ int cuda_ifc4c_os_dsps( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + 
Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; int Nab = NNAO(La)*NNAO(Lb); cudaError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(3)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif //Ns += nwarps * Nab * sizeof(double); // sVw Ns += Nab * sizeof(double); // sVw if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif gpu_ifc4c_os_dsps <<< dimGrid, dimBlock, Ns >>> (nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- dspp ---- */ __host__ int cuda_ifc4c_os_dspp( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; int Nab = NNAO(La)*NNAO(Lb); cudaError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(4)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif //Ns += nwarps * Nab * sizeof(double); // sVw Ns += Nab * sizeof(double); // sVw if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; 
#endif gpu_ifc4c_os_dspp <<< dimGrid, dimBlock, Ns >>> (nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- dsds ---- */ __host__ int cuda_ifc4c_os_dsds( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; int Nab = NNAO(La)*NNAO(Lb); cudaError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(4)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif //Ns += nwarps * Nab * sizeof(double); // sVw Ns += Nab * sizeof(double); // sVw if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif gpu_ifc4c_os_dsds <<< dimGrid, dimBlock, Ns >>> (nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- dsdp ---- */ __host__ int cuda_ifc4c_os_dsdp( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], 
const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; int Nab = NNAO(La)*NNAO(Lb); cudaError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(5)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif //Ns += nwarps * Nab * sizeof(double); // sVw Ns += Nab * sizeof(double); // sVw if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif gpu_ifc4c_os_dsdp <<< dimGrid, dimBlock, Ns >>> (nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- dpss ---- */ __host__ int cuda_ifc4c_os_dpss( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; int Nab = NNAO(La)*NNAO(Lb); cudaError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(3)*sizeof(double); 
// tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif //Ns += nwarps * Nab * sizeof(double); // sVw Ns += Nab * sizeof(double); // sVw if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif gpu_ifc4c_os_dpss <<< dimGrid, dimBlock, Ns >>> (nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- dpps ---- */ __host__ int cuda_ifc4c_os_dpps( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; int Nab = NNAO(La)*NNAO(Lb); cudaError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(4)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif //Ns += nwarps * Nab * sizeof(double); // sVw Ns += Nab * sizeof(double); // sVw if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif gpu_ifc4c_os_dpps <<< dimGrid, dimBlock, Ns >>> (nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- dppp ---- */ __host__ int cuda_ifc4c_os_dppp( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int 
csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; int Nab = NNAO(La)*NNAO(Lb); cudaError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(5)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif //Ns += nwarps * Nab * sizeof(double); // sVw Ns += Nab * sizeof(double); // sVw if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif gpu_ifc4c_os_dppp <<< dimGrid, dimBlock, Ns >>> (nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- dpds ---- */ __host__ int cuda_ifc4c_os_dpds( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; int Nab = NNAO(La)*NNAO(Lb); cudaError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t 
Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(5)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif //Ns += nwarps * Nab * sizeof(double); // sVw Ns += Nab * sizeof(double); // sVw if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif gpu_ifc4c_os_dpds <<< dimGrid, dimBlock, Ns >>> (nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ /* ---- dpdp ---- */ __host__ int cuda_ifc4c_os_dpdp( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int nworkers = *pnworkers; int workerid = *pworkerid; int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd=Lab*6+Lcd; int Nab = NNAO(La)*NNAO(Lb); cudaError_t err; int ret = 0; int i; int NDEV = cuda_get_numDevice(); struct dev_Data *dev; int np = cuda_get_Nprocs(); int me = cuda_get_myRank(); int nwks = np * NDEV; int iwk0 = me * NDEV; int nwarps; int nblk,nthb; size_t Ns = 0; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); // nblk = NBLOCKS; // nwarps = NTHREADS/WARP_SIZE; nblk = dim_ifc4c[Labcd][0]; nthb = dim_ifc4c[Labcd][1]; nwarps = nthb/WARP_SIZE; dim3 dimBlock(WARP_SIZE, nwarps); dim3 dimGrid(nblk); #ifdef CUDA_FMT_M_SM Ns += cuda_FMT_m_get_size(6)*sizeof(double); // tbl #endif Ns += nthb * sizeof(double); // sV #ifdef DLB_KL Ns += nwarps * sizeof(int); // klcsw #endif //Ns += nwarps * Nab * sizeof(double); // sVw Ns += Nab * sizeof(double); // sVw if (NDEV<=0) return 2; for (i=0; i<NDEV; i++) { // int iwk = iwk0 + i * NBLOCKS; int iwk = iwk0 + i; dev = cuda_SCF_get_dev_Data(i); cuda_set_Device(i); #ifdef GPU_DLB int c0 = nblk; #endif gpu_ifc4c_os_dpdp <<< dimGrid, dimBlock, Ns >>> (nwks, iwk, eps_eri, eps_ps4, eps_sch); } cuda_set_Device(0); return 0; } /* ------------------------------------- */ static int (*cuda_host_ifc4c_calc_a[])( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int 
*pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) = { // original /*ofmo_ifc4c_ssss__, ofmo_ifc4c_ssps__, ofmo_ifc4c_sspp__, ofmo_ifc4c_ssds__, ofmo_ifc4c_ssdp__, ofmo_ifc4c_ssdd__, ofmo_ifc4c_psss__, ofmo_ifc4c_psps__, ofmo_ifc4c_pspp__, ofmo_ifc4c_psds__, ofmo_ifc4c_psdp__, ofmo_ifc4c_psdd__, ofmo_ifc4c_ppss__, ofmo_ifc4c_ppps__, ofmo_ifc4c_pppp__, ofmo_ifc4c_ppds__, ofmo_ifc4c_ppdp__, ofmo_ifc4c_ppdd__, ofmo_ifc4c_dsss__, ofmo_ifc4c_dsps__, ofmo_ifc4c_dspp__, ofmo_ifc4c_dsds__, ofmo_ifc4c_dsdp__, ofmo_ifc4c_dsdd__, ofmo_ifc4c_dpss__, ofmo_ifc4c_dpps__, ofmo_ifc4c_dppp__, ofmo_ifc4c_dpds__, ofmo_ifc4c_dpdp__, ofmo_ifc4c_dpdd__, ofmo_ifc4c_ddss__, ofmo_ifc4c_ddps__, ofmo_ifc4c_ddpp__, ofmo_ifc4c_ddds__, ofmo_ifc4c_dddp__, ofmo_ifc4c_dddd__,*/ // OS ofmo_ifc4c_os_ssss, ofmo_ifc4c_os_ssps, ofmo_ifc4c_os_sspp, ofmo_ifc4c_os_ssds, ofmo_ifc4c_os_ssdp, ofmo_ifc4c_os_ssdd, ofmo_ifc4c_os_psss, ofmo_ifc4c_os_psps, ofmo_ifc4c_os_pspp, ofmo_ifc4c_os_psds, ofmo_ifc4c_os_psdp, ofmo_ifc4c_os_psdd, ofmo_ifc4c_os_ppss, ofmo_ifc4c_os_ppps, ofmo_ifc4c_os_pppp, ofmo_ifc4c_os_ppds, ofmo_ifc4c_os_ppdp, ofmo_ifc4c_os_ppdd, ofmo_ifc4c_os_dsss, ofmo_ifc4c_os_dsps, ofmo_ifc4c_os_dspp, ofmo_ifc4c_os_dsds, ofmo_ifc4c_os_dsdp, ofmo_ifc4c_os_dsdd, ofmo_ifc4c_os_dpss, ofmo_ifc4c_os_dpps, ofmo_ifc4c_os_dppp, ofmo_ifc4c_os_dpds, ofmo_ifc4c_os_dpdp, ofmo_ifc4c_os_dpdd, ofmo_ifc4c_os_ddss, ofmo_ifc4c_os_ddps, ofmo_ifc4c_os_ddpp, ofmo_ifc4c_os_ddds, ofmo_ifc4c_os_dddp, ofmo_ifc4c_os_dddd, // Rys /*ofmo_ifc4c_rys_ssss, ofmo_ifc4c_rys_ssps, ofmo_ifc4c_rys_sspp, ofmo_ifc4c_rys_ssds, ofmo_ifc4c_rys_ssdp, ofmo_ifc4c_rys_ssdd, ofmo_ifc4c_rys_psss, ofmo_ifc4c_rys_psps, ofmo_ifc4c_rys_pspp, ofmo_ifc4c_rys_psds, ofmo_ifc4c_rys_psdp, ofmo_ifc4c_rys_psdd, ofmo_ifc4c_rys_ppss, ofmo_ifc4c_rys_ppps, ofmo_ifc4c_rys_pppp, ofmo_ifc4c_rys_ppds, ofmo_ifc4c_rys_ppdp, ofmo_ifc4c_rys_ppdd, ofmo_ifc4c_rys_dsss, ofmo_ifc4c_rys_dsps, ofmo_ifc4c_rys_dspp, ofmo_ifc4c_rys_dsds, ofmo_ifc4c_rys_dsdp, ofmo_ifc4c_rys_dsdd, ofmo_ifc4c_rys_dpss, ofmo_ifc4c_rys_dpps, ofmo_ifc4c_rys_dppp, ofmo_ifc4c_rys_dpds, ofmo_ifc4c_rys_dpdp, ofmo_ifc4c_rys_dpdd, ofmo_ifc4c_rys_ddss, ofmo_ifc4c_rys_ddps, ofmo_ifc4c_rys_ddpp, ofmo_ifc4c_rys_ddds, ofmo_ifc4c_rys_dddp, ofmo_ifc4c_rys_dddd,*/ }; static int (*cuda_ifc4c_calc_a[])( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int 
leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) = { // OS cuda_ifc4c_os_ssss, cuda_ifc4c_os_ssps, cuda_ifc4c_os_sspp, cuda_ifc4c_os_ssds, cuda_ifc4c_os_ssdp, cuda_ifc4c_os_ssdd, cuda_ifc4c_os_psss, cuda_ifc4c_os_psps, cuda_ifc4c_os_pspp, cuda_ifc4c_os_psds, cuda_ifc4c_os_psdp, ofmo_ifc4c_os_psdd, cuda_ifc4c_os_ppss, cuda_ifc4c_os_ppps, cuda_ifc4c_os_pppp, cuda_ifc4c_os_ppds, ofmo_ifc4c_os_ppdp, ofmo_ifc4c_os_ppdd, cuda_ifc4c_os_dsss, cuda_ifc4c_os_dsps, cuda_ifc4c_os_dspp, cuda_ifc4c_os_dsds, cuda_ifc4c_os_dsdp, ofmo_ifc4c_os_dsdd, cuda_ifc4c_os_dpss, cuda_ifc4c_os_dpps, cuda_ifc4c_os_dppp, cuda_ifc4c_os_dpds, cuda_ifc4c_os_dpdp, ofmo_ifc4c_os_dpdd, ofmo_ifc4c_os_ddss, ofmo_ifc4c_os_ddps, ofmo_ifc4c_os_ddpp, ofmo_ifc4c_os_ddds, ofmo_ifc4c_os_dddp, ofmo_ifc4c_os_dddd, }; static char *sifc4c[] = { #if 0 "ssss", "ssps", "sspp", "ssds", "ssdp", "ssdd", "psss", "psps", "pspp", "psds", "psdp", "psdd", "ppss", "ppps", "pppp", "ppds", "ppdp", "ppdd", "dsss", "dsps", "dspp", "dsds", "dsdp", "dsdd", "dpss", "dpps", "dppp", "dpds", "dpdp", "dpdd", "ddss", "ddps", "ddpp", "ddds", "dddp", "dddd", #else "(ss,ss)", "(ss,ps)", "(ss,pp)", "(ss,ds)", "(ss,dp)", "(ss,dd)", "(ps,ss)", "(ps,ps)", "(ps,pp)", "(ps,ds)", "(ps,dp)", "(ps,dd)", "(pp,ss)", "(pp,ps)", "(pp,pp)", "(pp,ds)", "(pp,dp)", "(pp,dd)", "(ds,ss)", "(ds,ps)", "(ds,pp)", "(ds,ds)", "(ds,dp)", "(ds,dd)", "(dp,ss)", "(dp,ps)", "(dp,pp)", "(dp,ds)", "(dp,dp)", "(dp,dd)", "(dd,ss)", "(dd,ps)", "(dd,pp)", "(dd,ds)", "(dd,dp)", "(dd,dd)", #endif }; static double wifc4c[] = { -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, }; void cuda_print_wifc4c(void) { int i; int n = 6*6; int NDEV = cuda_get_numDevice(); // if (NDEV<=0) return; if (CUDA_ME!=0) return; #ifdef CUDA_IFC4C_BENCH #ifdef _OPENMP #pragma omp master #endif { printf("--- wifc4c ---\n"); for (i=0; i<n; i++) { if (wifc4c[i]>=0) { int nb=0, nt=0; if (NDEV>0&&dim_ifc4c[i][0]!=0) { nb = dim_ifc4c[i][0]; nt = dim_ifc4c[i][1]; } // printf("%4s %8.4f (%3d,%3d)\n",sifc4c[i],wifc4c[i],nb,nt); printf("%7s %8.4f (%3d,%3d)\n",sifc4c[i],wifc4c[i],nb,nt); } wifc4c[i]=-1.0e0; } printf("-----------\n"); fflush(stdout); } #endif /* CUDA_IFC4C_BENCH */ } __host__ int cuda_ifc4c_calc( const int idev, // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int 
csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int NDEV = cuda_get_numDevice(); int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd; int Lab = La*(La+1)/2 + Lb; int Lcd = Lc*(Lc+1)/2 + Ld; int Labcd = Lab*6 + Lcd; #if 0 #pragma omp master { int ijcs0 = leading_cs_pair_frg[Lab]; int ijcs1 = leading_cs_pair_frg[Lab+1]; int klcs0 = leading_cs_pair_mon[Lcd]; int klcs1 = leading_cs_pair_mon[Lcd+1]; int nijcs = ijcs1-ijcs0+1; int nklcs = klcs1-klcs0+1; if (fp_prof) fprintf(fp_prof, "%s: %d-%d\n", sifc4c[Labcd], nijcs, nklcs); } #endif #ifdef CUDA_IFC4C_BENCH #pragma omp master checkCudaErrors(cudaDeviceSynchronize()); #pragma omp barrier double w0,w1; #pragma omp master w0 = cuda_Wtime(); #endif /* CUDA_IFC4C_BENCH */ if (NDEV==0||dim_ifc4c[Labcd][0]==0) { if (idev==0) cuda_host_ifc4c_calc_a[Labcd]( pnworkers, pworkerid, pLa, pLb, pLc, pLd, shel_atm_frg, shel_ini_frg, atom_x_frg, atom_y_frg, atom_z_frg, leading_cs_pair_frg, csp_schwarz_frg, csp_ics_frg, csp_jcs_frg, csp_leading_ps_pair_frg, psp_zeta_frg, psp_dkps_frg, psp_xiza_frg, shel_atm_mon, shel_ini_mon, atom_x_mon, atom_y_mon, atom_z_mon, leading_cs_pair_mon, csp_schwarz_mon, csp_ics_mon, csp_jcs_mon, csp_leading_ps_pair_mon, psp_zeta_mon, psp_dkps_mon, psp_xiza_mon, D_mon, V_frg ); } else { if (idev==1) cuda_ifc4c_calc_a[Labcd]( pnworkers, pworkerid, pLa, pLb, pLc, pLd, shel_atm_frg, shel_ini_frg, atom_x_frg, atom_y_frg, atom_z_frg, leading_cs_pair_frg, csp_schwarz_frg, csp_ics_frg, csp_jcs_frg, csp_leading_ps_pair_frg, psp_zeta_frg, psp_dkps_frg, psp_xiza_frg, shel_atm_mon, shel_ini_mon, atom_x_mon, atom_y_mon, atom_z_mon, leading_cs_pair_mon, csp_schwarz_mon, csp_ics_mon, csp_jcs_mon, csp_leading_ps_pair_mon, psp_zeta_mon, psp_dkps_mon, psp_xiza_mon, D_mon, V_frg ); #ifndef CUDA_IFC4C_BENCH } #else checkCudaErrors(cudaDeviceSynchronize()); } #pragma omp barrier #pragma omp master { w1 = cuda_Wtime(); if (wifc4c[Labcd]<0) wifc4c[Labcd] = 0.0; wifc4c[Labcd] += w1-w0; } #endif /* CUDA_IFC4C_BENCH */ return 0; }
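/* The wrappers above all share one launch-configuration recipe: fold the shell-quartet
 * class into a single table index, Labcd = (La*(La+1)/2 + Lb)*6 + (Lc*(Lc+1)/2 + Ld),
 * read the per-class grid/block sizes from dim_ifc4c[Labcd], and accumulate the dynamic
 * shared-memory size Ns from the Fm(T) table (when CUDA_FMT_M_SM is defined), one double
 * per thread for sV, one int per warp for the DLB_KL counter, and an Nab-sized sVw buffer
 * (some classes above keep one copy per warp, others a single shared copy).  The sketch
 * below restates that recipe in isolation; it is a minimal illustration, not the project's
 * API, and the arguments stand in for dim_ifc4c and cuda_FMT_m_get_size(). */
#include <cstddef>

struct Ifc4cLaunchCfg {
    int    nblk;   // grid size, dim_ifc4c[Labcd][0]
    int    nthb;   // threads per block, dim_ifc4c[Labcd][1]
    size_t Ns;     // dynamic shared memory in bytes
};

static inline int ifc4c_pair_index(int L1, int L2) { return L1 * (L1 + 1) / 2 + L2; }

static Ifc4cLaunchCfg ifc4c_launch_cfg(int nblk, int nthb, int Nab,
                                       size_t fmt_table_doubles, // 0 when CUDA_FMT_M_SM is off
                                       bool   dlb_kl,            // DLB_KL enabled?
                                       bool   per_warp_sVw)      // one sVw copy per warp?
{
    const int warp_size = 32;
    const int nwarps    = nthb / warp_size;
    size_t Ns = 0;
    Ns += fmt_table_doubles * sizeof(double);                                   // tbl
    Ns += (size_t)nthb * sizeof(double);                                        // sV
    if (dlb_kl) Ns += (size_t)nwarps * sizeof(int);                             // klcsw
    Ns += (size_t)(per_warp_sVw ? nwarps : 1) * (size_t)Nab * sizeof(double);   // sVw
    return Ifc4cLaunchCfg{nblk, nthb, Ns};
}

// e.g. for (ps,ps): Labcd = ifc4c_pair_index(1,0)*6 + ifc4c_pair_index(1,0) = 7,
// and the resulting Ns feeds the <<< dimGrid, dimBlock, Ns >>> launch on each device.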
616c8188b2a09adcc76503e32a499ddfb3d0b7c1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "yolov5_face_plugin.h" #include "stdio.h" #include <iostream> #include <cassert> #include <memory> #include<math.h> #ifndef CUDA_CHECK #define CUDA_CHECK(callstr) \ { \ hipError_t error_code = callstr; \ if (error_code != hipSuccess) { \ std::cerr << "CUDA error " << error_code << " at " << __FILE__ << ":" << __LINE__; \ assert(0); \ } \ } #endif namespace nvinfer1 { yolov5FacePlugin::yolov5FacePlugin() { conf_thresh = yolov5FaceConfig::CONF_THRESH; refer_rows_1 = 3 * yolov5FaceConfig::INPUT_H * yolov5FaceConfig::INPUT_W / (8.0 * 8.0); refer_rows_2 = refer_rows_1 + 3 * yolov5FaceConfig::INPUT_H * yolov5FaceConfig::INPUT_W / (16.0 * 16.0); refer_rows_3 = refer_rows_2 + 3 * yolov5FaceConfig::INPUT_H * yolov5FaceConfig::INPUT_W / (32.0 * 32.0); std::cout<<"init decode plugin" <<std::endl; } yolov5FacePlugin::~yolov5FacePlugin() { std::cout<<"destroy yolov5_face plugin"<<std::endl; } // create the plugin at runtime from a byte stream yolov5FacePlugin::yolov5FacePlugin(const void* data, size_t length) { } void yolov5FacePlugin::serialize(void* buffer) const { } size_t yolov5FacePlugin::getSerializationSize() const { return 0; } int yolov5FacePlugin::initialize() { return 0; } Dims yolov5FacePlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) { //output the result to channel int totalsize = yolov5FaceConfig::MAX_OUT * sizeof(yolov5FaceConfig::FaceBox) / sizeof(float); return Dims3(totalsize + 1, 1, 1); } // Set plugin namespace void yolov5FacePlugin::setPluginNamespace(const char* pluginNamespace) { mPluginNamespace = pluginNamespace; } const char* yolov5FacePlugin::getPluginNamespace() const { return mPluginNamespace; } // Return the DataType of the plugin output at the requested index DataType yolov5FacePlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const { return DataType::kFLOAT; } // Return true if output tensor is broadcast across a batch. bool yolov5FacePlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const { return false; } // Return true if plugin can use input that is broadcast across batch without replication. bool yolov5FacePlugin::canBroadcastInputAcrossBatch(int inputIndex) const { return false; } void yolov5FacePlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) { } // Attach the plugin object to an execution context and grant the plugin the access to some context resource. void yolov5FacePlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) { } // Detach the plugin object from its execution context. void yolov5FacePlugin::detachFromContext() {} const char* yolov5FacePlugin::getPluginType() const { return "YOLOV5FACE_TRT"; } const char* yolov5FacePlugin::getPluginVersion() const { return "1"; } void yolov5FacePlugin::destroy() { delete this; } // Clone the plugin IPluginV2IOExt* yolov5FacePlugin::clone() const { yolov5FacePlugin *p = new yolov5FacePlugin(); p->setPluginNamespace(mPluginNamespace); return p; } __device__ float Logist(float data){ return 1./(1. 
+ expf(-data)); }; //sigmoid func __device__ float dev_anchors_grid[3][6] = {4,5, 8,10, 13,16, 23,29, 43,55, 73,105, 146,217, 231,300, 335,433}; __device__ int dev_strides[3] = {8, 16, 32}; //compute, input size 18900 * 16 __global__ void CalDetection(const float *input, float *output, int refer_rows_1, int refer_rows_2, int refer_rows_3, int num_elem, int output_elem) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx >= num_elem) return; const float* curInput = input + idx * 16; //first element of each row, 16 hard-coded for now int bn_idx = idx / refer_rows_3; //batch_id, total_grid=18900 idx %= refer_rows_3; //offset idx in batch_id // filter by confidence float cur_conf = Logist(*(curInput + 4)); if (cur_conf <= yolov5FaceConfig::CONF_THRESH) //0.3 return; //printf("ori_val: %f, conf: %f, thresh: %f\n", *(curInput + 4), cur_conf, yolov5FaceConfig::CONF_THRESH); float *res_count = output + bn_idx * output_elem; int count = (int)atomicAdd(res_count, 1); //accumulate total count if (count >= yolov5FaceConfig::MAX_OUT) return; //determine which layer this index falls in int layer_idx = 0; if (idx > refer_rows_2) { layer_idx = 2; idx -= refer_rows_2; } else if (idx > refer_rows_1) { layer_idx = 1; idx -= refer_rows_1; } int cur_stride = dev_strides[layer_idx]; int cur_w = yolov5FaceConfig::INPUT_W / cur_stride; int cur_h = yolov5FaceConfig::INPUT_H / cur_stride; int h_idx_all = idx / cur_w; //global idx stacked along h direction int h_idx = h_idx_all % cur_h; //index within the grid int anchor_idx = (h_idx_all / cur_h) % 3; //which anchor it belongs to int w_idx = idx % cur_w; //corresponding index in the horizontal direction //build the detection box char* data = (char *)res_count + sizeof(float) + count * sizeof(yolov5FaceConfig::FaceBox); yolov5FaceConfig::FaceBox* det = (yolov5FaceConfig::FaceBox*)(data); //xywhs c float bw = pow((Logist(*(curInput + 2)) * 2), 2) * dev_anchors_grid[layer_idx][2 * anchor_idx]; //w float bh = pow((Logist(*(curInput + 3)) * 2), 2) * dev_anchors_grid[layer_idx][2 * anchor_idx + 1]; //h det->bbox[0] = (Logist(*(curInput)) * 2. - 0.5 + w_idx) * cur_stride - bw / 2.0; //x1 det->bbox[1] = (Logist(*(curInput + 1)) * 2.
- 0.5 + h_idx) * cur_stride - bh / 2.0; //y1 det->bbox[2] = det->bbox[0] + bw; //x2 det->bbox[3] = det->bbox[1] + bh; //y2 det->bbox[4] = cur_conf * Logist(*(curInput+ 15)); //score det->conf = cur_conf; //landmarks x1 y1 -> x5 y5 det->landmarks[0] = (*(curInput+ 5)) * dev_anchors_grid[layer_idx][2 * anchor_idx] + w_idx * cur_stride; det->landmarks[1] = (*(curInput+ 6)) * dev_anchors_grid[layer_idx][2 * anchor_idx + 1] + h_idx * cur_stride; det->landmarks[2] = (*(curInput+ 7)) * dev_anchors_grid[layer_idx][2 * anchor_idx] + w_idx * cur_stride; det->landmarks[3] = (*(curInput+ 8)) * dev_anchors_grid[layer_idx][2 * anchor_idx + 1] + h_idx * cur_stride; det->landmarks[4] = (*(curInput+ 9)) * dev_anchors_grid[layer_idx][2 * anchor_idx] + w_idx * cur_stride; det->landmarks[5] = (*(curInput+ 10)) * dev_anchors_grid[layer_idx][2 * anchor_idx + 1] + h_idx * cur_stride; det->landmarks[6] = (*(curInput+ 11)) * dev_anchors_grid[layer_idx][2 * anchor_idx] + w_idx * cur_stride; det->landmarks[7] = (*(curInput+ 12)) * dev_anchors_grid[layer_idx][2 * anchor_idx + 1] + h_idx * cur_stride; det->landmarks[8] = (*(curInput+ 13)) * dev_anchors_grid[layer_idx][2 * anchor_idx] + w_idx * cur_stride; det->landmarks[9] = (*(curInput+ 14)) * dev_anchors_grid[layer_idx][2 * anchor_idx + 1] + h_idx * cur_stride; } void yolov5FacePlugin::forwardGpu(const float *const * inputs, float * output, hipStream_t stream, int batchSize) { int outputElem = 1 + yolov5FaceConfig::MAX_OUT * sizeof(yolov5FaceConfig::FaceBox) / sizeof(float); for (int idx = 0; idx < batchSize; ++idx) { CUDA_CHECK(hipMemset(output + idx * outputElem, 0, sizeof(float))); //set total_num=0 } int total_num_elem = refer_rows_3 * batchSize; //std::cout<<"total_num_elem: "<<total_num_elem << "row num: "<<row_num<<" batchsize:" << batchSize <<std::endl; CalDetection << < (total_num_elem + thread_count_ - 1) / thread_count_, thread_count_ , 0, stream >> > (inputs[0], output, refer_rows_1, refer_rows_2, refer_rows_3, total_num_elem, outputElem); } int yolov5FacePlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, hipStream_t stream) { //GPU //CUDA_CHECK(hipStreamSynchronize(stream)); forwardGpu((const float *const *)inputs, (float *)outputs[0], stream, batchSize); return 0; }; PluginFieldCollection yolov5FacePluginCreator::mFC{}; std::vector<PluginField> yolov5FacePluginCreator::mPluginAttributes; yolov5FacePluginCreator::yolov5FacePluginCreator() { mPluginAttributes.clear(); mFC.nbFields = mPluginAttributes.size(); mFC.fields = mPluginAttributes.data(); } const char* yolov5FacePluginCreator::getPluginName() const { return "YOLOV5FACE_TRT"; } const char* yolov5FacePluginCreator::getPluginVersion() const { return "1"; } const PluginFieldCollection* yolov5FacePluginCreator::getFieldNames() { return &mFC; } IPluginV2IOExt* yolov5FacePluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc) { yolov5FacePlugin* obj = new yolov5FacePlugin(); obj->setPluginNamespace(mNamespace.c_str()); return obj; } IPluginV2IOExt* yolov5FacePluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength) { // This object will be deleted when the network is destroyed, which will // call PReluPlugin::destroy() yolov5FacePlugin* obj = new yolov5FacePlugin(serialData, serialLength); obj->setPluginNamespace(mNamespace.c_str()); return obj; } } // __global__ void yolov5FacePreprocessKernel(const unsigned char*src,int srcWidth,int srcHeight,int srcPitch,float *dst, int dstWidth,int dstHeight, int 
write_x, int write_y, float resize_w, float resize_h) { double srcXf; double srcYf; int srcX; int srcY; double u; double v; int dstOffset; int y = blockIdx.y*blockDim.y+threadIdx.y; int x = blockIdx.x*blockDim.x+threadIdx.x; if(x>=dstWidth || y>=dstHeight) return; //int write_x,write_y; //float resize_w,resize_h; double r_w = dstWidth / (srcWidth*1.0); double r_h = dstHeight / (srcHeight*1.0); if (r_h > r_w) { resize_w = dstWidth; resize_h = r_w * srcHeight; write_x = 0; write_y = (dstHeight - resize_h) / 2; } else { resize_w = r_h * srcWidth; resize_h = dstHeight; write_x = (dstWidth - resize_w) / 2; write_y = 0; } if((x >= write_x) && (x < write_x + resize_w) && (y >= write_y) && (y < write_y + resize_h)) { srcXf= (x - write_x) * ((float)(srcWidth/resize_w)) ; srcYf = (y - write_y) * ((float)(srcHeight/resize_h)); srcX = (int)srcXf; srcY = (int)srcYf; u= srcXf - srcX; v = srcYf - srcY; //r chanel if(y*dstWidth+x >= dstWidth*dstHeight) { return; } if(srcY*srcPitch+srcX >= srcPitch*srcHeight || (srcY+1)*srcPitch+srcX >= srcPitch*srcHeight || srcY*srcPitch+(srcX+1) >= srcPitch*srcHeight || (srcY+1)*srcPitch+(srcX+1) >= srcPitch*srcHeight) { return; } dstOffset =(y*dstWidth+x) + 2 * dstWidth * dstHeight; if(dstOffset >= dstWidth * dstHeight * 3) { return; } dst[dstOffset] = 0; dst[dstOffset]+=(1-u)*(1-v)*src[srcY*srcPitch+srcX*3]; dst[dstOffset]+=(1-u)*v*src[(srcY+1)*srcPitch+srcX*3]; dst[dstOffset]+=u*(1-v)*src[srcY*srcPitch+(srcX+1)*3]; dst[dstOffset]+= u*v*src[(srcY+1)*srcPitch+(srcX+1)*3]; dst[dstOffset] = dst[dstOffset] / 255.0; //g chanel dstOffset =(y*dstWidth+x) + dstWidth * dstHeight; if(dstOffset >= dstWidth * dstHeight * 3) { return; } dst[dstOffset] = 0; dst[dstOffset]+=(1-u)*(1-v)*src[srcY*srcPitch+srcX*3+1]; dst[dstOffset]+=(1-u)*v*src[(srcY+1)*srcPitch+srcX*3+1]; dst[dstOffset]+=u*(1-v)*src[srcY*srcPitch+(srcX+1)*3+1]; dst[dstOffset]+= u*v*src[(srcY+1)*srcPitch+(srcX+1)*3+1]; dst[dstOffset] = dst[dstOffset] / 255.0; //b chanel dstOffset =(y*dstWidth+x) ; if(dstOffset >= dstWidth * dstHeight * 3) { return; } dst[dstOffset] = 0; dst[dstOffset]+=(1-u)*(1-v)*src[srcY*srcPitch+srcX*3+2]; dst[dstOffset]+=(1-u)*v*src[(srcY+1)*srcPitch+srcX*3+2]; dst[dstOffset]+=u*(1-v)*src[srcY*srcPitch+(srcX+1)*3+2]; dst[dstOffset]+= u*v*src[(srcY+1)*srcPitch+(srcX+1)*3+2]; dst[dstOffset] = dst[dstOffset] / 255.0; } else { if(y*dstWidth+x >= dstWidth*dstHeight) { return; } //r chanel int dstOffset =(y*dstWidth+x) + 2 * dstWidth * dstHeight; if(dstOffset >= dstWidth * dstHeight * 3) { return; } dst[dstOffset] = 128; dst[dstOffset] = dst[dstOffset] / 255.0; //g chanel dstOffset =(y*dstWidth+x) + dstWidth * dstHeight; if(dstOffset >= dstWidth * dstHeight * 3) { return; } dst[dstOffset] = 128; dst[dstOffset] = dst[dstOffset] / 255.0; //b chanel dstOffset =(y*dstWidth+x); if(dstOffset >= dstWidth * dstHeight * 3) { return; } dst[dstOffset] = 128; dst[dstOffset] = dst[dstOffset] / 255.0; } } void yolov5FacePreprocess(const unsigned char*src,int srcWidth,int srcHeight,int srcPitch, float* dst,int dstWidth, int dstHeight, int write_x, int write_y, float resize_w, float resize_h, hipStream_t stream) { int uint = 16; dim3 grid((dstWidth+uint-1)/uint,(dstHeight+uint-1)/uint); dim3 block(uint,uint); hipLaunchKernelGGL(( yolov5FacePreprocessKernel), dim3(grid),dim3(block),0,stream, src, srcWidth, srcHeight,srcPitch,dst, dstWidth, dstHeight, write_x, write_y, resize_w, resize_h); }
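/* The decode kernel and forwardGpu above define a simple per-image output layout:
 * element 0 holds the number of candidates written via atomicAdd, followed by up to
 * MAX_OUT packed FaceBox records (x1, y1, x2, y2, score in bbox[], the raw objectness
 * in conf, and five landmark (x, y) pairs).  The host-side reader below is a minimal
 * sketch of how that buffer can be unpacked after the result is copied back; FaceBoxHost
 * and kMaxOut are stand-ins assumed to mirror yolov5FaceConfig::FaceBox and
 * yolov5FaceConfig::MAX_OUT, not the real header definitions. */
#include <cstring>
#include <vector>

struct FaceBoxHost {
    float bbox[5];        // x1, y1, x2, y2, score (obj_conf * face_conf)
    float conf;           // raw objectness written by CalDetection
    float landmarks[10];  // five (x, y) facial landmarks
};

static std::vector<FaceBoxHost> readFaceDetections(const float* host_out, int batch_idx, int kMaxOut) {
    const int output_elem = 1 + kMaxOut * (int)(sizeof(FaceBoxHost) / sizeof(float));
    const float* base = host_out + batch_idx * output_elem;
    int count = (int)base[0];               // atomicAdd total; may exceed kMaxOut
    if (count > kMaxOut) count = kMaxOut;   // the kernel stops writing past MAX_OUT
    std::vector<FaceBoxHost> dets((size_t)count);
    std::memcpy(dets.data(), base + 1, (size_t)count * sizeof(FaceBoxHost));
    return dets;
}

// Any NMS over bbox[0..3] would still happen on the host: the plugin itself only
// gathers raw candidates whose objectness exceeds CONF_THRESH.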
616c8188b2a09adcc76503e32a499ddfb3d0b7c1.cu
#include "yolov5_face_plugin.h" #include "stdio.h" #include <iostream> #include <cassert> #include <memory> #include<math.h> #ifndef CUDA_CHECK #define CUDA_CHECK(callstr) \ { \ cudaError_t error_code = callstr; \ if (error_code != cudaSuccess) { \ std::cerr << "CUDA error " << error_code << " at " << __FILE__ << ":" << __LINE__; \ assert(0); \ } \ } #endif namespace nvinfer1 { yolov5FacePlugin::yolov5FacePlugin() { conf_thresh = yolov5FaceConfig::CONF_THRESH; refer_rows_1 = 3 * yolov5FaceConfig::INPUT_H * yolov5FaceConfig::INPUT_W / (8.0 * 8.0); refer_rows_2 = refer_rows_1 + 3 * yolov5FaceConfig::INPUT_H * yolov5FaceConfig::INPUT_W / (16.0 * 16.0); refer_rows_3 = refer_rows_2 + 3 * yolov5FaceConfig::INPUT_H * yolov5FaceConfig::INPUT_W / (32.0 * 32.0); std::cout<<"init decode plugin" <<std::endl; } yolov5FacePlugin::~yolov5FacePlugin() { std::cout<<"destroy yolov5_face plugin"<<std::endl; } // create the plugin at runtime from a byte stream yolov5FacePlugin::yolov5FacePlugin(const void* data, size_t length) { } void yolov5FacePlugin::serialize(void* buffer) const { } size_t yolov5FacePlugin::getSerializationSize() const { return 0; } int yolov5FacePlugin::initialize() { return 0; } Dims yolov5FacePlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) { //output the result to channel int totalsize = yolov5FaceConfig::MAX_OUT * sizeof(yolov5FaceConfig::FaceBox) / sizeof(float); return Dims3(totalsize + 1, 1, 1); } // Set plugin namespace void yolov5FacePlugin::setPluginNamespace(const char* pluginNamespace) { mPluginNamespace = pluginNamespace; } const char* yolov5FacePlugin::getPluginNamespace() const { return mPluginNamespace; } // Return the DataType of the plugin output at the requested index DataType yolov5FacePlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const { return DataType::kFLOAT; } // Return true if output tensor is broadcast across a batch. bool yolov5FacePlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const { return false; } // Return true if plugin can use input that is broadcast across batch without replication. bool yolov5FacePlugin::canBroadcastInputAcrossBatch(int inputIndex) const { return false; } void yolov5FacePlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) { } // Attach the plugin object to an execution context and grant the plugin the access to some context resource. void yolov5FacePlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) { } // Detach the plugin object from its execution context. void yolov5FacePlugin::detachFromContext() {} const char* yolov5FacePlugin::getPluginType() const { return "YOLOV5FACE_TRT"; } const char* yolov5FacePlugin::getPluginVersion() const { return "1"; } void yolov5FacePlugin::destroy() { delete this; } // Clone the plugin IPluginV2IOExt* yolov5FacePlugin::clone() const { yolov5FacePlugin *p = new yolov5FacePlugin(); p->setPluginNamespace(mPluginNamespace); return p; } __device__ float Logist(float data){ return 1./(1. 
+ expf(-data)); }; //sigmoid func __device__ float dev_anchors_grid[3][6] = {4,5, 8,10, 13,16, 23,29, 43,55, 73,105, 146,217, 231,300, 335,433}; __device__ int dev_strides[3] = {8, 16, 32}; //computation, input size 18900 * 16 __global__ void CalDetection(const float *input, float *output, int refer_rows_1, int refer_rows_2, int refer_rows_3, int num_elem, int output_elem) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx >= num_elem) return; const float* curInput = input + idx * 16; //first element of each row, 16 is hard-coded for now int bn_idx = idx / refer_rows_3; //batch_id, total_grid=18900 idx %= refer_rows_3; //offset idx in batch_id // filter by confidence float cur_conf = Logist(*(curInput + 4)); if (cur_conf <= yolov5FaceConfig::CONF_THRESH) //0.3 return; //printf("ori_val: %f, conf: %f, thresh: %f\n", *(curInput + 4), cur_conf, yolov5FaceConfig::CONF_THRESH); float *res_count = output + bn_idx * output_elem; int count = (int)atomicAdd(res_count, 1); //accumulate the total count if (count >= yolov5FaceConfig::MAX_OUT) return; //determine which layer this index falls in int layer_idx = 0; if (idx > refer_rows_2) { layer_idx = 2; idx -= refer_rows_2; } else if (idx > refer_rows_1) { layer_idx = 1; idx -= refer_rows_1; } int cur_stride = dev_strides[layer_idx]; int cur_w = yolov5FaceConfig::INPUT_W / cur_stride; int cur_h = yolov5FaceConfig::INPUT_H / cur_stride; int h_idx_all = idx / cur_w; //global idx stacked along the h direction int h_idx = h_idx_all % cur_h; //index within the grid int anchor_idx = (h_idx_all / cur_h) % 3; //determine which anchor it belongs to int w_idx = idx % cur_w; //corresponding index along the horizontal direction //construct the detection box char* data = (char *)res_count + sizeof(float) + count * sizeof(yolov5FaceConfig::FaceBox); yolov5FaceConfig::FaceBox* det = (yolov5FaceConfig::FaceBox*)(data); //xywhs c float bw = pow((Logist(*(curInput + 2)) * 2), 2) * dev_anchors_grid[layer_idx][2 * anchor_idx]; //w float bh = pow((Logist(*(curInput + 3)) * 2), 2) * dev_anchors_grid[layer_idx][2 * anchor_idx + 1]; //h det->bbox[0] = (Logist(*(curInput)) * 2. - 0.5 + w_idx) * cur_stride - bw / 2.0; //x1 det->bbox[1] = (Logist(*(curInput + 1)) * 2.
- 0.5 + h_idx) * cur_stride - bh / 2.0; //y1 det->bbox[2] = det->bbox[0] + bw; //x2 det->bbox[3] = det->bbox[1] + bh; //y2 det->bbox[4] = cur_conf * Logist(*(curInput+ 15)); //score det->conf = cur_conf; //landmarks x1 y1 -> x5 y5 det->landmarks[0] = (*(curInput+ 5)) * dev_anchors_grid[layer_idx][2 * anchor_idx] + w_idx * cur_stride; det->landmarks[1] = (*(curInput+ 6)) * dev_anchors_grid[layer_idx][2 * anchor_idx + 1] + h_idx * cur_stride; det->landmarks[2] = (*(curInput+ 7)) * dev_anchors_grid[layer_idx][2 * anchor_idx] + w_idx * cur_stride; det->landmarks[3] = (*(curInput+ 8)) * dev_anchors_grid[layer_idx][2 * anchor_idx + 1] + h_idx * cur_stride; det->landmarks[4] = (*(curInput+ 9)) * dev_anchors_grid[layer_idx][2 * anchor_idx] + w_idx * cur_stride; det->landmarks[5] = (*(curInput+ 10)) * dev_anchors_grid[layer_idx][2 * anchor_idx + 1] + h_idx * cur_stride; det->landmarks[6] = (*(curInput+ 11)) * dev_anchors_grid[layer_idx][2 * anchor_idx] + w_idx * cur_stride; det->landmarks[7] = (*(curInput+ 12)) * dev_anchors_grid[layer_idx][2 * anchor_idx + 1] + h_idx * cur_stride; det->landmarks[8] = (*(curInput+ 13)) * dev_anchors_grid[layer_idx][2 * anchor_idx] + w_idx * cur_stride; det->landmarks[9] = (*(curInput+ 14)) * dev_anchors_grid[layer_idx][2 * anchor_idx + 1] + h_idx * cur_stride; } void yolov5FacePlugin::forwardGpu(const float *const * inputs, float * output, cudaStream_t stream, int batchSize) { int outputElem = 1 + yolov5FaceConfig::MAX_OUT * sizeof(yolov5FaceConfig::FaceBox) / sizeof(float); for (int idx = 0; idx < batchSize; ++idx) { CUDA_CHECK(cudaMemset(output + idx * outputElem, 0, sizeof(float))); //set total_num=0 } int total_num_elem = refer_rows_3 * batchSize; //std::cout<<"total_num_elem: "<<total_num_elem << "row num: "<<row_num<<" batchsize:" << batchSize <<std::endl; CalDetection << < (total_num_elem + thread_count_ - 1) / thread_count_, thread_count_ , 0, stream >> > (inputs[0], output, refer_rows_1, refer_rows_2, refer_rows_3, total_num_elem, outputElem); } int yolov5FacePlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, cudaStream_t stream) { //GPU //CUDA_CHECK(cudaStreamSynchronize(stream)); forwardGpu((const float *const *)inputs, (float *)outputs[0], stream, batchSize); return 0; }; PluginFieldCollection yolov5FacePluginCreator::mFC{}; std::vector<PluginField> yolov5FacePluginCreator::mPluginAttributes; yolov5FacePluginCreator::yolov5FacePluginCreator() { mPluginAttributes.clear(); mFC.nbFields = mPluginAttributes.size(); mFC.fields = mPluginAttributes.data(); } const char* yolov5FacePluginCreator::getPluginName() const { return "YOLOV5FACE_TRT"; } const char* yolov5FacePluginCreator::getPluginVersion() const { return "1"; } const PluginFieldCollection* yolov5FacePluginCreator::getFieldNames() { return &mFC; } IPluginV2IOExt* yolov5FacePluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc) { yolov5FacePlugin* obj = new yolov5FacePlugin(); obj->setPluginNamespace(mNamespace.c_str()); return obj; } IPluginV2IOExt* yolov5FacePluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength) { // This object will be deleted when the network is destroyed, which will // call PReluPlugin::destroy() yolov5FacePlugin* obj = new yolov5FacePlugin(serialData, serialLength); obj->setPluginNamespace(mNamespace.c_str()); return obj; } } // image preprocessing __global__ void yolov5FacePreprocessKernel(const unsigned char*src,int srcWidth,int srcHeight,int srcPitch,float *dst, int dstWidth,int
dstHeight, int write_x, int write_y, float resize_w, float resize_h) { double srcXf; double srcYf; int srcX; int srcY; double u; double v; int dstOffset; int y = blockIdx.y*blockDim.y+threadIdx.y; int x = blockIdx.x*blockDim.x+threadIdx.x; if(x>=dstWidth || y>=dstHeight) return; //int write_x,write_y; //float resize_w,resize_h; double r_w = dstWidth / (srcWidth*1.0); double r_h = dstHeight / (srcHeight*1.0); if (r_h > r_w) { resize_w = dstWidth; resize_h = r_w * srcHeight; write_x = 0; write_y = (dstHeight - resize_h) / 2; } else { resize_w = r_h * srcWidth; resize_h = dstHeight; write_x = (dstWidth - resize_w) / 2; write_y = 0; } if((x >= write_x) && (x < write_x + resize_w) && (y >= write_y) && (y < write_y + resize_h)) { srcXf= (x - write_x) * ((float)(srcWidth/resize_w)) ; srcYf = (y - write_y) * ((float)(srcHeight/resize_h)); srcX = (int)srcXf; srcY = (int)srcYf; u= srcXf - srcX; v = srcYf - srcY; //r channel if(y*dstWidth+x >= dstWidth*dstHeight) { return; } if(srcY*srcPitch+srcX >= srcPitch*srcHeight || (srcY+1)*srcPitch+srcX >= srcPitch*srcHeight || srcY*srcPitch+(srcX+1) >= srcPitch*srcHeight || (srcY+1)*srcPitch+(srcX+1) >= srcPitch*srcHeight) { return; } dstOffset =(y*dstWidth+x) + 2 * dstWidth * dstHeight; if(dstOffset >= dstWidth * dstHeight * 3) { return; } dst[dstOffset] = 0; dst[dstOffset]+=(1-u)*(1-v)*src[srcY*srcPitch+srcX*3]; dst[dstOffset]+=(1-u)*v*src[(srcY+1)*srcPitch+srcX*3]; dst[dstOffset]+=u*(1-v)*src[srcY*srcPitch+(srcX+1)*3]; dst[dstOffset]+= u*v*src[(srcY+1)*srcPitch+(srcX+1)*3]; dst[dstOffset] = dst[dstOffset] / 255.0; //g channel dstOffset =(y*dstWidth+x) + dstWidth * dstHeight; if(dstOffset >= dstWidth * dstHeight * 3) { return; } dst[dstOffset] = 0; dst[dstOffset]+=(1-u)*(1-v)*src[srcY*srcPitch+srcX*3+1]; dst[dstOffset]+=(1-u)*v*src[(srcY+1)*srcPitch+srcX*3+1]; dst[dstOffset]+=u*(1-v)*src[srcY*srcPitch+(srcX+1)*3+1]; dst[dstOffset]+= u*v*src[(srcY+1)*srcPitch+(srcX+1)*3+1]; dst[dstOffset] = dst[dstOffset] / 255.0; //b channel dstOffset =(y*dstWidth+x) ; if(dstOffset >= dstWidth * dstHeight * 3) { return; } dst[dstOffset] = 0; dst[dstOffset]+=(1-u)*(1-v)*src[srcY*srcPitch+srcX*3+2]; dst[dstOffset]+=(1-u)*v*src[(srcY+1)*srcPitch+srcX*3+2]; dst[dstOffset]+=u*(1-v)*src[srcY*srcPitch+(srcX+1)*3+2]; dst[dstOffset]+= u*v*src[(srcY+1)*srcPitch+(srcX+1)*3+2]; dst[dstOffset] = dst[dstOffset] / 255.0; } else { if(y*dstWidth+x >= dstWidth*dstHeight) { return; } //r channel int dstOffset =(y*dstWidth+x) + 2 * dstWidth * dstHeight; if(dstOffset >= dstWidth * dstHeight * 3) { return; } dst[dstOffset] = 128; dst[dstOffset] = dst[dstOffset] / 255.0; //g channel dstOffset =(y*dstWidth+x) + dstWidth * dstHeight; if(dstOffset >= dstWidth * dstHeight * 3) { return; } dst[dstOffset] = 128; dst[dstOffset] = dst[dstOffset] / 255.0; //b channel dstOffset =(y*dstWidth+x); if(dstOffset >= dstWidth * dstHeight * 3) { return; } dst[dstOffset] = 128; dst[dstOffset] = dst[dstOffset] / 255.0; } } void yolov5FacePreprocess(const unsigned char*src,int srcWidth,int srcHeight,int srcPitch, float* dst,int dstWidth, int dstHeight, int write_x, int write_y, float resize_w, float resize_h, cudaStream_t stream) { int uint = 16; dim3 grid((dstWidth+uint-1)/uint,(dstHeight+uint-1)/uint); dim3 block(uint,uint); yolov5FacePreprocessKernel<<<grid,block,0,stream>>>(src, srcWidth, srcHeight,srcPitch,dst, dstWidth, dstHeight, write_x, write_y, resize_w, resize_h); }
360b3871a0f5c7ef46d7e9f7ef768e0a9e18c50a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define DAWN_GENERATED 1 #undef DAWN_BACKEND_T #define DAWN_BACKEND_T CUDA #ifndef BOOST_RESULT_OF_USE_TR1 #define BOOST_RESULT_OF_USE_TR1 1 #endif #ifndef BOOST_NO_CXX11_DECLTYPE #define BOOST_NO_CXX11_DECLTYPE 1 #endif #ifndef GRIDTOOLS_DAWN_HALO_EXTENT #define GRIDTOOLS_DAWN_HALO_EXTENT 3 #endif #ifndef BOOST_PP_VARIADICS #define BOOST_PP_VARIADICS 1 #endif #ifndef BOOST_FUSION_DONT_USE_PREPROCESSED_FILES #define BOOST_FUSION_DONT_USE_PREPROCESSED_FILES 1 #endif #ifndef BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS #define BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS 1 #endif #ifndef GT_VECTOR_LIMIT_SIZE #define GT_VECTOR_LIMIT_SIZE 30 #endif #ifndef BOOST_FUSION_INVOKE_MAX_ARITY #define BOOST_FUSION_INVOKE_MAX_ARITY GT_VECTOR_LIMIT_SIZE #endif #ifndef FUSION_MAX_VECTOR_SIZE #define FUSION_MAX_VECTOR_SIZE GT_VECTOR_LIMIT_SIZE #endif #ifndef FUSION_MAX_MAP_SIZE #define FUSION_MAX_MAP_SIZE GT_VECTOR_LIMIT_SIZE #endif #ifndef BOOST_MPL_LIMIT_VECTOR_SIZE #define BOOST_MPL_LIMIT_VECTOR_SIZE GT_VECTOR_LIMIT_SIZE #endif #include <driver-includes/gridtools_includes.hpp> using namespace gridtools::dawn; namespace dawn_generated{ namespace cuda{ __global__ void __launch_bounds__(128) generated_stencil47_ms46_kernel(const int isize, const int jsize, const int ksize, const int stride_111_1, const int stride_111_2, ::dawn::float_type * const in, ::dawn::float_type * const out) { // Start kernel const unsigned int nx = isize; const unsigned int ny = jsize; const int block_size_i = (blockIdx.x + 1) * 32 < nx ? 32 : nx - blockIdx.x * 32; const int block_size_j = (blockIdx.y + 1) * 4 < ny ? 4 : ny - blockIdx.y * 4; // computing the global position in the physical domain // In a typical cuda block we have the following regions // aa bbbbbbbb cc // aa bbbbbbbb cc // hh dddddddd ii // hh dddddddd ii // hh dddddddd ii // hh dddddddd ii // ee ffffffff gg // ee ffffffff gg // Regions b,d,f have warp (or multiple of warp size) // Size of regions a, c, h, i, e, g are determined by max_extent_t // Regions b,d,f are easily executed by dedicated warps (one warp for each line) // Regions (a,h,e) and (c,i,g) are executed by two specialized warp int iblock = 0 - 1; int jblock = 0 - 1; if(threadIdx.y < +4) { iblock = threadIdx.x; jblock = (int)threadIdx.y + 0; } // initialized iterators int idx111 = (blockIdx.x*32+iblock)*1+(blockIdx.y*4+jblock)*stride_111_1; // jump iterators to match the intersection of beginning of next interval and the parallel execution block idx111 += max(0, blockIdx.z * 4) * stride_111_2; int kleg_lower_bound = max(0,blockIdx.z*4); int kleg_upper_bound = min( ksize - 1 + 0,(blockIdx.z+1)*4-1);; for(int k = kleg_lower_bound+0; k <= kleg_upper_bound+0; ++k) { if(iblock >= 0 && iblock <= block_size_i -1 + 0 && jblock >= 0 && jblock <= block_size_j -1 + 0) { ::dawn::float_type dx; { out[idx111] = (((int) -4 * (__ldg(&(in[idx111])) + (__ldg(&(in[idx111+1*1])) + (__ldg(&(in[idx111+1*-1])) + (__ldg(&(in[idx111+stride_111_1*-1])) + __ldg(&(in[idx111+stride_111_1*1]))))))) / (dx * dx)); } } // Slide kcaches // increment iterators idx111+=stride_111_2; }} class generated { public: struct sbase : public timer_cuda { sbase(std::string name) : timer_cuda(name){} double get_time() { return total_time(); } }; struct stencil_47 : public sbase { // Members // Temporary storage typedefs using tmp_halo_t = gridtools::halo< 0,0, 0, 0, 0>; using tmp_meta_data_t = storage_traits_t::storage_info_t< 0, 5, tmp_halo_t >; using tmp_storage_t = 
storage_traits_t::data_store_t< ::dawn::float_type, tmp_meta_data_t>; const gridtools::dawn::domain m_dom; public: stencil_47(const gridtools::dawn::domain& dom_, int rank, int xcols, int ycols) : sbase("stencil_47"), m_dom(dom_){} static constexpr dawn::driver::cartesian_extent in_extent = {-1,1, -1,1, 0,0}; static constexpr dawn::driver::cartesian_extent out_extent = {0,0, 0,0, 0,0}; void run(storage_ijk_t in_ds, storage_ijk_t out_ds) { // starting timers start(); {; gridtools::data_view<storage_ijk_t> in= gridtools::make_device_view(in_ds); gridtools::data_view<storage_ijk_t> out= gridtools::make_device_view(out_ds); const unsigned int nx = m_dom.isize() - m_dom.iminus() - m_dom.iplus(); const unsigned int ny = m_dom.jsize() - m_dom.jminus() - m_dom.jplus(); const unsigned int nz = m_dom.ksize() - m_dom.kminus() - m_dom.kplus(); dim3 threads(32,4+0,1); const unsigned int nbx = (nx + 32 - 1) / 32; const unsigned int nby = (ny + 4 - 1) / 4; const unsigned int nbz = (m_dom.ksize()+4-1) / 4; dim3 blocks(nbx, nby, nbz); hipLaunchKernelGGL(( generated_stencil47_ms46_kernel), dim3(blocks), dim3(threads), 0, 0, nx,ny,nz,in_ds.strides()[1],in_ds.strides()[2],(in.data()+in_ds.get_storage_info_ptr()->index(in.begin<0>(), in.begin<1>(),0 )),(out.data()+out_ds.get_storage_info_ptr()->index(out.begin<0>(), out.begin<1>(),0 ))); }; // stopping timers pause(); } }; static constexpr const char* s_name = "generated"; stencil_47 m_stencil_47; public: generated(const generated&) = delete; // Members // Stencil-Data generated(const gridtools::dawn::domain& dom, int rank = 1, int xcols = 1, int ycols = 1) : m_stencil_47(dom, rank, xcols, ycols){} template<typename S> void sync_storages(S field) { field.sync(); } template<typename S0, typename ... S> void sync_storages(S0 f0, S... fields) { f0.sync(); sync_storages(fields...); } void run(storage_ijk_t in, storage_ijk_t out) { sync_storages(in,out); m_stencil_47.run(in,out); ; sync_storages(in,out); } std::string get_name() const { return std::string(s_name); } void reset_meters() { m_stencil_47.reset(); } double get_total_time() { double res = 0; res +=m_stencil_47.get_time(); return res; } }; } // namespace cuda } // namespace dawn_generated
360b3871a0f5c7ef46d7e9f7ef768e0a9e18c50a.cu
#define DAWN_GENERATED 1 #undef DAWN_BACKEND_T #define DAWN_BACKEND_T CUDA #ifndef BOOST_RESULT_OF_USE_TR1 #define BOOST_RESULT_OF_USE_TR1 1 #endif #ifndef BOOST_NO_CXX11_DECLTYPE #define BOOST_NO_CXX11_DECLTYPE 1 #endif #ifndef GRIDTOOLS_DAWN_HALO_EXTENT #define GRIDTOOLS_DAWN_HALO_EXTENT 3 #endif #ifndef BOOST_PP_VARIADICS #define BOOST_PP_VARIADICS 1 #endif #ifndef BOOST_FUSION_DONT_USE_PREPROCESSED_FILES #define BOOST_FUSION_DONT_USE_PREPROCESSED_FILES 1 #endif #ifndef BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS #define BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS 1 #endif #ifndef GT_VECTOR_LIMIT_SIZE #define GT_VECTOR_LIMIT_SIZE 30 #endif #ifndef BOOST_FUSION_INVOKE_MAX_ARITY #define BOOST_FUSION_INVOKE_MAX_ARITY GT_VECTOR_LIMIT_SIZE #endif #ifndef FUSION_MAX_VECTOR_SIZE #define FUSION_MAX_VECTOR_SIZE GT_VECTOR_LIMIT_SIZE #endif #ifndef FUSION_MAX_MAP_SIZE #define FUSION_MAX_MAP_SIZE GT_VECTOR_LIMIT_SIZE #endif #ifndef BOOST_MPL_LIMIT_VECTOR_SIZE #define BOOST_MPL_LIMIT_VECTOR_SIZE GT_VECTOR_LIMIT_SIZE #endif #include <driver-includes/gridtools_includes.hpp> using namespace gridtools::dawn; namespace dawn_generated{ namespace cuda{ __global__ void __launch_bounds__(128) generated_stencil47_ms46_kernel(const int isize, const int jsize, const int ksize, const int stride_111_1, const int stride_111_2, ::dawn::float_type * const in, ::dawn::float_type * const out) { // Start kernel const unsigned int nx = isize; const unsigned int ny = jsize; const int block_size_i = (blockIdx.x + 1) * 32 < nx ? 32 : nx - blockIdx.x * 32; const int block_size_j = (blockIdx.y + 1) * 4 < ny ? 4 : ny - blockIdx.y * 4; // computing the global position in the physical domain // In a typical cuda block we have the following regions // aa bbbbbbbb cc // aa bbbbbbbb cc // hh dddddddd ii // hh dddddddd ii // hh dddddddd ii // hh dddddddd ii // ee ffffffff gg // ee ffffffff gg // Regions b,d,f have warp (or multiple of warp size) // Size of regions a, c, h, i, e, g are determined by max_extent_t // Regions b,d,f are easily executed by dedicated warps (one warp for each line) // Regions (a,h,e) and (c,i,g) are executed by two specialized warp int iblock = 0 - 1; int jblock = 0 - 1; if(threadIdx.y < +4) { iblock = threadIdx.x; jblock = (int)threadIdx.y + 0; } // initialized iterators int idx111 = (blockIdx.x*32+iblock)*1+(blockIdx.y*4+jblock)*stride_111_1; // jump iterators to match the intersection of beginning of next interval and the parallel execution block idx111 += max(0, blockIdx.z * 4) * stride_111_2; int kleg_lower_bound = max(0,blockIdx.z*4); int kleg_upper_bound = min( ksize - 1 + 0,(blockIdx.z+1)*4-1);; for(int k = kleg_lower_bound+0; k <= kleg_upper_bound+0; ++k) { if(iblock >= 0 && iblock <= block_size_i -1 + 0 && jblock >= 0 && jblock <= block_size_j -1 + 0) { ::dawn::float_type dx; { out[idx111] = (((int) -4 * (__ldg(&(in[idx111])) + (__ldg(&(in[idx111+1*1])) + (__ldg(&(in[idx111+1*-1])) + (__ldg(&(in[idx111+stride_111_1*-1])) + __ldg(&(in[idx111+stride_111_1*1]))))))) / (dx * dx)); } } // Slide kcaches // increment iterators idx111+=stride_111_2; }} class generated { public: struct sbase : public timer_cuda { sbase(std::string name) : timer_cuda(name){} double get_time() { return total_time(); } }; struct stencil_47 : public sbase { // Members // Temporary storage typedefs using tmp_halo_t = gridtools::halo< 0,0, 0, 0, 0>; using tmp_meta_data_t = storage_traits_t::storage_info_t< 0, 5, tmp_halo_t >; using tmp_storage_t = storage_traits_t::data_store_t< ::dawn::float_type, tmp_meta_data_t>; const 
gridtools::dawn::domain m_dom; public: stencil_47(const gridtools::dawn::domain& dom_, int rank, int xcols, int ycols) : sbase("stencil_47"), m_dom(dom_){} static constexpr dawn::driver::cartesian_extent in_extent = {-1,1, -1,1, 0,0}; static constexpr dawn::driver::cartesian_extent out_extent = {0,0, 0,0, 0,0}; void run(storage_ijk_t in_ds, storage_ijk_t out_ds) { // starting timers start(); {; gridtools::data_view<storage_ijk_t> in= gridtools::make_device_view(in_ds); gridtools::data_view<storage_ijk_t> out= gridtools::make_device_view(out_ds); const unsigned int nx = m_dom.isize() - m_dom.iminus() - m_dom.iplus(); const unsigned int ny = m_dom.jsize() - m_dom.jminus() - m_dom.jplus(); const unsigned int nz = m_dom.ksize() - m_dom.kminus() - m_dom.kplus(); dim3 threads(32,4+0,1); const unsigned int nbx = (nx + 32 - 1) / 32; const unsigned int nby = (ny + 4 - 1) / 4; const unsigned int nbz = (m_dom.ksize()+4-1) / 4; dim3 blocks(nbx, nby, nbz); generated_stencil47_ms46_kernel<<<blocks, threads>>>(nx,ny,nz,in_ds.strides()[1],in_ds.strides()[2],(in.data()+in_ds.get_storage_info_ptr()->index(in.begin<0>(), in.begin<1>(),0 )),(out.data()+out_ds.get_storage_info_ptr()->index(out.begin<0>(), out.begin<1>(),0 ))); }; // stopping timers pause(); } }; static constexpr const char* s_name = "generated"; stencil_47 m_stencil_47; public: generated(const generated&) = delete; // Members // Stencil-Data generated(const gridtools::dawn::domain& dom, int rank = 1, int xcols = 1, int ycols = 1) : m_stencil_47(dom, rank, xcols, ycols){} template<typename S> void sync_storages(S field) { field.sync(); } template<typename S0, typename ... S> void sync_storages(S0 f0, S... fields) { f0.sync(); sync_storages(fields...); } void run(storage_ijk_t in, storage_ijk_t out) { sync_storages(in,out); m_stencil_47.run(in,out); ; sync_storages(in,out); } std::string get_name() const { return std::string(s_name); } void reset_meters() { m_stencil_47.reset(); } double get_total_time() { double res = 0; res +=m_stencil_47.get_time(); return res; } }; } // namespace cuda } // namespace dawn_generated
04a651341ece82d6cda53eb5e5f4617c1a8472ce.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "childKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( childKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, ); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( childKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, ); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( childKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, ); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
04a651341ece82d6cda53eb5e5f4617c1a8472ce.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "childKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); childKernel<<<gridBlock,threadBlock>>>(); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { childKernel<<<gridBlock,threadBlock>>>(); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { childKernel<<<gridBlock,threadBlock>>>(); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
cacaaaf633989ab395904fb2c58377decd2da11f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "celllist.h" #include <mirheo/core/logger.h> #include <mirheo/core/pvs/object_vector.h> #include <mirheo/core/pvs/particle_vector.h> #include <mirheo/core/pvs/views/pv.h> #include <mirheo/core/utils/cuda_common.h> #include <mirheo/core/utils/kernel_launch.h> #include <mirheo/core/utils/type_traits.h> #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wshadow" #include <extern/cub/hipcub/hipcub.hpp> #pragma GCC diagnostic pop #include <algorithm> namespace mirheo { namespace CellListKernels { enum {INVALID = -1}; inline __device__ bool outgoingParticle(real4 pos) { return Real3_int(pos).isMarked(); } __global__ void computeCellSizes(PVview view, CellListInfo cinfo) { const int pid = blockIdx.x * blockDim.x + threadIdx.x; if (pid >= view.size) return; real4 coo = view.readPositionNoCache(pid); // XXX: relying here only on redistribution if ( outgoingParticle(coo) ) return; int cid = cinfo.getCellId<CellListsProjection::Clamp>(coo); atomicAdd(cinfo.cellSizes + cid, 1); } __global__ void reorderPositionsAndCreateMap(PVview view, CellListInfo cinfo, real4 *outPositions) { const int pid = blockIdx.x * blockDim.x + threadIdx.x; if (pid >= view.size) return; int dstId = INVALID; // this is to allow more cache for atomics // loads / stores here need no cache real4 pos = view.readPositionNoCache(pid); int cid = cinfo.getCellId<CellListsProjection::Clamp>(pos); // XXX: relying here only on redistribution if ( !outgoingParticle(pos) ) dstId = cinfo.cellStarts[cid] + atomicAdd(cinfo.cellSizes + cid, 1); if (dstId != INVALID) writeNoCache(outPositions + dstId, pos); cinfo.order[pid] = dstId; } template <typename T> __global__ void reorderExtraDataPerParticle(int n, const T *inExtraData, CellListInfo cinfo, T *outExtraData) { int srcId = blockIdx.x * blockDim.x + threadIdx.x; if (srcId >= n) return; int dstId = cinfo.order[srcId]; if (dstId != INVALID) outExtraData[dstId] = inExtraData[srcId]; } template <typename T> __global__ void accumulateKernel(int n, T *dst, CellListInfo cinfo, const T *src) { int pid = blockIdx.x * blockDim.x + threadIdx.x; if (pid >= n) return; int srcId = cinfo.order[pid]; assert(srcId != INVALID); dst[pid] += src[srcId]; } } // namespace CellListKernels //================================================================================= // Info //================================================================================= CellListInfo::CellListInfo(real rc_, real3 localDomainSize_) : rc(rc_), localDomainSize(localDomainSize_) { ncells = make_int3( math::floor(localDomainSize / rc + 1e-6_r) ); h = make_real3(localDomainSize) / make_real3(ncells); invh = 1.0_r / h; this->rc = ::min( {h.x, h.y, h.z} ); totcells = ncells.x * ncells.y * ncells.z; } CellListInfo::CellListInfo(real3 h_, real3 localDomainSize_) : h(h_), invh(1.0_r/h_), localDomainSize(localDomainSize_) { rc = ::min( {h.x, h.y, h.z} ); ncells = make_int3( math::ceil(localDomainSize / h - 1e-6_r) ); totcells = ncells.x * ncells.y * ncells.z; } //================================================================================= // Basic cell-lists //================================================================================= CellList::CellList(ParticleVector *pv, real rc_, real3 localDomainSize_) : CellListInfo(rc_, localDomainSize_), pv_(pv), particlesDataContainer(std::make_unique<LocalParticleVector>(nullptr)) { _initialize(); } CellList::CellList(ParticleVector *pv, int3 resolution, real3 
localDomainSize_) : CellListInfo(localDomainSize_ / make_real3(resolution), localDomainSize_), pv_(pv), particlesDataContainer(std::make_unique<LocalParticleVector>(nullptr)) { _initialize(); } void CellList::_initialize() { localPV = particlesDataContainer.get(); cellSizes. resize_anew(totcells + 1); cellStarts.resize_anew(totcells + 1); cellSizes. clear(defaultStream); cellStarts.clear(defaultStream); CUDA_Check( hipStreamSynchronize(defaultStream) ); debug("Initialized %s cell-list with %dx%dx%d cells and cut-off %f", pv_->getCName(), ncells.x, ncells.y, ncells.z, rc); } CellList::~CellList() = default; bool CellList::_checkNeedBuild() const { if (changedStamp == pv_->cellListStamp) { debug2("%s is already up-to-date, building skipped", makeName().c_str()); return false; } if (pv_->local()->size() == 0) { debug2("%s consists of no particles, building skipped", makeName().c_str()); return false; } return true; } void CellList::_updateExtraDataChannels(__UNUSED hipStream_t stream) { auto& pvManager = pv_->local()->dataPerParticle; auto& containerManager = particlesDataContainer->dataPerParticle; int np = pv_->local()->size(); for (const auto& namedChannel : pvManager.getSortedChannels()) { const auto& name = namedChannel.first; const auto& desc = namedChannel.second; if (desc->persistence != DataManager::PersistenceMode::Active) continue; mpark::visit([&](auto pinnedBuffPtr) { using T = typename std::remove_pointer<decltype(pinnedBuffPtr)>::type::value_type; if (!containerManager.checkChannelExists(name)) containerManager.createData<T>(name, np); }, desc->varDataPtr); } } void CellList::_computeCellSizes(hipStream_t stream) { debug2("%s : Computing cell sizes for %d particles", makeName().c_str(), pv_->local()->size()); cellSizes.clear(stream); PVview view(pv_, pv_->local()); const int nthreads = 128; SAFE_KERNEL_LAUNCH( CellListKernels::computeCellSizes, getNblocks(view.size, nthreads), nthreads, 0, stream, view, cellInfo() ); } void CellList::_computeCellStarts(hipStream_t stream) { // Scan is always working with the same number of cells // Memory requirements can't change size_t bufSize = scanBuffer.size(); if (bufSize == 0) { hipcub::DeviceScan::ExclusiveSum(nullptr, bufSize, cellSizes.devPtr(), cellStarts.devPtr(), totcells+1, stream); scanBuffer.resize_anew(bufSize); } hipcub::DeviceScan::ExclusiveSum(scanBuffer.devPtr(), bufSize, cellSizes.devPtr(), cellStarts.devPtr(), totcells+1, stream); } void CellList::_reorderPositionsAndCreateMap(hipStream_t stream) { debug2("Reordering %d %s particles", pv_->local()->size(), pv_->getCName()); PVview view(pv_, pv_->local()); order.resize_anew(view.size); particlesDataContainer->resize_anew(view.size); cellSizes.clear(stream); const int nthreads = 128; SAFE_KERNEL_LAUNCH( CellListKernels::reorderPositionsAndCreateMap, getNblocks(view.size, nthreads), nthreads, 0, stream, view, cellInfo(), particlesDataContainer->positions().devPtr() ); } void CellList::_reorderExtraDataEntry(const std::string& channelName, const DataManager::ChannelDescription *channelDesc, hipStream_t stream) { const auto& dstDesc = particlesDataContainer->dataPerParticle.getChannelDescOrDie(channelName); int np = pv_->local()->size(); debug2("%s: reordering extra data '%s'", makeName().c_str(), channelName.c_str()); mpark::visit([&](auto srcPinnedBuff) { auto dstPinnedBuff = mpark::get<decltype(srcPinnedBuff)>(dstDesc.varDataPtr); constexpr int nthreads = 128; SAFE_KERNEL_LAUNCH( CellListKernels::reorderExtraDataPerParticle, getNblocks(np, nthreads), nthreads, 0, stream, 
np, srcPinnedBuff->devPtr(), this->cellInfo(), dstPinnedBuff->devPtr() ); }, channelDesc->varDataPtr); } void CellList::_reorderPersistentData(hipStream_t stream) { auto srcExtraData = &pv_->local()->dataPerParticle; for (const auto& namedChannel : srcExtraData->getSortedChannels()) { const auto& name = namedChannel.first; const auto& desc = namedChannel.second; if (desc->persistence != DataManager::PersistenceMode::Active || name == ChannelNames::positions) // positions were already reordered manually continue; _reorderExtraDataEntry(name, desc, stream); } } void CellList::_build(hipStream_t stream) { _computeCellSizes(stream); _computeCellStarts(stream); _reorderPositionsAndCreateMap(stream); _reorderPersistentData(stream); changedStamp = pv_->cellListStamp; } CellListInfo CellList::cellInfo() { CellListInfo::cellSizes = cellSizes.devPtr(); CellListInfo::cellStarts = cellStarts.devPtr(); CellListInfo::order = order.devPtr(); return *((CellListInfo*)this); } void CellList::build(hipStream_t stream) { _updateExtraDataChannels(stream); if (!_checkNeedBuild()) return; debug("building %s", makeName().c_str()); _build(stream); } static void accumulateIfHasAddOperator(__UNUSED GPUcontainer *src, __UNUSED GPUcontainer *dst, __UNUSED int n, __UNUSED CellListInfo cinfo, __UNUSED hipStream_t stream) { die("Cannot accumulate entries: operator+ not supported for this type"); } // use SFINAE to choose between additionable types template <typename T, typename = void_t<decltype(std::declval<T>() + std::declval<T>())>> static void accumulateIfHasAddOperator(PinnedBuffer<T> *src, PinnedBuffer<T> *dst, int n, CellListInfo cinfo, hipStream_t stream) { const int nthreads = 128; SAFE_KERNEL_LAUNCH( CellListKernels::accumulateKernel, getNblocks(n, nthreads), nthreads, 0, stream, n, dst->devPtr(), cinfo, src->devPtr() ); } void CellList::_accumulateExtraData(const std::string& channelName, hipStream_t stream) { int n = pv_->local()->size(); const auto& pvManager = pv_->local()->dataPerParticle; const auto& contManager = localPV->dataPerParticle; const auto& pvDesc = pvManager .getChannelDescOrDie(channelName); const auto& contDesc = contManager.getChannelDescOrDie(channelName); mpark::visit([&](auto srcPinnedBuff) { auto dstPinnedBuff = mpark::get<decltype(srcPinnedBuff)>(pvDesc.varDataPtr); accumulateIfHasAddOperator(srcPinnedBuff, dstPinnedBuff, n, this->cellInfo(), stream); }, contDesc.varDataPtr); } void CellList::accumulateChannels(const std::vector<std::string>& channelNames, hipStream_t stream) { for (const auto& channelName : channelNames) { debug2("%s : accumulating channel '%s'", makeName().c_str(), channelName.c_str()); _accumulateExtraData(channelName, stream); } } void CellList::gatherChannels(const std::vector<std::string>& channelNames, hipStream_t stream) { for (auto& channelName : channelNames) { debug("%s : gathering channel '%s'", makeName().c_str(), channelName.c_str()); auto& desc = localPV->dataPerParticle.getChannelDescOrDie(channelName); _reorderExtraDataEntry(channelName, &desc, stream); // invalidate particle vector halo if any entry is active pv_->haloValid = false; } } void CellList::clearChannels(const std::vector<std::string>& channelNames, hipStream_t stream) { for (const auto& channelName : channelNames) { debug2("%s : clearing channel '%s'", makeName().c_str(), channelName.c_str()); localPV->dataPerParticle.getGenericData(channelName)->clearDevice(stream); } } LocalParticleVector* CellList::getLocalParticleVector() {return localPV;} std::string CellList::makeName() const { 
return "Cell List '" + pv_->getName() + "' (rc " + std::to_string(rc) + ")"; } //================================================================================= // Primary cell-lists //================================================================================= PrimaryCellList::PrimaryCellList(ParticleVector *pv, real rc_, real3 localDomainSize_) : CellList(pv, rc_, localDomainSize_) { localPV = pv_->local(); if (dynamic_cast<ObjectVector*>(pv_) != nullptr) error("Using primary cell-lists with objects is STRONGLY discouraged. This will very likely result in an error"); } PrimaryCellList::PrimaryCellList(ParticleVector *pv, int3 resolution, real3 localDomainSize_) : CellList(pv, resolution, localDomainSize_) { localPV = pv_->local(); if (dynamic_cast<ObjectVector*>(pv_) != nullptr) error("Using primary cell-lists with objects is STRONGLY discouraged. This will very likely result in an error"); } PrimaryCellList::~PrimaryCellList() = default; void PrimaryCellList::build(hipStream_t stream) { // Reqired here to avoid ptr swap if building didn't actually happen if (!_checkNeedBuild()) return; CellList::build(stream); if (pv_->local()->size() == 0) { debug2("%s consists of no particles, cell-list building skipped", pv_->getCName()); return; } // Now we need the new size of particles array. int newSize; CUDA_Check( hipMemcpyAsync(&newSize, cellStarts.devPtr() + totcells, sizeof(int), hipMemcpyDeviceToHost, stream) ); CUDA_Check( hipStreamSynchronize(stream) ); debug2("%s : reordering completed, new size of %s particle vector is %d", makeName().c_str(), pv_->getCName(), newSize); particlesDataContainer->resize(newSize, stream); _swapPersistentExtraData(); pv_->local()->resize(newSize, stream); } void PrimaryCellList::accumulateChannels(__UNUSED const std::vector<std::string>& channelNames, __UNUSED hipStream_t stream) {} void PrimaryCellList::gatherChannels(const std::vector<std::string>& channelNames, __UNUSED hipStream_t stream) { // do not need to reorder data, but still invalidate halo if (!channelNames.empty()) pv_->haloValid = false; } template <typename T> static void swap(const std::string& channelName, DataManager& pvManager, DataManager& containerManager) { std::swap(*pvManager .getData<T>(channelName), *containerManager.getData<T>(channelName)); } void PrimaryCellList::_swapPersistentExtraData() { auto& pvManager = pv_->local()->dataPerParticle; auto& containerManager = particlesDataContainer->dataPerParticle; for (const auto& namedChannel : pvManager.getSortedChannels()) { const auto& name = namedChannel.first; const auto& desc = namedChannel.second; if (desc->persistence != DataManager::PersistenceMode::Active) continue; const auto& descCont = containerManager.getChannelDescOrDie(name); mpark::visit([&](auto pinnedBufferPv) { auto pinnedBufferCont = mpark::get<decltype(pinnedBufferPv)>(descCont.varDataPtr); std::swap(*pinnedBufferPv, *pinnedBufferCont); }, desc->varDataPtr); } } std::string PrimaryCellList::makeName() const { return "Primary " + CellList::makeName(); } } // namespace mirheo
cacaaaf633989ab395904fb2c58377decd2da11f.cu
#include "celllist.h" #include <mirheo/core/logger.h> #include <mirheo/core/pvs/object_vector.h> #include <mirheo/core/pvs/particle_vector.h> #include <mirheo/core/pvs/views/pv.h> #include <mirheo/core/utils/cuda_common.h> #include <mirheo/core/utils/kernel_launch.h> #include <mirheo/core/utils/type_traits.h> #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wshadow" #include <extern/cub/cub/device/device_scan.cuh> #pragma GCC diagnostic pop #include <algorithm> namespace mirheo { namespace CellListKernels { enum {INVALID = -1}; inline __device__ bool outgoingParticle(real4 pos) { return Real3_int(pos).isMarked(); } __global__ void computeCellSizes(PVview view, CellListInfo cinfo) { const int pid = blockIdx.x * blockDim.x + threadIdx.x; if (pid >= view.size) return; real4 coo = view.readPositionNoCache(pid); // XXX: relying here only on redistribution if ( outgoingParticle(coo) ) return; int cid = cinfo.getCellId<CellListsProjection::Clamp>(coo); atomicAdd(cinfo.cellSizes + cid, 1); } __global__ void reorderPositionsAndCreateMap(PVview view, CellListInfo cinfo, real4 *outPositions) { const int pid = blockIdx.x * blockDim.x + threadIdx.x; if (pid >= view.size) return; int dstId = INVALID; // this is to allow more cache for atomics // loads / stores here need no cache real4 pos = view.readPositionNoCache(pid); int cid = cinfo.getCellId<CellListsProjection::Clamp>(pos); // XXX: relying here only on redistribution if ( !outgoingParticle(pos) ) dstId = cinfo.cellStarts[cid] + atomicAdd(cinfo.cellSizes + cid, 1); if (dstId != INVALID) writeNoCache(outPositions + dstId, pos); cinfo.order[pid] = dstId; } template <typename T> __global__ void reorderExtraDataPerParticle(int n, const T *inExtraData, CellListInfo cinfo, T *outExtraData) { int srcId = blockIdx.x * blockDim.x + threadIdx.x; if (srcId >= n) return; int dstId = cinfo.order[srcId]; if (dstId != INVALID) outExtraData[dstId] = inExtraData[srcId]; } template <typename T> __global__ void accumulateKernel(int n, T *dst, CellListInfo cinfo, const T *src) { int pid = blockIdx.x * blockDim.x + threadIdx.x; if (pid >= n) return; int srcId = cinfo.order[pid]; assert(srcId != INVALID); dst[pid] += src[srcId]; } } // namespace CellListKernels //================================================================================= // Info //================================================================================= CellListInfo::CellListInfo(real rc_, real3 localDomainSize_) : rc(rc_), localDomainSize(localDomainSize_) { ncells = make_int3( math::floor(localDomainSize / rc + 1e-6_r) ); h = make_real3(localDomainSize) / make_real3(ncells); invh = 1.0_r / h; this->rc = std::min( {h.x, h.y, h.z} ); totcells = ncells.x * ncells.y * ncells.z; } CellListInfo::CellListInfo(real3 h_, real3 localDomainSize_) : h(h_), invh(1.0_r/h_), localDomainSize(localDomainSize_) { rc = std::min( {h.x, h.y, h.z} ); ncells = make_int3( math::ceil(localDomainSize / h - 1e-6_r) ); totcells = ncells.x * ncells.y * ncells.z; } //================================================================================= // Basic cell-lists //================================================================================= CellList::CellList(ParticleVector *pv, real rc_, real3 localDomainSize_) : CellListInfo(rc_, localDomainSize_), pv_(pv), particlesDataContainer(std::make_unique<LocalParticleVector>(nullptr)) { _initialize(); } CellList::CellList(ParticleVector *pv, int3 resolution, real3 localDomainSize_) : CellListInfo(localDomainSize_ / make_real3(resolution), 
localDomainSize_), pv_(pv), particlesDataContainer(std::make_unique<LocalParticleVector>(nullptr)) { _initialize(); } void CellList::_initialize() { localPV = particlesDataContainer.get(); cellSizes. resize_anew(totcells + 1); cellStarts.resize_anew(totcells + 1); cellSizes. clear(defaultStream); cellStarts.clear(defaultStream); CUDA_Check( cudaStreamSynchronize(defaultStream) ); debug("Initialized %s cell-list with %dx%dx%d cells and cut-off %f", pv_->getCName(), ncells.x, ncells.y, ncells.z, rc); } CellList::~CellList() = default; bool CellList::_checkNeedBuild() const { if (changedStamp == pv_->cellListStamp) { debug2("%s is already up-to-date, building skipped", makeName().c_str()); return false; } if (pv_->local()->size() == 0) { debug2("%s consists of no particles, building skipped", makeName().c_str()); return false; } return true; } void CellList::_updateExtraDataChannels(__UNUSED cudaStream_t stream) { auto& pvManager = pv_->local()->dataPerParticle; auto& containerManager = particlesDataContainer->dataPerParticle; int np = pv_->local()->size(); for (const auto& namedChannel : pvManager.getSortedChannels()) { const auto& name = namedChannel.first; const auto& desc = namedChannel.second; if (desc->persistence != DataManager::PersistenceMode::Active) continue; mpark::visit([&](auto pinnedBuffPtr) { using T = typename std::remove_pointer<decltype(pinnedBuffPtr)>::type::value_type; if (!containerManager.checkChannelExists(name)) containerManager.createData<T>(name, np); }, desc->varDataPtr); } } void CellList::_computeCellSizes(cudaStream_t stream) { debug2("%s : Computing cell sizes for %d particles", makeName().c_str(), pv_->local()->size()); cellSizes.clear(stream); PVview view(pv_, pv_->local()); const int nthreads = 128; SAFE_KERNEL_LAUNCH( CellListKernels::computeCellSizes, getNblocks(view.size, nthreads), nthreads, 0, stream, view, cellInfo() ); } void CellList::_computeCellStarts(cudaStream_t stream) { // Scan is always working with the same number of cells // Memory requirements can't change size_t bufSize = scanBuffer.size(); if (bufSize == 0) { cub::DeviceScan::ExclusiveSum(nullptr, bufSize, cellSizes.devPtr(), cellStarts.devPtr(), totcells+1, stream); scanBuffer.resize_anew(bufSize); } cub::DeviceScan::ExclusiveSum(scanBuffer.devPtr(), bufSize, cellSizes.devPtr(), cellStarts.devPtr(), totcells+1, stream); } void CellList::_reorderPositionsAndCreateMap(cudaStream_t stream) { debug2("Reordering %d %s particles", pv_->local()->size(), pv_->getCName()); PVview view(pv_, pv_->local()); order.resize_anew(view.size); particlesDataContainer->resize_anew(view.size); cellSizes.clear(stream); const int nthreads = 128; SAFE_KERNEL_LAUNCH( CellListKernels::reorderPositionsAndCreateMap, getNblocks(view.size, nthreads), nthreads, 0, stream, view, cellInfo(), particlesDataContainer->positions().devPtr() ); } void CellList::_reorderExtraDataEntry(const std::string& channelName, const DataManager::ChannelDescription *channelDesc, cudaStream_t stream) { const auto& dstDesc = particlesDataContainer->dataPerParticle.getChannelDescOrDie(channelName); int np = pv_->local()->size(); debug2("%s: reordering extra data '%s'", makeName().c_str(), channelName.c_str()); mpark::visit([&](auto srcPinnedBuff) { auto dstPinnedBuff = mpark::get<decltype(srcPinnedBuff)>(dstDesc.varDataPtr); constexpr int nthreads = 128; SAFE_KERNEL_LAUNCH( CellListKernels::reorderExtraDataPerParticle, getNblocks(np, nthreads), nthreads, 0, stream, np, srcPinnedBuff->devPtr(), this->cellInfo(), dstPinnedBuff->devPtr() ); }, 
channelDesc->varDataPtr); } void CellList::_reorderPersistentData(cudaStream_t stream) { auto srcExtraData = &pv_->local()->dataPerParticle; for (const auto& namedChannel : srcExtraData->getSortedChannels()) { const auto& name = namedChannel.first; const auto& desc = namedChannel.second; if (desc->persistence != DataManager::PersistenceMode::Active || name == ChannelNames::positions) // positions were already reordered manually continue; _reorderExtraDataEntry(name, desc, stream); } } void CellList::_build(cudaStream_t stream) { _computeCellSizes(stream); _computeCellStarts(stream); _reorderPositionsAndCreateMap(stream); _reorderPersistentData(stream); changedStamp = pv_->cellListStamp; } CellListInfo CellList::cellInfo() { CellListInfo::cellSizes = cellSizes.devPtr(); CellListInfo::cellStarts = cellStarts.devPtr(); CellListInfo::order = order.devPtr(); return *((CellListInfo*)this); } void CellList::build(cudaStream_t stream) { _updateExtraDataChannels(stream); if (!_checkNeedBuild()) return; debug("building %s", makeName().c_str()); _build(stream); } static void accumulateIfHasAddOperator(__UNUSED GPUcontainer *src, __UNUSED GPUcontainer *dst, __UNUSED int n, __UNUSED CellListInfo cinfo, __UNUSED cudaStream_t stream) { die("Cannot accumulate entries: operator+ not supported for this type"); } // use SFINAE to choose between additionable types template <typename T, typename = void_t<decltype(std::declval<T>() + std::declval<T>())>> static void accumulateIfHasAddOperator(PinnedBuffer<T> *src, PinnedBuffer<T> *dst, int n, CellListInfo cinfo, cudaStream_t stream) { const int nthreads = 128; SAFE_KERNEL_LAUNCH( CellListKernels::accumulateKernel, getNblocks(n, nthreads), nthreads, 0, stream, n, dst->devPtr(), cinfo, src->devPtr() ); } void CellList::_accumulateExtraData(const std::string& channelName, cudaStream_t stream) { int n = pv_->local()->size(); const auto& pvManager = pv_->local()->dataPerParticle; const auto& contManager = localPV->dataPerParticle; const auto& pvDesc = pvManager .getChannelDescOrDie(channelName); const auto& contDesc = contManager.getChannelDescOrDie(channelName); mpark::visit([&](auto srcPinnedBuff) { auto dstPinnedBuff = mpark::get<decltype(srcPinnedBuff)>(pvDesc.varDataPtr); accumulateIfHasAddOperator(srcPinnedBuff, dstPinnedBuff, n, this->cellInfo(), stream); }, contDesc.varDataPtr); } void CellList::accumulateChannels(const std::vector<std::string>& channelNames, cudaStream_t stream) { for (const auto& channelName : channelNames) { debug2("%s : accumulating channel '%s'", makeName().c_str(), channelName.c_str()); _accumulateExtraData(channelName, stream); } } void CellList::gatherChannels(const std::vector<std::string>& channelNames, cudaStream_t stream) { for (auto& channelName : channelNames) { debug("%s : gathering channel '%s'", makeName().c_str(), channelName.c_str()); auto& desc = localPV->dataPerParticle.getChannelDescOrDie(channelName); _reorderExtraDataEntry(channelName, &desc, stream); // invalidate particle vector halo if any entry is active pv_->haloValid = false; } } void CellList::clearChannels(const std::vector<std::string>& channelNames, cudaStream_t stream) { for (const auto& channelName : channelNames) { debug2("%s : clearing channel '%s'", makeName().c_str(), channelName.c_str()); localPV->dataPerParticle.getGenericData(channelName)->clearDevice(stream); } } LocalParticleVector* CellList::getLocalParticleVector() {return localPV;} std::string CellList::makeName() const { return "Cell List '" + pv_->getName() + "' (rc " + std::to_string(rc) + 
")"; } //================================================================================= // Primary cell-lists //================================================================================= PrimaryCellList::PrimaryCellList(ParticleVector *pv, real rc_, real3 localDomainSize_) : CellList(pv, rc_, localDomainSize_) { localPV = pv_->local(); if (dynamic_cast<ObjectVector*>(pv_) != nullptr) error("Using primary cell-lists with objects is STRONGLY discouraged. This will very likely result in an error"); } PrimaryCellList::PrimaryCellList(ParticleVector *pv, int3 resolution, real3 localDomainSize_) : CellList(pv, resolution, localDomainSize_) { localPV = pv_->local(); if (dynamic_cast<ObjectVector*>(pv_) != nullptr) error("Using primary cell-lists with objects is STRONGLY discouraged. This will very likely result in an error"); } PrimaryCellList::~PrimaryCellList() = default; void PrimaryCellList::build(cudaStream_t stream) { // Reqired here to avoid ptr swap if building didn't actually happen if (!_checkNeedBuild()) return; CellList::build(stream); if (pv_->local()->size() == 0) { debug2("%s consists of no particles, cell-list building skipped", pv_->getCName()); return; } // Now we need the new size of particles array. int newSize; CUDA_Check( cudaMemcpyAsync(&newSize, cellStarts.devPtr() + totcells, sizeof(int), cudaMemcpyDeviceToHost, stream) ); CUDA_Check( cudaStreamSynchronize(stream) ); debug2("%s : reordering completed, new size of %s particle vector is %d", makeName().c_str(), pv_->getCName(), newSize); particlesDataContainer->resize(newSize, stream); _swapPersistentExtraData(); pv_->local()->resize(newSize, stream); } void PrimaryCellList::accumulateChannels(__UNUSED const std::vector<std::string>& channelNames, __UNUSED cudaStream_t stream) {} void PrimaryCellList::gatherChannels(const std::vector<std::string>& channelNames, __UNUSED cudaStream_t stream) { // do not need to reorder data, but still invalidate halo if (!channelNames.empty()) pv_->haloValid = false; } template <typename T> static void swap(const std::string& channelName, DataManager& pvManager, DataManager& containerManager) { std::swap(*pvManager .getData<T>(channelName), *containerManager.getData<T>(channelName)); } void PrimaryCellList::_swapPersistentExtraData() { auto& pvManager = pv_->local()->dataPerParticle; auto& containerManager = particlesDataContainer->dataPerParticle; for (const auto& namedChannel : pvManager.getSortedChannels()) { const auto& name = namedChannel.first; const auto& desc = namedChannel.second; if (desc->persistence != DataManager::PersistenceMode::Active) continue; const auto& descCont = containerManager.getChannelDescOrDie(name); mpark::visit([&](auto pinnedBufferPv) { auto pinnedBufferCont = mpark::get<decltype(pinnedBufferPv)>(descCont.varDataPtr); std::swap(*pinnedBufferPv, *pinnedBufferCont); }, desc->varDataPtr); } } std::string PrimaryCellList::makeName() const { return "Primary " + CellList::makeName(); } } // namespace mirheo
fb6478d3a332d501fcd737be7e9e4883aa77333b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/core/Tensor.h> #include <ATen/Dispatch.h> #include <ATen/AccumulateType.h> #include <ATen/div_rtn.h> #include <ATen/hip/HIPBlas.h> #include <ATen/hip/detail/KernelUtils.h> #include <ATen/native/ConvUtils.h> #include <ATen/native/hip/block_reduce.cuh> #include <ATen/native/Resize.h> #include <ATen/native/IndexingUtils.h> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/empty.h> #include <ATen/ops/_conv_depthwise2d_native.h> #endif namespace at::native { namespace { using at::cuda::detail::CUDA_NUM_THREADS; using at::cuda::detail::GET_BLOCKS; template <typename scalar_t, int ndim, template <typename U> class PtrTraits = DefaultPtrTraits> PackedTensorAccessor32<scalar_t, ndim, PtrTraits> dummy_packed_accessor32() { std::array<int64_t, ndim> zeros{}; return {nullptr, zeros.data(), zeros.data()}; } template <int kSize, typename scalar_t, typename index_t> __global__ void conv_depthwise2d_forward_kernel( const PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> input, PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> output, const PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> weight, const PackedTensorAccessor32<scalar_t, 1, DefaultPtrTraits> bias, bool biasEnabled, index_t totalElements, const int outputChannels, const int depthwiseMultiplier, const int inputWidth, const int inputHeight, const int outputWidth, const int outputHeight, const int kernelWidth, const int kernelHeight, const int strideWidth, const int strideHeight, const int padWidth, const int padHeight, const int dilationWidth, const int dilationHeight) { using acc_t = at::acc_type<scalar_t, true>; const int KW_LIMIT = (kSize != 0) ? kSize : kernelWidth; const int KH_LIMIT = (kSize != 0) ? kSize : kernelHeight; CUDA_KERNEL_LOOP_TYPE(linearIndex, totalElements, index_t) { //calculate n,c,h,w indices, replacing modulos by divide and multiply add, //result is same as would be in the code below //const int n = linearIndex / batchStride; //batchStride = outputChannels * outputHeight * outputWidth //const int c = (linearIndex / channelStride) % outputChannels; //channelStride = outputHeight * outputWidth //const int h = (linearIndex / outputWidth) % outputHeight; //const int w = linearIndex % outputWidth; int indtmp1 = linearIndex/outputWidth; const int w = linearIndex - indtmp1 * outputWidth; int indtmp2 = indtmp1/outputHeight; const int h = indtmp1 - indtmp2 * outputHeight; indtmp1 = indtmp2; indtmp2 = indtmp1/outputChannels; const int c = indtmp1 - indtmp2 * outputChannels; const int n = indtmp2; int inputChannel = c; int inputChannels = outputChannels; if (depthwiseMultiplier !=1) { inputChannel /= depthwiseMultiplier; inputChannels /= depthwiseMultiplier; } int weightOffset = c * kernelHeight * kernelWidth; acc_t value = biasEnabled ? 
static_cast<acc_t>(bias.data()[c]) : acc_t(0); const index_t offset0 = (n * inputChannels + inputChannel) * inputHeight * inputWidth; #if !defined(USE_ROCM) #pragma unroll #endif for (int kH = 0; kH < KH_LIMIT; ++kH) { #if !defined(USE_ROCM) #pragma unroll #endif for (int kW = 0; kW < KW_LIMIT; ++kW) { const int h_in = -padHeight + h * strideHeight + kH * dilationHeight; const int w_in = -padWidth + w * strideWidth + kW * dilationWidth; if ((h_in >= 0) && (h_in < inputHeight) && (w_in >= 0) && (w_in < inputWidth)) { const index_t offset = offset0 + h_in * inputWidth + w_in; value += (static_cast<acc_t>(weight.data()[weightOffset]) * static_cast<acc_t>(input.data()[offset])); } ++weightOffset; } } output.data()[linearIndex] = static_cast<scalar_t>(value); } } template <int kSize, int stride, typename scalar_t, typename index_t> __global__ void conv_depthwise2d_backward_kernel( const PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> grad_output, PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> grad_input, const PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> weight, index_t totalElements, const int inputChannels, const int depthwiseMultiplier, const int outputChannels, const int inputWidth, const int inputHeight, const int outputWidth, const int outputHeight, const int kernelWidth, const int kernelHeight, const int strideWidth, const int strideHeight, const int padWidth, const int padHeight, const int dilationWidth, const int dilationHeight) { using acc_t = at::acc_type<scalar_t, true>; const int KW_LIMIT = (kSize != 0) ? kSize : kernelWidth; const int KH_LIMIT = (kSize != 0) ? kSize : kernelHeight; const int strideW = (stride != 0) ? stride : strideWidth; const int strideH = (stride != 0) ? stride : strideHeight; CUDA_KERNEL_LOOP_TYPE(linearIndex, totalElements, index_t) { int indtmp1 = linearIndex/inputWidth; const int w = linearIndex - indtmp1 * inputWidth; int indtmp2 = indtmp1/inputHeight; const int h = indtmp1 - indtmp2 * inputHeight; indtmp1 = indtmp2; indtmp2 = indtmp1/inputChannels; const int c = indtmp1 - indtmp2 * inputChannels; const int n = indtmp2; acc_t value(0); #if !defined(USE_ROCM) #pragma unroll #endif for (int multiplier = 0; multiplier < depthwiseMultiplier; ++multiplier) { int och = (c * depthwiseMultiplier) + multiplier; int weightOffset = och * kernelHeight * kernelWidth; #if !defined(USE_ROCM) #pragma unroll #endif for (int kh = 0; kh < KH_LIMIT; ++kh) { #if defined(USE_ROCM) #pragma unroll #endif for (int kw = 0; kw < KW_LIMIT; ++kw) { int h_out = h + padHeight - kh * dilationHeight; int w_out = w + padWidth - kw * dilationWidth; if ((h_out % strideH == 0) && (w_out % strideW == 0)) { h_out = h_out / strideH; w_out = w_out / strideW; if ((h_out >= 0) && (h_out < outputHeight) && (w_out >= 0) && (w_out < outputWidth)) { const int offset = ((n * outputChannels + och) * outputHeight + h_out) * outputWidth + w_out; value += (static_cast<acc_t>(weight.data()[weightOffset]) * static_cast<acc_t>(grad_output.data()[offset])); } } ++weightOffset; } } } grad_input.data()[linearIndex] = static_cast<scalar_t>(value); } } template <typename scalar_t, typename index_t=unsigned> __global__ void conv_depthwise2d_grad_weight_kernel( const PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> grad_output, const PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> input, PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> grad_weight, const int batchSize, const int inputChannels, const int kernelChannels, const int depthwiseMultiplier, const int inputWidth, const 
int inputHeight, const int outputWidth, const int outputHeight, const int kernelWidth, const int kernelHeight, const int strideWidth, const int strideHeight, const int padWidth, const int padHeight, const int dilationWidth, const int dilationHeight) { using acc_t = at::acc_type<scalar_t, true>; const int channelStride = kernelWidth * kernelHeight; // Each Block is responsible for accumulating over a permutation of // (channels x kH x kW), use blockIdx to determine which one int bidx = blockIdx.x; int kW = bidx % kernelWidth; int kH = (bidx / kernelWidth) % kernelHeight; int ch = (bidx / channelStride); // Need to calculate which input channel is associated with this filter // channel int inputCh = ch / depthwiseMultiplier; acc_t grad(0); const int laneId = threadIdx.x % C10_WARP_SIZE; const int batch = threadIdx.x / C10_WARP_SIZE; const int nwarps = blockDim.x / C10_WARP_SIZE; const int imageElements = outputWidth * outputHeight; // Use warp per item. In the original kernel, a threadblock was used to sum over NHW. // Here, we use a warp to sum values over HW dimension, and if batchSize is larger than the // number of warps, a warp would loop over remaining batch items (e.g. if there are 8 warps, // warp 0 would go over 0-8-16 etc image, warp 1 over 1-9-17 etc). Later in blockReduce, // all the warps will be reduced anyway, thus the full reduction will be over NHW, like it // should be. That allows to get rid of one modulo operation inside the loop (because n/batchIdx // now does not have to be computed through modulo, you are just looping over it), and // bring a nice speed-up. for (int batchIdx = batch; batchIdx < batchSize; batchIdx += nwarps){ // Warp-stride loop over elements in a batch item for (index_t idx = laneId; idx < imageElements; idx += C10_WARP_SIZE) { // Need to calculate the following: batch position, and offset into the grad_output // in height, and width. 
We can intuit the corresponding position in the input from // the other parameters we have int go_w_offset = idx % outputWidth; int go_h_offset = (idx / outputWidth); int i_w_offset = (go_w_offset * strideWidth) + (kW * dilationWidth) - padWidth; int i_h_offset = (go_h_offset * strideHeight) + (kH * dilationHeight) - padHeight; if (i_w_offset >= 0 && i_h_offset >= 0 && i_w_offset < inputWidth && i_h_offset < inputHeight) { int inputOffset = ((batchIdx * inputChannels + inputCh) * inputHeight + i_h_offset) * inputWidth + i_w_offset; int outputOffset = ((batchIdx * kernelChannels + ch) * outputHeight ) * outputWidth + idx; grad += (static_cast<acc_t>(input.data()[inputOffset]) * static_cast<acc_t>(grad_output.data()[outputOffset])); } } } // At this point each thread in the block has a local gradient, which we need to // accumulate prior to writing the global value extern __shared__ char smem[]; acc_t* buf = reinterpret_cast<acc_t*>(smem); acc_t tval = cuda_utils::BlockReduceSum(grad, buf); // After reduction, first thread in the block has the gradient, so its responsible // for writing it to grad_weight if (threadIdx.x == 0) { int weightOffset = kW + (kernelWidth * kH) + (kernelWidth * kernelHeight * ch); grad_weight.data()[weightOffset] = static_cast<scalar_t>(tval); } } void conv_depthwise2d_forward_out( const Tensor &input, const Tensor &output, const Tensor &weight, const Tensor &bias, const int kW, const int kH, const int dW, const int dH, const int padW, const int padH, const int dilationW, const int dilationH) { // Only handle 4D Input Tensors for now TORCH_CHECK(input.numel() > 0 && input.dim() == 4); TORCH_CHECK(weight.numel() > 0 && weight.dim() == 4); TORCH_CHECK(output.is_contiguous()); auto in_sizes = input.sizes(); auto w_sizes = weight.sizes(); // We assume that the input and weight Tensors are shaped properly by // the caller, so we verify that here to some extent // Weight Tensor is shape (output_channels, 1, kH, kW) TORCH_CHECK(w_sizes[1] == 1); // Input Tensor is shape (N, input_channels, H, W) // We verify that the # of output_channels is a multiple of input_channels TORCH_CHECK(w_sizes[0] % in_sizes[1] == 0); // Bias has same # of channels as output const bool has_bias = bias.defined(); TORCH_CHECK(!has_bias || (bias.dim() <= 1 && bias.numel() == w_sizes[0])); // Following the behavior of other THCUNN functions, we shape the output // Tensor ourselves int64_t height = in_sizes[2]; int64_t width = in_sizes[3]; int64_t outputChannels = w_sizes[0]; auto out_sizes = conv_output_size(in_sizes, weight.sizes(), {padH, padW}, {dH, dW}, {dilationH, dilationW}); const auto outputWidth = out_sizes[3]; const auto outputHeight = out_sizes[2]; resize_output(output, out_sizes); int64_t inputChannels = in_sizes[1]; int64_t depthwiseMultiplier = outputChannels / inputChannels; // One thread per output value TORCH_CHECK(canUse32BitIndexMath(input) && canUse32BitIndexMath(output)); int32_t n = output.numel(); int blocks = GET_BLOCKS(n); dim3 grid(blocks); dim3 block(CUDA_NUM_THREADS); const auto stream = c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "conv_depthwise2d_forward_cuda", [&] { // Create PackedTensorAccessor // Kernel currently relies upon all the Tensors to be contiguous, but we made // them contiguous above const auto input_a = input.packed_accessor32<scalar_t, 4>(); const auto weight_a = weight.packed_accessor32<scalar_t, 4>(); const auto output_a = output.packed_accessor32<scalar_t, 4>(); const auto 
bias_a = has_bias ? bias.packed_accessor32<scalar_t, 1>() : dummy_packed_accessor32<scalar_t, 1>(); if (kW == 3 && kH == 3) { hipLaunchKernelGGL(( conv_depthwise2d_forward_kernel<3>) , dim3(grid), dim3(block), 0, stream, input_a, output_a, weight_a, bias_a, has_bias, n, outputChannels, depthwiseMultiplier, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_HIP_KERNEL_LAUNCH_CHECK(); } else if (kW == 1 && kH == 1) { hipLaunchKernelGGL(( conv_depthwise2d_forward_kernel<1>) , dim3(grid), dim3(block), 0, stream, input_a, output_a, weight_a, bias_a, has_bias, n, outputChannels, depthwiseMultiplier, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { hipLaunchKernelGGL(( conv_depthwise2d_forward_kernel<0>) , dim3(grid), dim3(block), 0, stream, input_a, output_a, weight_a, bias_a, has_bias, n, outputChannels, depthwiseMultiplier, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_HIP_KERNEL_LAUNCH_CHECK(); } }); } void conv_depthwise2d_backward_out( const Tensor &input, const Tensor &grad_output, const Tensor &grad_input, const Tensor &weight, const int kW, const int kH, const int dW, const int dH, const int padW, const int padH, const int dilationW, const int dilationH) { // Only handle 4D Input Tensors for now TORCH_CHECK(input.numel() > 0 && input.dim() == 4); TORCH_CHECK(weight.numel() > 0 && weight.dim() == 4); TORCH_CHECK(grad_output.numel() > 0 && grad_output.dim() == 4); // Minimal shape checking, as above // Same # of elements in batch TORCH_CHECK(input.sizes()[0] == grad_output.sizes()[0]); // Same # of filters as outputChannels TORCH_CHECK(weight.sizes()[0] == grad_output.sizes()[1]); // Resize Grainput_a auto in_sizes = input.sizes(); resize_output(grad_input, in_sizes); int inputChannels = in_sizes[1]; int height = in_sizes[2]; int width = in_sizes[3]; auto gO_sizes = grad_output.sizes(); int outputChannels = gO_sizes[1]; int outputHeight = gO_sizes[2]; int outputWidth = gO_sizes[3]; int depthwiseMultiplier = outputChannels / inputChannels; // Kernel currently relies upon all the Tensors to be contiguous TORCH_CHECK(grad_output.is_contiguous()); TORCH_CHECK(weight.is_contiguous()); TORCH_CHECK(grad_input.is_contiguous()); // One thread per grainput_a value TORCH_CHECK(canUse32BitIndexMath(grad_input) && canUse32BitIndexMath(grad_output)); int32_t n = grad_input.numel(); int blocks = GET_BLOCKS(n); dim3 grid(blocks); dim3 block(CUDA_NUM_THREADS); const auto stream = c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_output.scalar_type(), "conv_depthwise2d_backward_cuda", [&] { auto grad_output_a = grad_output.packed_accessor32<scalar_t, 4>(); auto grad_input_a = grad_input.packed_accessor32<scalar_t, 4>(); auto weight_a = weight.packed_accessor32<scalar_t, 4>(); if (kW == 3 && kH == 3) { if (dW == 1 && dH == 1){ hipLaunchKernelGGL(( conv_depthwise2d_backward_kernel<3, 1>), dim3(grid), dim3(block), 0, stream, grad_output_a, grad_input_a, weight_a, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_HIP_KERNEL_LAUNCH_CHECK(); } else if (dW == 2 && dH == 2) { hipLaunchKernelGGL(( conv_depthwise2d_backward_kernel<3, 2>), dim3(grid), dim3(block), 0, stream, grad_output_a, grad_input_a, weight_a, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, 
outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { hipLaunchKernelGGL(( conv_depthwise2d_backward_kernel<3, 0>), dim3(grid), dim3(block), 0, stream, grad_output_a, grad_input_a, weight_a, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_HIP_KERNEL_LAUNCH_CHECK(); } } else if (kW == 1 && kH == 1) { if (dW == 1 && dH == 1){ hipLaunchKernelGGL(( conv_depthwise2d_backward_kernel<1, 1>), dim3(grid), dim3(block), 0, stream, grad_output_a, grad_input_a, weight_a, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_HIP_KERNEL_LAUNCH_CHECK(); } else if (dW == 2 && dH == 2) { hipLaunchKernelGGL(( conv_depthwise2d_backward_kernel<1, 2>), dim3(grid), dim3(block), 0, stream, grad_output_a, grad_input_a, weight_a, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { hipLaunchKernelGGL(( conv_depthwise2d_backward_kernel<1, 0>), dim3(grid), dim3(block), 0, stream, grad_output_a, grad_input_a, weight_a, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_HIP_KERNEL_LAUNCH_CHECK(); } } else if (dW == 1 && dH == 1) { hipLaunchKernelGGL(( conv_depthwise2d_backward_kernel<0, 1>), dim3(grid), dim3(block), 0, stream, grad_output_a, grad_input_a, weight_a, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_HIP_KERNEL_LAUNCH_CHECK(); } else if (dW == 2 && dH == 2) { hipLaunchKernelGGL(( conv_depthwise2d_backward_kernel<0, 2>), dim3(grid), dim3(block), 0, stream, grad_output_a, grad_input_a, weight_a, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { hipLaunchKernelGGL(( conv_depthwise2d_backward_kernel<0, 0>), dim3(grid), dim3(block), 0, stream, grad_output_a, grad_input_a, weight_a, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_HIP_KERNEL_LAUNCH_CHECK(); } }); } // Crude benchmarks suggest 256 is better than 512 and 1024 // TODO: Autotune/use better heuristics, improve speed more. 
int getGradParamsNumThreads(int batchSize) { //warp per item in a batch, up to a maximum constexpr int MAX_BLOCK_SIZE = 256; return ::min(batchSize * at::cuda::warp_size(), MAX_BLOCK_SIZE); } void conv_depthwise2d_grad_weight_out( const Tensor &input, const Tensor &grad_output, const Tensor &grad_weight, const int kW, const int kH, const int dW, const int dH, const int padW, const int padH, const int dilationW, const int dilationH) { // Only handle 4D Input Tensors for now TORCH_CHECK(input.numel() > 0 && input.dim() == 4); TORCH_CHECK(grad_output.numel() > 0 && grad_output.dim() == 4); // Minimal shape checking as above // Same # of elements in batch TORCH_CHECK(input.sizes()[0] == grad_output.sizes()[0]); auto in_sizes = input.sizes(); int batchSize = in_sizes[0]; int inputChannels = in_sizes[1]; int height = in_sizes[2]; int width = in_sizes[3]; auto gO_sizes = grad_output.sizes(); int outputChannels = gO_sizes[1]; int outputHeight = gO_sizes[2]; int outputWidth = gO_sizes[3]; int depthwiseMultiplier = outputChannels / inputChannels; resize_output(grad_weight, {outputChannels, 1, kH, kW}); // Kernel currently relies upon all the Tensors to be contiguous TORCH_CHECK(grad_output.is_contiguous()); TORCH_CHECK(input.is_contiguous()); TORCH_CHECK(grad_weight.is_contiguous()); // We parallelize so that each block computes a single value in grad_weight TORCH_CHECK(canUse32BitIndexMath(input) && canUse32BitIndexMath(grad_output)); int blocks = outputChannels * kH * kW; // Make sure we have enough threads to perform the reduction, and use this number // to create the shared memory size for the reduction dim3 grid(blocks); dim3 block(getGradParamsNumThreads(batchSize)); const auto stream = c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_output.scalar_type(), "conv_depthwise2d_grad_weight_cuda", [&] { const auto grad_output_a = grad_output.packed_accessor32<scalar_t, 4>(); const auto input_a = input.packed_accessor32<scalar_t, 4>(); const auto grad_weight_a = grad_weight.packed_accessor32<scalar_t, 4>(); using acc_t = at::acc_type<scalar_t, true>; int warp_size = at::cuda::warp_size(); TORCH_INTERNAL_ASSERT(block.x % warp_size == 0); int smem = (block.x / warp_size) * sizeof(acc_t); hipLaunchKernelGGL(( conv_depthwise2d_grad_weight_kernel), dim3(grid), dim3(block), smem, stream, grad_output_a, input_a, grad_weight_a, batchSize, inputChannels, outputChannels, depthwiseMultiplier, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_HIP_KERNEL_LAUNCH_CHECK(); }); } } // namespace (anonymous) const Tensor& conv_depthwise2d_cuda_out( const Tensor &input_, const Tensor &weight_, IntArrayRef kernel_size, const c10::optional<Tensor> &bias_opt, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, const Tensor &out) { TORCH_CHECK(kernel_size.size() == 2); TORCH_CHECK(stride.size() == 2); TORCH_CHECK(padding.size() == 2); TORCH_CHECK(dilation.size() == 2); auto input = input_.expect_contiguous(); auto weight = weight_.expect_contiguous(); auto bias = [&] { if (bias_opt.has_value() && bias_opt->defined()) { return bias_opt->expect_contiguous(); } return c10::MaybeOwned<Tensor>::owned(c10::in_place); }(); conv_depthwise2d_forward_out( *input, out, *weight, *bias, kernel_size[1], kernel_size[0], stride[1], stride[0], padding[1], padding[0], dilation[1], dilation[0]); return out; } Tensor conv_depthwise2d_cuda( const Tensor &input, const Tensor &weight, IntArrayRef kernel_size, const 
c10::optional<Tensor> &bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) { auto out = at::empty({0}, input.options()); return conv_depthwise2d_cuda_out(input, weight, kernel_size, bias, stride, padding, dilation, out); } std::tuple<Tensor&, Tensor&> conv_depthwise2d_backward_cuda_out( const Tensor & grad_output_, const Tensor & self_, const Tensor & weight_, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, Tensor & grad_input, Tensor & grad_weight) { auto grad_output = grad_output_.expect_contiguous(); if (grad_weight.defined()) { auto self = self_.expect_contiguous(); conv_depthwise2d_grad_weight_out( *self, *grad_output, grad_weight, kernel_size[1], kernel_size[0], stride[1], stride[0], padding[1], padding[0], dilation[1], dilation[0]); } if (grad_input.defined()) { auto weight = weight_.expect_contiguous(); conv_depthwise2d_backward_out( self_, *grad_output, grad_input, *weight, kernel_size[1], kernel_size[0], stride[1], stride[0], padding[1], padding[0], dilation[1], dilation[0]); } return std::forward_as_tuple(grad_input, grad_weight); } std::tuple<Tensor, Tensor> conv_depthwise2d_backward_cuda( const Tensor& grad_output, const Tensor& self, const Tensor& weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, std::array<bool, 2> output_mask) { Tensor grad_input; Tensor grad_weight; if (output_mask[0]) { grad_input = at::empty({0}, grad_output.options()); } if (output_mask[1]) { grad_weight = at::empty({0}, grad_output.options()); } return conv_depthwise2d_backward_cuda_out( grad_output, self, weight, kernel_size, stride, padding, dilation, grad_input, grad_weight); } REGISTER_CUDA_DISPATCH(conv_depthwise2d_backward_stub, &conv_depthwise2d_backward_cuda); } // namespace at::native
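Both variants of the file in this pair shape the output with conv_output_size before launching the forward kernel. For reference, the standard per-dimension formula such a helper computes is out = (in + 2*pad - dilation*(kernel-1) - 1) / stride + 1; the sketch below is a stand-alone illustration of that arithmetic, not the ATen helper itself.

// Illustrative re-derivation of the per-dimension output size used by the
// depthwise forward pass (assumes the usual floor-division convolution formula).
#include <cassert>

inline int convOutSize(int in, int kernel, int stride, int pad, int dilation) {
    return (in + 2 * pad - dilation * (kernel - 1) - 1) / stride + 1;
}

int main() {
    // A 3x3 depthwise kernel with stride 1, padding 1, dilation 1 preserves H and W.
    assert(convOutSize(32, /*kernel=*/3, /*stride=*/1, /*pad=*/1, /*dilation=*/1) == 32);
    // Stride 2 roughly halves the spatial extent (rounded down).
    assert(convOutSize(32, 3, 2, 1, 1) == 16);
    return 0;
}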
fb6478d3a332d501fcd737be7e9e4883aa77333b.cu
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/core/Tensor.h> #include <ATen/Dispatch.h> #include <ATen/AccumulateType.h> #include <ATen/div_rtn.h> #include <ATen/cuda/CUDABlas.h> #include <ATen/cuda/detail/KernelUtils.h> #include <ATen/native/ConvUtils.h> #include <ATen/native/cuda/block_reduce.cuh> #include <ATen/native/Resize.h> #include <ATen/native/IndexingUtils.h> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/empty.h> #include <ATen/ops/_conv_depthwise2d_native.h> #endif namespace at::native { namespace { using at::cuda::detail::CUDA_NUM_THREADS; using at::cuda::detail::GET_BLOCKS; template <typename scalar_t, int ndim, template <typename U> class PtrTraits = DefaultPtrTraits> PackedTensorAccessor32<scalar_t, ndim, PtrTraits> dummy_packed_accessor32() { std::array<int64_t, ndim> zeros{}; return {nullptr, zeros.data(), zeros.data()}; } template <int kSize, typename scalar_t, typename index_t> __global__ void conv_depthwise2d_forward_kernel( const PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> input, PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> output, const PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> weight, const PackedTensorAccessor32<scalar_t, 1, DefaultPtrTraits> bias, bool biasEnabled, index_t totalElements, const int outputChannels, const int depthwiseMultiplier, const int inputWidth, const int inputHeight, const int outputWidth, const int outputHeight, const int kernelWidth, const int kernelHeight, const int strideWidth, const int strideHeight, const int padWidth, const int padHeight, const int dilationWidth, const int dilationHeight) { using acc_t = at::acc_type<scalar_t, true>; const int KW_LIMIT = (kSize != 0) ? kSize : kernelWidth; const int KH_LIMIT = (kSize != 0) ? kSize : kernelHeight; CUDA_KERNEL_LOOP_TYPE(linearIndex, totalElements, index_t) { //calculate n,c,h,w indices, replacing modulos by divide and multiply add, //result is same as would be in the code below //const int n = linearIndex / batchStride; //batchStride = outputChannels * outputHeight * outputWidth //const int c = (linearIndex / channelStride) % outputChannels; //channelStride = outputHeight * outputWidth //const int h = (linearIndex / outputWidth) % outputHeight; //const int w = linearIndex % outputWidth; int indtmp1 = linearIndex/outputWidth; const int w = linearIndex - indtmp1 * outputWidth; int indtmp2 = indtmp1/outputHeight; const int h = indtmp1 - indtmp2 * outputHeight; indtmp1 = indtmp2; indtmp2 = indtmp1/outputChannels; const int c = indtmp1 - indtmp2 * outputChannels; const int n = indtmp2; int inputChannel = c; int inputChannels = outputChannels; if (depthwiseMultiplier !=1) { inputChannel /= depthwiseMultiplier; inputChannels /= depthwiseMultiplier; } int weightOffset = c * kernelHeight * kernelWidth; acc_t value = biasEnabled ? 
static_cast<acc_t>(bias.data()[c]) : acc_t(0); const index_t offset0 = (n * inputChannels + inputChannel) * inputHeight * inputWidth; #if !defined(USE_ROCM) #pragma unroll #endif for (int kH = 0; kH < KH_LIMIT; ++kH) { #if !defined(USE_ROCM) #pragma unroll #endif for (int kW = 0; kW < KW_LIMIT; ++kW) { const int h_in = -padHeight + h * strideHeight + kH * dilationHeight; const int w_in = -padWidth + w * strideWidth + kW * dilationWidth; if ((h_in >= 0) && (h_in < inputHeight) && (w_in >= 0) && (w_in < inputWidth)) { const index_t offset = offset0 + h_in * inputWidth + w_in; value += (static_cast<acc_t>(weight.data()[weightOffset]) * static_cast<acc_t>(input.data()[offset])); } ++weightOffset; } } output.data()[linearIndex] = static_cast<scalar_t>(value); } } template <int kSize, int stride, typename scalar_t, typename index_t> __global__ void conv_depthwise2d_backward_kernel( const PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> grad_output, PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> grad_input, const PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> weight, index_t totalElements, const int inputChannels, const int depthwiseMultiplier, const int outputChannels, const int inputWidth, const int inputHeight, const int outputWidth, const int outputHeight, const int kernelWidth, const int kernelHeight, const int strideWidth, const int strideHeight, const int padWidth, const int padHeight, const int dilationWidth, const int dilationHeight) { using acc_t = at::acc_type<scalar_t, true>; const int KW_LIMIT = (kSize != 0) ? kSize : kernelWidth; const int KH_LIMIT = (kSize != 0) ? kSize : kernelHeight; const int strideW = (stride != 0) ? stride : strideWidth; const int strideH = (stride != 0) ? stride : strideHeight; CUDA_KERNEL_LOOP_TYPE(linearIndex, totalElements, index_t) { int indtmp1 = linearIndex/inputWidth; const int w = linearIndex - indtmp1 * inputWidth; int indtmp2 = indtmp1/inputHeight; const int h = indtmp1 - indtmp2 * inputHeight; indtmp1 = indtmp2; indtmp2 = indtmp1/inputChannels; const int c = indtmp1 - indtmp2 * inputChannels; const int n = indtmp2; acc_t value(0); #if !defined(USE_ROCM) #pragma unroll #endif for (int multiplier = 0; multiplier < depthwiseMultiplier; ++multiplier) { int och = (c * depthwiseMultiplier) + multiplier; int weightOffset = och * kernelHeight * kernelWidth; #if !defined(USE_ROCM) #pragma unroll #endif for (int kh = 0; kh < KH_LIMIT; ++kh) { #if defined(USE_ROCM) #pragma unroll #endif for (int kw = 0; kw < KW_LIMIT; ++kw) { int h_out = h + padHeight - kh * dilationHeight; int w_out = w + padWidth - kw * dilationWidth; if ((h_out % strideH == 0) && (w_out % strideW == 0)) { h_out = h_out / strideH; w_out = w_out / strideW; if ((h_out >= 0) && (h_out < outputHeight) && (w_out >= 0) && (w_out < outputWidth)) { const int offset = ((n * outputChannels + och) * outputHeight + h_out) * outputWidth + w_out; value += (static_cast<acc_t>(weight.data()[weightOffset]) * static_cast<acc_t>(grad_output.data()[offset])); } } ++weightOffset; } } } grad_input.data()[linearIndex] = static_cast<scalar_t>(value); } } template <typename scalar_t, typename index_t=unsigned> __global__ void conv_depthwise2d_grad_weight_kernel( const PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> grad_output, const PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> input, PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> grad_weight, const int batchSize, const int inputChannels, const int kernelChannels, const int depthwiseMultiplier, const int inputWidth, const 
int inputHeight, const int outputWidth, const int outputHeight, const int kernelWidth, const int kernelHeight, const int strideWidth, const int strideHeight, const int padWidth, const int padHeight, const int dilationWidth, const int dilationHeight) { using acc_t = at::acc_type<scalar_t, true>; const int channelStride = kernelWidth * kernelHeight; // Each Block is responsible for accumulating over a permutation of // (channels x kH x kW), use blockIdx to determine which one int bidx = blockIdx.x; int kW = bidx % kernelWidth; int kH = (bidx / kernelWidth) % kernelHeight; int ch = (bidx / channelStride); // Need to calculate which input channel is associated with this filter // channel int inputCh = ch / depthwiseMultiplier; acc_t grad(0); const int laneId = threadIdx.x % C10_WARP_SIZE; const int batch = threadIdx.x / C10_WARP_SIZE; const int nwarps = blockDim.x / C10_WARP_SIZE; const int imageElements = outputWidth * outputHeight; // Use warp per item. In the original kernel, a threadblock was used to sum over NHW. // Here, we use a warp to sum values over HW dimension, and if batchSize is larger than the // number of warps, a warp would loop over remaining batch items (e.g. if there are 8 warps, // warp 0 would go over 0-8-16 etc image, warp 1 over 1-9-17 etc). Later in blockReduce, // all the warps will be reduced anyway, thus the full reduction will be over NHW, like it // should be. That allows to get rid of one modulo operation inside the loop (because n/batchIdx // now does not have to be computed through modulo, you are just looping over it), and // bring a nice speed-up. for (int batchIdx = batch; batchIdx < batchSize; batchIdx += nwarps){ // Warp-stride loop over elements in a batch item for (index_t idx = laneId; idx < imageElements; idx += C10_WARP_SIZE) { // Need to calculate the following: batch position, and offset into the grad_output // in height, and width. 
We can intuit the corresponding position in the input from // the other parameters we have int go_w_offset = idx % outputWidth; int go_h_offset = (idx / outputWidth); int i_w_offset = (go_w_offset * strideWidth) + (kW * dilationWidth) - padWidth; int i_h_offset = (go_h_offset * strideHeight) + (kH * dilationHeight) - padHeight; if (i_w_offset >= 0 && i_h_offset >= 0 && i_w_offset < inputWidth && i_h_offset < inputHeight) { int inputOffset = ((batchIdx * inputChannels + inputCh) * inputHeight + i_h_offset) * inputWidth + i_w_offset; int outputOffset = ((batchIdx * kernelChannels + ch) * outputHeight ) * outputWidth + idx; grad += (static_cast<acc_t>(input.data()[inputOffset]) * static_cast<acc_t>(grad_output.data()[outputOffset])); } } } // At this point each thread in the block has a local gradient, which we need to // accumulate prior to writing the global value extern __shared__ char smem[]; acc_t* buf = reinterpret_cast<acc_t*>(smem); acc_t tval = cuda_utils::BlockReduceSum(grad, buf); // After reduction, first thread in the block has the gradient, so its responsible // for writing it to grad_weight if (threadIdx.x == 0) { int weightOffset = kW + (kernelWidth * kH) + (kernelWidth * kernelHeight * ch); grad_weight.data()[weightOffset] = static_cast<scalar_t>(tval); } } void conv_depthwise2d_forward_out( const Tensor &input, const Tensor &output, const Tensor &weight, const Tensor &bias, const int kW, const int kH, const int dW, const int dH, const int padW, const int padH, const int dilationW, const int dilationH) { // Only handle 4D Input Tensors for now TORCH_CHECK(input.numel() > 0 && input.dim() == 4); TORCH_CHECK(weight.numel() > 0 && weight.dim() == 4); TORCH_CHECK(output.is_contiguous()); auto in_sizes = input.sizes(); auto w_sizes = weight.sizes(); // We assume that the input and weight Tensors are shaped properly by // the caller, so we verify that here to some extent // Weight Tensor is shape (output_channels, 1, kH, kW) TORCH_CHECK(w_sizes[1] == 1); // Input Tensor is shape (N, input_channels, H, W) // We verify that the # of output_channels is a multiple of input_channels TORCH_CHECK(w_sizes[0] % in_sizes[1] == 0); // Bias has same # of channels as output const bool has_bias = bias.defined(); TORCH_CHECK(!has_bias || (bias.dim() <= 1 && bias.numel() == w_sizes[0])); // Following the behavior of other THCUNN functions, we shape the output // Tensor ourselves int64_t height = in_sizes[2]; int64_t width = in_sizes[3]; int64_t outputChannels = w_sizes[0]; auto out_sizes = conv_output_size(in_sizes, weight.sizes(), {padH, padW}, {dH, dW}, {dilationH, dilationW}); const auto outputWidth = out_sizes[3]; const auto outputHeight = out_sizes[2]; resize_output(output, out_sizes); int64_t inputChannels = in_sizes[1]; int64_t depthwiseMultiplier = outputChannels / inputChannels; // One thread per output value TORCH_CHECK(canUse32BitIndexMath(input) && canUse32BitIndexMath(output)); int32_t n = output.numel(); int blocks = GET_BLOCKS(n); dim3 grid(blocks); dim3 block(CUDA_NUM_THREADS); const auto stream = c10::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "conv_depthwise2d_forward_cuda", [&] { // Create PackedTensorAccessor // Kernel currently relies upon all the Tensors to be contiguous, but we made // them contiguous above const auto input_a = input.packed_accessor32<scalar_t, 4>(); const auto weight_a = weight.packed_accessor32<scalar_t, 4>(); const auto output_a = output.packed_accessor32<scalar_t, 4>(); const auto bias_a = 
has_bias ? bias.packed_accessor32<scalar_t, 1>() : dummy_packed_accessor32<scalar_t, 1>(); if (kW == 3 && kH == 3) { conv_depthwise2d_forward_kernel<3> <<<grid, block, 0, stream>>>( input_a, output_a, weight_a, bias_a, has_bias, n, outputChannels, depthwiseMultiplier, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else if (kW == 1 && kH == 1) { conv_depthwise2d_forward_kernel<1> <<<grid, block, 0, stream>>>( input_a, output_a, weight_a, bias_a, has_bias, n, outputChannels, depthwiseMultiplier, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { conv_depthwise2d_forward_kernel<0> <<<grid, block, 0, stream>>>( input_a, output_a, weight_a, bias_a, has_bias, n, outputChannels, depthwiseMultiplier, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_CUDA_KERNEL_LAUNCH_CHECK(); } }); } void conv_depthwise2d_backward_out( const Tensor &input, const Tensor &grad_output, const Tensor &grad_input, const Tensor &weight, const int kW, const int kH, const int dW, const int dH, const int padW, const int padH, const int dilationW, const int dilationH) { // Only handle 4D Input Tensors for now TORCH_CHECK(input.numel() > 0 && input.dim() == 4); TORCH_CHECK(weight.numel() > 0 && weight.dim() == 4); TORCH_CHECK(grad_output.numel() > 0 && grad_output.dim() == 4); // Minimal shape checking, as above // Same # of elements in batch TORCH_CHECK(input.sizes()[0] == grad_output.sizes()[0]); // Same # of filters as outputChannels TORCH_CHECK(weight.sizes()[0] == grad_output.sizes()[1]); // Resize Grainput_a auto in_sizes = input.sizes(); resize_output(grad_input, in_sizes); int inputChannels = in_sizes[1]; int height = in_sizes[2]; int width = in_sizes[3]; auto gO_sizes = grad_output.sizes(); int outputChannels = gO_sizes[1]; int outputHeight = gO_sizes[2]; int outputWidth = gO_sizes[3]; int depthwiseMultiplier = outputChannels / inputChannels; // Kernel currently relies upon all the Tensors to be contiguous TORCH_CHECK(grad_output.is_contiguous()); TORCH_CHECK(weight.is_contiguous()); TORCH_CHECK(grad_input.is_contiguous()); // One thread per grainput_a value TORCH_CHECK(canUse32BitIndexMath(grad_input) && canUse32BitIndexMath(grad_output)); int32_t n = grad_input.numel(); int blocks = GET_BLOCKS(n); dim3 grid(blocks); dim3 block(CUDA_NUM_THREADS); const auto stream = c10::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_output.scalar_type(), "conv_depthwise2d_backward_cuda", [&] { auto grad_output_a = grad_output.packed_accessor32<scalar_t, 4>(); auto grad_input_a = grad_input.packed_accessor32<scalar_t, 4>(); auto weight_a = weight.packed_accessor32<scalar_t, 4>(); if (kW == 3 && kH == 3) { if (dW == 1 && dH == 1){ conv_depthwise2d_backward_kernel<3, 1><<<grid, block, 0, stream>>>( grad_output_a, grad_input_a, weight_a, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else if (dW == 2 && dH == 2) { conv_depthwise2d_backward_kernel<3, 2><<<grid, block, 0, stream>>>( grad_output_a, grad_input_a, weight_a, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { conv_depthwise2d_backward_kernel<3, 0><<<grid, block, 
0, stream>>>( grad_output_a, grad_input_a, weight_a, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } else if (kW == 1 && kH == 1) { if (dW == 1 && dH == 1){ conv_depthwise2d_backward_kernel<1, 1><<<grid, block, 0, stream>>>( grad_output_a, grad_input_a, weight_a, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else if (dW == 2 && dH == 2) { conv_depthwise2d_backward_kernel<1, 2><<<grid, block, 0, stream>>>( grad_output_a, grad_input_a, weight_a, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { conv_depthwise2d_backward_kernel<1, 0><<<grid, block, 0, stream>>>( grad_output_a, grad_input_a, weight_a, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } else if (dW == 1 && dH == 1) { conv_depthwise2d_backward_kernel<0, 1><<<grid, block, 0, stream>>>( grad_output_a, grad_input_a, weight_a, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else if (dW == 2 && dH == 2) { conv_depthwise2d_backward_kernel<0, 2><<<grid, block, 0, stream>>>( grad_output_a, grad_input_a, weight_a, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { conv_depthwise2d_backward_kernel<0, 0><<<grid, block, 0, stream>>>( grad_output_a, grad_input_a, weight_a, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_CUDA_KERNEL_LAUNCH_CHECK(); } }); } // Crude benchmarks suggest 256 is better than 512 and 1024 // TODO: Autotune/use better heuristics, improve speed more. 
int getGradParamsNumThreads(int batchSize) { //warp per item in a batch, up to a maximum constexpr int MAX_BLOCK_SIZE = 256; return std::min(batchSize * at::cuda::warp_size(), MAX_BLOCK_SIZE); } void conv_depthwise2d_grad_weight_out( const Tensor &input, const Tensor &grad_output, const Tensor &grad_weight, const int kW, const int kH, const int dW, const int dH, const int padW, const int padH, const int dilationW, const int dilationH) { // Only handle 4D Input Tensors for now TORCH_CHECK(input.numel() > 0 && input.dim() == 4); TORCH_CHECK(grad_output.numel() > 0 && grad_output.dim() == 4); // Minimal shape checking as above // Same # of elements in batch TORCH_CHECK(input.sizes()[0] == grad_output.sizes()[0]); auto in_sizes = input.sizes(); int batchSize = in_sizes[0]; int inputChannels = in_sizes[1]; int height = in_sizes[2]; int width = in_sizes[3]; auto gO_sizes = grad_output.sizes(); int outputChannels = gO_sizes[1]; int outputHeight = gO_sizes[2]; int outputWidth = gO_sizes[3]; int depthwiseMultiplier = outputChannels / inputChannels; resize_output(grad_weight, {outputChannels, 1, kH, kW}); // Kernel currently relies upon all the Tensors to be contiguous TORCH_CHECK(grad_output.is_contiguous()); TORCH_CHECK(input.is_contiguous()); TORCH_CHECK(grad_weight.is_contiguous()); // We parallelize so that each block computes a single value in grad_weight TORCH_CHECK(canUse32BitIndexMath(input) && canUse32BitIndexMath(grad_output)); int blocks = outputChannels * kH * kW; // Make sure we have enough threads to perform the reduction, and use this number // to create the shared memory size for the reduction dim3 grid(blocks); dim3 block(getGradParamsNumThreads(batchSize)); const auto stream = c10::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_output.scalar_type(), "conv_depthwise2d_grad_weight_cuda", [&] { const auto grad_output_a = grad_output.packed_accessor32<scalar_t, 4>(); const auto input_a = input.packed_accessor32<scalar_t, 4>(); const auto grad_weight_a = grad_weight.packed_accessor32<scalar_t, 4>(); using acc_t = at::acc_type<scalar_t, true>; int warp_size = at::cuda::warp_size(); TORCH_INTERNAL_ASSERT(block.x % warp_size == 0); int smem = (block.x / warp_size) * sizeof(acc_t); conv_depthwise2d_grad_weight_kernel<<<grid, block, smem, stream>>>( grad_output_a, input_a, grad_weight_a, batchSize, inputChannels, outputChannels, depthwiseMultiplier, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); } } // namespace (anonymous) const Tensor& conv_depthwise2d_cuda_out( const Tensor &input_, const Tensor &weight_, IntArrayRef kernel_size, const c10::optional<Tensor> &bias_opt, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, const Tensor &out) { TORCH_CHECK(kernel_size.size() == 2); TORCH_CHECK(stride.size() == 2); TORCH_CHECK(padding.size() == 2); TORCH_CHECK(dilation.size() == 2); auto input = input_.expect_contiguous(); auto weight = weight_.expect_contiguous(); auto bias = [&] { if (bias_opt.has_value() && bias_opt->defined()) { return bias_opt->expect_contiguous(); } return c10::MaybeOwned<Tensor>::owned(c10::in_place); }(); conv_depthwise2d_forward_out( *input, out, *weight, *bias, kernel_size[1], kernel_size[0], stride[1], stride[0], padding[1], padding[0], dilation[1], dilation[0]); return out; } Tensor conv_depthwise2d_cuda( const Tensor &input, const Tensor &weight, IntArrayRef kernel_size, const c10::optional<Tensor> &bias, IntArrayRef stride, 
IntArrayRef padding, IntArrayRef dilation) { auto out = at::empty({0}, input.options()); return conv_depthwise2d_cuda_out(input, weight, kernel_size, bias, stride, padding, dilation, out); } std::tuple<Tensor&, Tensor&> conv_depthwise2d_backward_cuda_out( const Tensor & grad_output_, const Tensor & self_, const Tensor & weight_, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, Tensor & grad_input, Tensor & grad_weight) { auto grad_output = grad_output_.expect_contiguous(); if (grad_weight.defined()) { auto self = self_.expect_contiguous(); conv_depthwise2d_grad_weight_out( *self, *grad_output, grad_weight, kernel_size[1], kernel_size[0], stride[1], stride[0], padding[1], padding[0], dilation[1], dilation[0]); } if (grad_input.defined()) { auto weight = weight_.expect_contiguous(); conv_depthwise2d_backward_out( self_, *grad_output, grad_input, *weight, kernel_size[1], kernel_size[0], stride[1], stride[0], padding[1], padding[0], dilation[1], dilation[0]); } return std::forward_as_tuple(grad_input, grad_weight); } std::tuple<Tensor, Tensor> conv_depthwise2d_backward_cuda( const Tensor& grad_output, const Tensor& self, const Tensor& weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, std::array<bool, 2> output_mask) { Tensor grad_input; Tensor grad_weight; if (output_mask[0]) { grad_input = at::empty({0}, grad_output.options()); } if (output_mask[1]) { grad_weight = at::empty({0}, grad_output.options()); } return conv_depthwise2d_backward_cuda_out( grad_output, self, weight, kernel_size, stride, padding, dilation, grad_input, grad_weight); } REGISTER_CUDA_DISPATCH(conv_depthwise2d_backward_stub, &conv_depthwise2d_backward_cuda); } // namespace at::native
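One detail worth calling out from the depthwise kernels above is the channel bookkeeping: output channel c reads from input channel c / depthwiseMultiplier and from the filter slice starting at c * kernelHeight * kernelWidth, so every input channel feeds exactly depthwiseMultiplier output channels. The host-side sketch below just checks that mapping; it is not part of the kernel code.

// Stand-alone check of the depthwise channel mapping used by the kernels above.
#include <cassert>
#include <vector>

int main() {
    const int inputChannels = 4, depthwiseMultiplier = 3;
    const int outputChannels = inputChannels * depthwiseMultiplier;

    std::vector<int> hits(inputChannels, 0);
    for (int c = 0; c < outputChannels; ++c) {
        hits[c / depthwiseMultiplier] += 1;  // same mapping as inputChannel = c / depthwiseMultiplier
    }
    for (int h : hits) {
        assert(h == depthwiseMultiplier);    // each input channel feeds exactly M output channels
    }
    return 0;
}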
74df0a894ab8da05665f34cc6f8f059fcb0471a5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit #include <thrust/device_vector.h> #include <thrust/functional.h> // thrust::plus #include <thrust/reduce.h> #include <cmath> #include "caffe/common.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <> void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y) { hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N; CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const double alpha, const double* A, const double* x, const double beta, double* y) { hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N; CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } /**********************add for pruning************************/ template <> void caffe_gpu_csrmm<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float * csrval,const int * csrrowptr,const int * csrcolind,const int nnz, const float beta, float* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; hipsparseOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPSPARSE_OPERATION_NON_TRANSPOSE:HIPSPARSE_OPERATION_TRANSPOSE; hipsparseOperation_t cuTransB = (TransB == CblasNoTrans) ? 
HIPSPARSE_OPERATION_NON_TRANSPOSE:HIPSPARSE_OPERATION_TRANSPOSE; cuTransB = HIPSPARSE_OPERATION_NON_TRANSPOSE; hipsparseScsrmm(Caffe::cusparse_handle(),cuTransB,N,M,K,nnz,&alpha,Caffe::cusparse_MatDescr(),csrval,csrrowptr,csrcolind,A,lda,&beta,C,N); } template <> void caffe_gpu_csrmm<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double * csrval,const int * csrrowptr,const int * csrcolind,const int nnz, const double beta, double* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; hipsparseOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPSPARSE_OPERATION_NON_TRANSPOSE:HIPSPARSE_OPERATION_TRANSPOSE; hipsparseOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPSPARSE_OPERATION_NON_TRANSPOSE:HIPSPARSE_OPERATION_TRANSPOSE; cuTransB = HIPSPARSE_OPERATION_NON_TRANSPOSE; hipsparseDcsrmm(Caffe::cusparse_handle(),cuTransB,N,M,K,nnz,&alpha,Caffe::cusparse_MatDescr(),csrval,csrrowptr,csrcolind,A,lda,&beta,C,N); } template <> void caffe_gpu_csrmv<float>(const CBLAS_TRANSPOSE TransA,const int M,const int N,const float alpha,const float * csrval,const int * csrrowptr,const int * csrcolind,const int nnz,const float * x,const float beta,float * y) { hipsparseOperation_t cuTransA = (TransA==CblasNoTrans)?HIPSPARSE_OPERATION_NON_TRANSPOSE:HIPSPARSE_OPERATION_TRANSPOSE; /* hipsparseHandle_t cphandle; hipsparseCreate(&cphandle); hipsparseMatDescr_t descrA; hipsparseCreateMatDescr(&descrA); hipsparseSetMatType(descrA,HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descrA,HIPSPARSE_INDEX_BASE_ZERO); */ //hipsparseScsrmv(cphandle,cuTransA,M,N,nnz,&alpha,descrA,csrval,csrrowptr,csrcolind,x,&beta,y); hipsparseScsrmv(Caffe::cusparse_handle(),cuTransA,M,N,nnz,&alpha,Caffe::cusparse_MatDescr(),csrval,csrrowptr,csrcolind,x,&beta,y); /* hipDeviceSynchronize(); hipsparseDestroyMatDescr(descrA); hipsparseDestroy(cphandle); */ } template <> void caffe_gpu_csrmv<double>(const CBLAS_TRANSPOSE TransA,const int M,const int N,const double alpha,const double * csrval,const int * csrrowptr,const int * csrcolind,const int nnz,const double * x,const double beta,double * y) { hipsparseOperation_t cuTransA = (TransA==CblasNoTrans)?HIPSPARSE_OPERATION_NON_TRANSPOSE:HIPSPARSE_OPERATION_TRANSPOSE; /* hipsparseHandle_t cphandle; hipsparseCreate(&cphandle); hipsparseMatDescr_t descrA; hipsparseCreateMatDescr(&descrA); hipsparseSetMatType(descrA,HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descrA,HIPSPARSE_INDEX_BASE_ZERO); */ //hipsparseDcsrmv(cphandle,cuTransA,M,N,nnz,&alpha,descrA,csrval,csrrowptr,csrcolind,x,&beta,y); hipsparseDcsrmv(Caffe::cusparse_handle(),cuTransA,M,N,nnz,&alpha,Caffe::cusparse_MatDescr(),csrval,csrrowptr,csrcolind,x,&beta,y); /* hipDeviceSynchronize(); hipsparseDestroyMatDescr(descrA); hipsparseDestroy(cphandle); */ } /**************************************************************/ template <> void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X, float* Y) { CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } template <> void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X, double* Y) { CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) { if (X != Y) { CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault)); // NOLINT(caffe/alt_fn) } } template <> void 
caffe_gpu_scal<float>(const int N, const float alpha, float *X) { CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal<double>(const int N, const double alpha, double *X) { CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal<float>(const int N, const float alpha, float* X, hipStream_t str) { hipStream_t initial_stream; CUBLAS_CHECK(hipblasGetStream(Caffe::cublas_handle(), &initial_stream)); CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), str)); CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), initial_stream)); } template <> void caffe_gpu_scal<double>(const int N, const double alpha, double* X, hipStream_t str) { hipStream_t initial_stream; CUBLAS_CHECK(hipblasGetStream(Caffe::cublas_handle(), &initial_stream)); CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), str)); CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), initial_stream)); } template <> void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X, const float beta, float* Y) { caffe_gpu_scal<float>(N, beta, Y); caffe_gpu_axpy<float>(N, alpha, X, Y); } template <> void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X, const double beta, double* Y) { caffe_gpu_scal<double>(N, beta, Y); caffe_gpu_axpy<double>(N, alpha, X, Y); } template <> void caffe_gpu_dot<float>(const int n, const float* x, const float* y, float* out) { CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_dot<double>(const int n, const double* x, const double* y, double * out) { CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_asum<float>(const int n, const float* x, float* y) { CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_asum<double>(const int n, const double* x, double* y) { CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_scale<float>(const int n, const float alpha, const float *x, float* y) { CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <> void caffe_gpu_scale<double>(const int n, const double alpha, const double *x, double* y) { CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <typename Dtype> __global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } template <typename Dtype> void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) { if (alpha == 0) { CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn) return; } // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template void caffe_gpu_set<int>(const int N, const int alpha, int* Y); template void caffe_gpu_set<float>(const int N, const float alpha, float* Y); template void caffe_gpu_set<double>(const int N, const double alpha, double* Y); template <typename Dtype> __global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] += alpha; } } template <> void caffe_gpu_add_scalar(const 
int N, const float alpha, float* Y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template <> void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template <typename Dtype> __global__ void add_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] + b[index]; } } template <> void caffe_gpu_add<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_add<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void sub_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] - b[index]; } } template <> void caffe_gpu_sub<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_sub<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void mul_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * b[index]; } } template <> void caffe_gpu_mul<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_mul<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void div_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] / b[index]; } } template <> void caffe_gpu_div<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_div<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = abs(a[index]); } } template <> void caffe_gpu_abs<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( abs_kernel<float>), 
dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_abs<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <typename Dtype> __global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = exp(a[index]); } } template <> void caffe_gpu_exp<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( exp_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_exp<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <typename Dtype> __global__ void log_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = log(a[index]); } } template <> void caffe_gpu_log<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( log_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_log<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( log_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <typename Dtype> __global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = pow(a[index], alpha); } } template <> void caffe_gpu_powx<float>(const int N, const float* a, const float alpha, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, alpha, y); } template <> void caffe_gpu_powx<double>(const int N, const double* a, const double alpha, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, alpha, y); } DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index]) - (x[index] < Dtype(0))); DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index])); void caffe_gpu_rng_uniform(const int n, unsigned int* r) { CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n)); } template <> void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b, float* r) { CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n)); const float range = b - a; if (range != static_cast<float>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<float>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b, double* r) { CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n)); const double range = b - a; if (range != static_cast<double>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<double>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma, float* r) { CURAND_CHECK( hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma)); } template <> void caffe_gpu_rng_gaussian(const int n, const double mu, const double 
sigma, double* r) { CURAND_CHECK( hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma)); } } // namespace caffe
74df0a894ab8da05665f34cc6f8f059fcb0471a5.cu
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit #include <thrust/device_vector.h> #include <thrust/functional.h> // thrust::plus #include <thrust/reduce.h> #include <cmath> #include "caffe/common.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <> void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const double alpha, const double* A, const double* x, const double beta, double* y) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } /**********************add for pruning************************/ template <> void caffe_gpu_csrmm<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float * csrval,const int * csrrowptr,const int * csrcolind,const int nnz, const float beta, float* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cusparseOperation_t cuTransA = (TransA == CblasNoTrans) ? CUSPARSE_OPERATION_NON_TRANSPOSE:CUSPARSE_OPERATION_TRANSPOSE; cusparseOperation_t cuTransB = (TransB == CblasNoTrans) ? CUSPARSE_OPERATION_NON_TRANSPOSE:CUSPARSE_OPERATION_TRANSPOSE; cuTransB = CUSPARSE_OPERATION_NON_TRANSPOSE; cusparseScsrmm(Caffe::cusparse_handle(),cuTransB,N,M,K,nnz,&alpha,Caffe::cusparse_MatDescr(),csrval,csrrowptr,csrcolind,A,lda,&beta,C,N); } template <> void caffe_gpu_csrmm<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double * csrval,const int * csrrowptr,const int * csrcolind,const int nnz, const double beta, double* C) { // Note that cublas follows fortran order. 
int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cusparseOperation_t cuTransA = (TransA == CblasNoTrans) ? CUSPARSE_OPERATION_NON_TRANSPOSE:CUSPARSE_OPERATION_TRANSPOSE; cusparseOperation_t cuTransB = (TransB == CblasNoTrans) ? CUSPARSE_OPERATION_NON_TRANSPOSE:CUSPARSE_OPERATION_TRANSPOSE; cuTransB = CUSPARSE_OPERATION_NON_TRANSPOSE; cusparseDcsrmm(Caffe::cusparse_handle(),cuTransB,N,M,K,nnz,&alpha,Caffe::cusparse_MatDescr(),csrval,csrrowptr,csrcolind,A,lda,&beta,C,N); } template <> void caffe_gpu_csrmv<float>(const CBLAS_TRANSPOSE TransA,const int M,const int N,const float alpha,const float * csrval,const int * csrrowptr,const int * csrcolind,const int nnz,const float * x,const float beta,float * y) { cusparseOperation_t cuTransA = (TransA==CblasNoTrans)?CUSPARSE_OPERATION_NON_TRANSPOSE:CUSPARSE_OPERATION_TRANSPOSE; /* cusparseHandle_t cphandle; cusparseCreate(&cphandle); cusparseMatDescr_t descrA; cusparseCreateMatDescr(&descrA); cusparseSetMatType(descrA,CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(descrA,CUSPARSE_INDEX_BASE_ZERO); */ //cusparseScsrmv(cphandle,cuTransA,M,N,nnz,&alpha,descrA,csrval,csrrowptr,csrcolind,x,&beta,y); cusparseScsrmv(Caffe::cusparse_handle(),cuTransA,M,N,nnz,&alpha,Caffe::cusparse_MatDescr(),csrval,csrrowptr,csrcolind,x,&beta,y); /* cudaDeviceSynchronize(); cusparseDestroyMatDescr(descrA); cusparseDestroy(cphandle); */ } template <> void caffe_gpu_csrmv<double>(const CBLAS_TRANSPOSE TransA,const int M,const int N,const double alpha,const double * csrval,const int * csrrowptr,const int * csrcolind,const int nnz,const double * x,const double beta,double * y) { cusparseOperation_t cuTransA = (TransA==CblasNoTrans)?CUSPARSE_OPERATION_NON_TRANSPOSE:CUSPARSE_OPERATION_TRANSPOSE; /* cusparseHandle_t cphandle; cusparseCreate(&cphandle); cusparseMatDescr_t descrA; cusparseCreateMatDescr(&descrA); cusparseSetMatType(descrA,CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(descrA,CUSPARSE_INDEX_BASE_ZERO); */ //cusparseDcsrmv(cphandle,cuTransA,M,N,nnz,&alpha,descrA,csrval,csrrowptr,csrcolind,x,&beta,y); cusparseDcsrmv(Caffe::cusparse_handle(),cuTransA,M,N,nnz,&alpha,Caffe::cusparse_MatDescr(),csrval,csrrowptr,csrcolind,x,&beta,y); /* cudaDeviceSynchronize(); cusparseDestroyMatDescr(descrA); cusparseDestroy(cphandle); */ } /**************************************************************/ template <> void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X, float* Y) { CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } template <> void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X, double* Y) { CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) { if (X != Y) { CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn) } } template <> void caffe_gpu_scal<float>(const int N, const float alpha, float *X) { CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal<double>(const int N, const double alpha, double *X) { CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal<float>(const int N, const float alpha, float* X, cudaStream_t str) { cudaStream_t initial_stream; CUBLAS_CHECK(cublasGetStream(Caffe::cublas_handle(), &initial_stream)); CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), str)); CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); 
CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), initial_stream)); } template <> void caffe_gpu_scal<double>(const int N, const double alpha, double* X, cudaStream_t str) { cudaStream_t initial_stream; CUBLAS_CHECK(cublasGetStream(Caffe::cublas_handle(), &initial_stream)); CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), str)); CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), initial_stream)); } template <> void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X, const float beta, float* Y) { caffe_gpu_scal<float>(N, beta, Y); caffe_gpu_axpy<float>(N, alpha, X, Y); } template <> void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X, const double beta, double* Y) { caffe_gpu_scal<double>(N, beta, Y); caffe_gpu_axpy<double>(N, alpha, X, Y); } template <> void caffe_gpu_dot<float>(const int n, const float* x, const float* y, float* out) { CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_dot<double>(const int n, const double* x, const double* y, double * out) { CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_asum<float>(const int n, const float* x, float* y) { CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_asum<double>(const int n, const double* x, double* y) { CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_scale<float>(const int n, const float alpha, const float *x, float* y) { CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <> void caffe_gpu_scale<double>(const int n, const double alpha, const double *x, double* y) { CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <typename Dtype> __global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } template <typename Dtype> void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) { if (alpha == 0) { CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn) return; } // NOLINT_NEXT_LINE(whitespace/operators) set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template void caffe_gpu_set<int>(const int N, const int alpha, int* Y); template void caffe_gpu_set<float>(const int N, const float alpha, float* Y); template void caffe_gpu_set<double>(const int N, const double alpha, double* Y); template <typename Dtype> __global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] += alpha; } } template <> void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) { // NOLINT_NEXT_LINE(whitespace/operators) add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template <> void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) { // NOLINT_NEXT_LINE(whitespace/operators) add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template <typename Dtype> __global__ void add_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] + b[index]; } } template <> void caffe_gpu_add<float>(const int N, const float* a, const float* b, float* y) { // 
NOLINT_NEXT_LINE(whitespace/operators) add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_add<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void sub_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] - b[index]; } } template <> void caffe_gpu_sub<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_sub<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void mul_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * b[index]; } } template <> void caffe_gpu_mul<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_mul<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void div_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] / b[index]; } } template <> void caffe_gpu_div<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_div<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = abs(a[index]); } } template <> void caffe_gpu_abs<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_abs<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = exp(a[index]); } } template <> void caffe_gpu_exp<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) exp_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_exp<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void log_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = log(a[index]); } } template <> void caffe_gpu_log<float>(const int N, const float* a, float* y) 
{ // NOLINT_NEXT_LINE(whitespace/operators) log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_log<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = pow(a[index], alpha); } } template <> void caffe_gpu_powx<float>(const int N, const float* a, const float alpha, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, alpha, y); } template <> void caffe_gpu_powx<double>(const int N, const double* a, const double alpha, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, alpha, y); } DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index]) - (x[index] < Dtype(0))); DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index])); void caffe_gpu_rng_uniform(const int n, unsigned int* r) { CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n)); } template <> void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b, float* r) { CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n)); const float range = b - a; if (range != static_cast<float>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<float>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b, double* r) { CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n)); const double range = b - a; if (range != static_cast<double>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<double>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma, float* r) { CURAND_CHECK( curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma)); } template <> void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma, double* r) { CURAND_CHECK( curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma)); } } // namespace caffe
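// ---------------------------------------------------------------------------
// Editorial sketch (not part of the original file pair above): caffe_gpu_gemm
// computes the row-major product C = alpha*A*B + beta*C by handing cuBLAS the
// operands in swapped order, i.e. it evaluates C^T = B^T * A^T in cuBLAS's
// column-major convention. The standalone helper below shows the same argument
// layout with a raw cublasSgemm call; the helper name and its use outside
// Caffe are assumptions for illustration only.
// ---------------------------------------------------------------------------
#include <cublas_v2.h>

// Row-major C(MxN) = A(MxK) * B(KxN), no transposes, alpha = 1, beta = 0.
static inline cublasStatus_t rowmajor_sgemm(cublasHandle_t handle,
                                            int M, int N, int K,
                                            const float* A, const float* B,
                                            float* C) {
  const float alpha = 1.0f, beta = 0.0f;
  // Swap A and B and the m/n extents exactly as caffe_gpu_gemm does above:
  // the column-major product B^T * A^T is the transpose of the row-major A*B.
  return cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                     N, M, K, &alpha, B, N, A, K, &beta, C, N);
}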
6eeb518a65fc00b8b6881ff23d18a0173e49fc63.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<cuda_runtime.h> #include<sys/time.h> #include<stdint.h> #include<stdio.h> #define CHECK(call)\ {\ const hipError_t error = call;\ if(error != hipSuccess)\ {\ printf("Error: %s, %d\n", __FILE__, __LINE__);\ printf("Code: %d, reason: %s\n", error, hipGetErrorString(error));\ exit(0);\ }\ }\ double cpu_sec(void) { struct timeval tp; gettimeofday(&tp, NULL); return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6); } void check_num(float *c, float *g, int n) { double epsilon = 1.0E-8; int match = 1; for (int i = 0; i < n; i++) { if (abs(c[i] - g[i]) > epsilon) { match = 0; printf("Don't match!\n"); printf("host %5.2f device %5.2f at current %d\n", c[i], g[i], i); break; } } if (match) printf("Array match\n\n"); return; } void init_data(float *inp, int n) { time_t t; srand((unsigned) time(&t)); for (int i = 0; i< n; i++) inp[i] = (float)(rand() & 0xFF) /10.0f; } void mat_sum(float *a, float *b, float *c, int x, int y) { float *aa = a; float *bb = b; float *cc = c; for(int j = 0; j < y; j++) { for(int i = 0; i < x; i++) cc[i] = aa[i] + bb[i]; aa += x; bb += x; cc += x; } } __global__ void mat_sum_g(float *a, float *b, float *c, int x, int y) { unsigned int i = threadIdx.x + blockIdx.x * blockDim.x; if(i < x){ for(int j =0; j < y; j++) { int idx = i + j * x; c[idx] = a[idx] + b[idx]; } } } int main() { int x = 1 << 6, y = 1 << 6; int n = x * y; size_t num = n * sizeof(float); float *ha, *hb, *cpu, *gpu; ha = (float *)malloc(num); hb = (float *)malloc(num); gpu = (float *)malloc(num); cpu = (float *)malloc(num); init_data(ha, n); init_data(hb, n); memset(gpu, 0, num); memset(cpu, 0, num); double start, duration; start = cpu_sec(); mat_sum(ha, hb, cpu, x, y); duration = cpu_sec() - start; printf("Mat sum cpu time cost %f ms\n", duration*1000); float *da, *db, *dc; hipMalloc((float **)&da, num); hipMalloc((float **)&db, num); hipMalloc((float **)&dc, num); hipMemcpy(da, ha, num, hipMemcpyHostToDevice); hipMemcpy(db, hb, num, hipMemcpyHostToDevice); dim3 block(32, 1); dim3 grid((x + block.x - 1) / block.x, 1); start = cpu_sec(); hipLaunchKernelGGL(( mat_sum_g), dim3(grid), dim3(block), 0, 0, da, db, dc, x, y); hipDeviceSynchronize(); duration = cpu_sec() - start; printf("Mat sum GPU <<<(%d, %d), (%d,%d)>>> time cost %f ms\n", grid.x, grid.y, block.x, block.y, duration * 1000); hipMemcpy(gpu, dc, num, hipMemcpyDeviceToHost); check_num(cpu, gpu, n); hipFree(da); hipFree(db); hipFree(dc); free(ha); free(hb); free(cpu); free(gpu); hipDeviceReset(); int c = getchar(); return 0; }
6eeb518a65fc00b8b6881ff23d18a0173e49fc63.cu
#include<cuda_runtime.h> #include<sys/time.h> #include<stdint.h> #include<stdio.h> #define CHECK(call)\ {\ const cudaError_t error = call;\ if(error != cudaSuccess)\ {\ printf("Error: %s, %d\n", __FILE__, __LINE__);\ printf("Code: %d, reason: %s\n", error, cudaGetErrorString(error));\ exit(0);\ }\ }\ double cpu_sec(void) { struct timeval tp; gettimeofday(&tp, NULL); return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6); } void check_num(float *c, float *g, int n) { double epsilon = 1.0E-8; int match = 1; for (int i = 0; i < n; i++) { if (abs(c[i] - g[i]) > epsilon) { match = 0; printf("Don't match!\n"); printf("host %5.2f device %5.2f at current %d\n", c[i], g[i], i); break; } } if (match) printf("Array match\n\n"); return; } void init_data(float *inp, int n) { time_t t; srand((unsigned) time(&t)); for (int i = 0; i< n; i++) inp[i] = (float)(rand() & 0xFF) /10.0f; } void mat_sum(float *a, float *b, float *c, int x, int y) { float *aa = a; float *bb = b; float *cc = c; for(int j = 0; j < y; j++) { for(int i = 0; i < x; i++) cc[i] = aa[i] + bb[i]; aa += x; bb += x; cc += x; } } __global__ void mat_sum_g(float *a, float *b, float *c, int x, int y) { unsigned int i = threadIdx.x + blockIdx.x * blockDim.x; if(i < x){ for(int j =0; j < y; j++) { int idx = i + j * x; c[idx] = a[idx] + b[idx]; } } } int main() { int x = 1 << 6, y = 1 << 6; int n = x * y; size_t num = n * sizeof(float); float *ha, *hb, *cpu, *gpu; ha = (float *)malloc(num); hb = (float *)malloc(num); gpu = (float *)malloc(num); cpu = (float *)malloc(num); init_data(ha, n); init_data(hb, n); memset(gpu, 0, num); memset(cpu, 0, num); double start, duration; start = cpu_sec(); mat_sum(ha, hb, cpu, x, y); duration = cpu_sec() - start; printf("Mat sum cpu time cost %f ms\n", duration*1000); float *da, *db, *dc; cudaMalloc((float **)&da, num); cudaMalloc((float **)&db, num); cudaMalloc((float **)&dc, num); cudaMemcpy(da, ha, num, cudaMemcpyHostToDevice); cudaMemcpy(db, hb, num, cudaMemcpyHostToDevice); dim3 block(32, 1); dim3 grid((x + block.x - 1) / block.x, 1); start = cpu_sec(); mat_sum_g<<<grid, block>>>(da, db, dc, x, y); cudaDeviceSynchronize(); duration = cpu_sec() - start; printf("Mat sum GPU <<<(%d, %d), (%d,%d)>>> time cost %f ms\n", grid.x, grid.y, block.x, block.y, duration * 1000); cudaMemcpy(gpu, dc, num, cudaMemcpyDeviceToHost); check_num(cpu, gpu, n); cudaFree(da); cudaFree(db); cudaFree(dc); free(ha); free(hb); free(cpu); free(gpu); cudaDeviceReset(); int c = getchar(); return 0; }
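// ---------------------------------------------------------------------------
// Editorial sketch (not part of the original file pair above): the CHECK macro
// defined in the file is never applied to the cudaMalloc/cudaMemcpy/kernel
// calls in main(). A minimal helper showing how it could guard the device
// allocation and upload; the helper name is an assumption for illustration,
// and the snippet assumes the CHECK macro from the file above is in scope.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>

static float *alloc_and_upload(const float *host, size_t bytes)
{
    float *dev = NULL;
    CHECK(cudaMalloc((void **)&dev, bytes));
    CHECK(cudaMemcpy(dev, host, bytes, cudaMemcpyHostToDevice));
    return dev;
}
// After a kernel launch, launch failures and asynchronous errors can be
// surfaced the same way:
//   CHECK(cudaGetLastError());
//   CHECK(cudaDeviceSynchronize());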
7a8d9b69bc0efdabd7ac39424d3bc3c6376c7870.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef _REDUCTION_KERNEL_H_ #define _REDUCTION_KERNEL_H_ #include <stdio.h> #include "reduction.h" __global__ void reduction_naive(float* d_odata, float* d_idata, int num_elements) { int idx = (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; d_odata[idx] = d_idata[idx]+d_idata[idx+num_elements/2]; } #define COALESCED_NUM 16 #define blockDimX 512 #define blockDimY 1 #define gridDimX (gridDim.x) #define gridDimY (gridDim.y) #define idx (blockIdx.x*blockDimX+threadIdx.x) #define idy (blockIdx.y*blockDimY+threadIdx.y) #define bidy (blockIdx.y) #define bidx (blockIdx.x) #define tidx (threadIdx.x) #define tidy (threadIdx.y) #define merger_y 1 #define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM) #define globalDimX 65536 #define globalDimY 1 __global__ void reduction_opt_0(float * A, int size, int segSize) { #pragma gCompiler gValue segSize 262144 int k; float sum; int nidx; __shared__ float shared_0[512]; nidx=((((tidx/16)*2048)+(idx&15))+((idx/512)*16)); float tmp_2; float tmp_3; float tmp_0; float tmp_1; sum=0; for (k=0; k<size; k=(k+262144)) { float r; r=A[(nidx+k)]; sum+=r; } tmp_0=sum; __syncthreads(); sum=0; for (k=0; k<size; k=(k+262144)) { float r; r=A[((nidx+131072)+k)]; sum+=r; } tmp_1=sum; __syncthreads(); float a; float b; float c; a=tmp_0; b=tmp_1; c=(a+b); tmp_2=c; sum=0; for (k=0; k<size; k=(k+262144)) { float r; r=A[((nidx+65536)+k)]; sum+=r; } tmp_0=sum; __syncthreads(); sum=0; for (k=0; k<size; k=(k+262144)) { float r; r=A[(((nidx+65536)+131072)+k)]; sum+=r; } tmp_1=sum; __syncthreads(); a=tmp_0; b=tmp_1; c=(a+b); tmp_3=c; a=tmp_2; b=tmp_3; c=(a+b); shared_0[(tidx+0)]=c; __syncthreads(); if ((nidx<32768)) { float a; float b; float c; a=shared_0[(tidx+0)]; b=shared_0[(tidx+256)]; c=(a+b); shared_0[(tidx+0)]=c; } __syncthreads(); if ((nidx<16384)) { float a; float b; float c; a=shared_0[(tidx+0)]; b=shared_0[(tidx+128)]; c=(a+b); shared_0[(tidx+0)]=c; } __syncthreads(); if ((nidx<8192)) { float a; float b; float c; a=shared_0[(tidx+0)]; b=shared_0[(tidx+64)]; c=(a+b); shared_0[(tidx+0)]=c; } __syncthreads(); if ((nidx<4096)) { float a; float b; float c; a=shared_0[(tidx+0)]; b=shared_0[(tidx+32)]; c=(a+b); shared_0[(tidx+0)]=c; } __syncthreads(); if ((nidx<2048)) { float a; float b; float c; a=shared_0[(tidx+0)]; b=shared_0[(tidx+16)]; c=(a+b); { A[nidx]=c; } } } #define COALESCED_NUM 16 #define blockDimX 512 #define blockDimY 1 #define gridDimX (gridDim.x) #define gridDimY (gridDim.y) #define idx (blockIdx.x*blockDimX+threadIdx.x) #define idy (blockIdx.y*blockDimY+threadIdx.y) #define bidy (blockIdx.y) #define bidx (blockIdx.x) #define tidx (threadIdx.x) #define tidy (threadIdx.y) #define merger_y 1 #define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM) #define globalDimX 512 #define globalDimY 1 __global__ void reduction_opt_1(float * A, int size, int segSize) { #pragma gCompiler gValue segSize 262144 __shared__ float shared_1[512]; float tmp_4; float tmp_5; float a; float b; float c; { a=A[idx]; } { b=A[(idx+((262144/128)/2))]; } c=(a+b); tmp_4=c; { a=A[(idx+512)]; } { b=A[((idx+512)+((262144/128)/2))]; } c=(a+b); tmp_5=c; a=tmp_4; b=tmp_5; c=(a+b); shared_1[(tidx+0)]=c; __syncthreads(); if ((idx<256)) { float a; float b; float c; a=shared_1[(tidx+0)]; b=shared_1[(tidx+256)]; c=(a+b); shared_1[(tidx+0)]=c; } __syncthreads(); if ((idx<128)) { float a; float b; float c; a=shared_1[(tidx+0)]; b=shared_1[(tidx+128)]; c=(a+b); 
shared_1[(tidx+0)]=c; } __syncthreads(); if ((idx<64)) { float a; float b; float c; a=shared_1[(tidx+0)]; b=shared_1[(tidx+64)]; c=(a+b); shared_1[(tidx+0)]=c; } __syncthreads(); if ((idx<32)) { float a; float b; float c; a=shared_1[(tidx+0)]; b=shared_1[(tidx+32)]; c=(a+b); shared_1[(tidx+0)]=c; } __syncthreads(); if ((idx<16)) { float a; float b; float c; a=shared_1[(tidx+0)]; b=shared_1[(tidx+16)]; c=(a+b); shared_1[(tidx+0)]=c; } __syncthreads(); if ((idx<8)) { float a; float b; float c; a=shared_1[(tidx+0)]; b=shared_1[(tidx+8)]; c=(a+b); shared_1[(tidx+0)]=c; } __syncthreads(); if ((idx<4)) { float a; float b; float c; a=shared_1[(tidx+0)]; b=shared_1[(tidx+4)]; c=(a+b); shared_1[(tidx+0)]=c; } __syncthreads(); if ((idx<2)) { float a; float b; float c; a=shared_1[(tidx+0)]; b=shared_1[(tidx+2)]; c=(a+b); shared_1[(tidx+0)]=c; } __syncthreads(); if ((idx<1)) { float a; float b; float c; a=shared_1[(tidx+0)]; b=shared_1[(tidx+1)]; c=(a+b); { A[idx]=c; } } } #define COALESCED_NUM 16 #define blockDimX 512 #define blockDimY 1 #define gridDimX (gridDim.x) #define gridDimY (gridDim.y) #define idx (blockIdx.x*blockDimX+threadIdx.x) #define idy (blockIdx.y*blockDimY+threadIdx.y) #define bidy (blockIdx.y) #define bidx (blockIdx.x) #define tidx (threadIdx.x) #define tidy (threadIdx.y) #define merger_y 1 #define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM) #define globalDimX 65536 #define globalDimY 1 __global__ void reduction_complex_opt_0(float * A, float * B, int size, int segSize) { #pragma gCompiler gValue segSize 262144 int k; float sum; int nidx; __shared__ float shared_0[512]; nidx=((((tidx/16)*2048)+(idx&15))+((idx/512)*16)); float tmp_4; float tmp_5; float tmp_2; float tmp_3; sum=0; for (k=0; k<size; k=(k+262144)) { float real; float img; struct float2 * tmp_0; struct float2 tmp_1; tmp_0=((struct float2 * )A); tmp_1=tmp_0[(nidx+k)]; real=tmp_1.x; img=tmp_1.y; sum+=real; sum+=img; } tmp_2=sum; __syncthreads(); sum=0; for (k=0; k<size; k=(k+262144)) { float real; float img; struct float2 * tmp_0; struct float2 tmp_1; tmp_0=((struct float2 * )A); tmp_1=tmp_0[((nidx+131072)+k)]; real=tmp_1.x; img=tmp_1.y; sum+=real; sum+=img; } tmp_3=sum; __syncthreads(); float a; float b; float c; a=tmp_2; b=tmp_3; c=(a+b); tmp_4=c; sum=0; for (k=0; k<size; k=(k+262144)) { float real; float img; struct float2 * tmp_0; struct float2 tmp_1; tmp_0=((struct float2 * )A); tmp_1=tmp_0[((nidx+65536)+k)]; real=tmp_1.x; img=tmp_1.y; sum+=real; sum+=img; } tmp_2=sum; __syncthreads(); sum=0; for (k=0; k<size; k=(k+262144)) { float real; float img; struct float2 * tmp_0; struct float2 tmp_1; tmp_0=((struct float2 * )A); tmp_1=tmp_0[(((nidx+65536)+131072)+k)]; real=tmp_1.x; img=tmp_1.y; sum+=real; sum+=img; } tmp_3=sum; __syncthreads(); a=tmp_2; b=tmp_3; c=(a+b); tmp_5=c; a=tmp_4; b=tmp_5; c=(a+b); shared_0[(tidx+0)]=c; __syncthreads(); if ((nidx<32768)) { float a; float b; float c; a=shared_0[(tidx+0)]; b=shared_0[(tidx+256)]; c=(a+b); shared_0[(tidx+0)]=c; } __syncthreads(); if ((nidx<16384)) { float a; float b; float c; a=shared_0[(tidx+0)]; b=shared_0[(tidx+128)]; c=(a+b); shared_0[(tidx+0)]=c; } __syncthreads(); if ((nidx<8192)) { float a; float b; float c; a=shared_0[(tidx+0)]; b=shared_0[(tidx+64)]; c=(a+b); shared_0[(tidx+0)]=c; } __syncthreads(); if ((nidx<4096)) { float a; float b; float c; a=shared_0[(tidx+0)]; b=shared_0[(tidx+32)]; c=(a+b); shared_0[(tidx+0)]=c; } __syncthreads(); if ((nidx<2048)) { float a; float b; float c; a=shared_0[(tidx+0)]; b=shared_0[(tidx+16)]; c=(a+b); 
{ B[nidx]=c; } } } #define COALESCED_NUM 16 #define blockDimX 512 #define blockDimY 1 #define gridDimX (gridDim.x) #define gridDimY (gridDim.y) #define idx (blockIdx.x*blockDimX+threadIdx.x) #define idy (blockIdx.y*blockDimY+threadIdx.y) #define bidy (blockIdx.y) #define bidx (blockIdx.x) #define tidx (threadIdx.x) #define tidy (threadIdx.y) #define merger_y 1 #define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM) #define globalDimX 512 #define globalDimY 1 __global__ void reduction_complex_opt_1(float * A, float * B, int size, int segSize) { #pragma gCompiler gValue segSize 262144 __shared__ float shared_1[512]; float tmp_6; float tmp_7; float a; float b; float c; { a=B[idx]; } { b=B[(idx+((262144/128)/2))]; } c=(a+b); tmp_6=c; { a=B[(idx+512)]; } { b=B[((idx+512)+((262144/128)/2))]; } c=(a+b); tmp_7=c; a=tmp_6; b=tmp_7; c=(a+b); shared_1[(tidx+0)]=c; __syncthreads(); if ((idx<256)) { float a; float b; float c; a=shared_1[(tidx+0)]; b=shared_1[(tidx+256)]; c=(a+b); shared_1[(tidx+0)]=c; } __syncthreads(); if ((idx<128)) { float a; float b; float c; a=shared_1[(tidx+0)]; b=shared_1[(tidx+128)]; c=(a+b); shared_1[(tidx+0)]=c; } __syncthreads(); if ((idx<64)) { float a; float b; float c; a=shared_1[(tidx+0)]; b=shared_1[(tidx+64)]; c=(a+b); shared_1[(tidx+0)]=c; } __syncthreads(); if ((idx<32)) { float a; float b; float c; a=shared_1[(tidx+0)]; b=shared_1[(tidx+32)]; c=(a+b); shared_1[(tidx+0)]=c; } __syncthreads(); if ((idx<16)) { float a; float b; float c; a=shared_1[(tidx+0)]; b=shared_1[(tidx+16)]; c=(a+b); shared_1[(tidx+0)]=c; } __syncthreads(); if ((idx<8)) { float a; float b; float c; a=shared_1[(tidx+0)]; b=shared_1[(tidx+8)]; c=(a+b); shared_1[(tidx+0)]=c; } __syncthreads(); if ((idx<4)) { float a; float b; float c; a=shared_1[(tidx+0)]; b=shared_1[(tidx+4)]; c=(a+b); shared_1[(tidx+0)]=c; } __syncthreads(); if ((idx<2)) { float a; float b; float c; a=shared_1[(tidx+0)]; b=shared_1[(tidx+2)]; c=(a+b); shared_1[(tidx+0)]=c; } __syncthreads(); if ((idx<1)) { float a; float b; float c; a=shared_1[(tidx+0)]; b=shared_1[(tidx+1)]; c=(a+b); { B[idx]=c; } } } #endif // #ifndef _REDUCTION_KERNEL_H_
7a8d9b69bc0efdabd7ac39424d3bc3c6376c7870.cu
#ifndef _REDUCTION_KERNEL_H_ #define _REDUCTION_KERNEL_H_ #include <stdio.h> #include "reduction.h" __global__ void reduction_naive(float* d_odata, float* d_idata, int num_elements) { int idx = (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; d_odata[idx] = d_idata[idx]+d_idata[idx+num_elements/2]; } #define COALESCED_NUM 16 #define blockDimX 512 #define blockDimY 1 #define gridDimX (gridDim.x) #define gridDimY (gridDim.y) #define idx (blockIdx.x*blockDimX+threadIdx.x) #define idy (blockIdx.y*blockDimY+threadIdx.y) #define bidy (blockIdx.y) #define bidx (blockIdx.x) #define tidx (threadIdx.x) #define tidy (threadIdx.y) #define merger_y 1 #define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM) #define globalDimX 65536 #define globalDimY 1 __global__ void reduction_opt_0(float * A, int size, int segSize) { #pragma gCompiler gValue segSize 262144 int k; float sum; int nidx; __shared__ float shared_0[512]; nidx=((((tidx/16)*2048)+(idx&15))+((idx/512)*16)); float tmp_2; float tmp_3; float tmp_0; float tmp_1; sum=0; for (k=0; k<size; k=(k+262144)) { float r; r=A[(nidx+k)]; sum+=r; } tmp_0=sum; __syncthreads(); sum=0; for (k=0; k<size; k=(k+262144)) { float r; r=A[((nidx+131072)+k)]; sum+=r; } tmp_1=sum; __syncthreads(); float a; float b; float c; a=tmp_0; b=tmp_1; c=(a+b); tmp_2=c; sum=0; for (k=0; k<size; k=(k+262144)) { float r; r=A[((nidx+65536)+k)]; sum+=r; } tmp_0=sum; __syncthreads(); sum=0; for (k=0; k<size; k=(k+262144)) { float r; r=A[(((nidx+65536)+131072)+k)]; sum+=r; } tmp_1=sum; __syncthreads(); a=tmp_0; b=tmp_1; c=(a+b); tmp_3=c; a=tmp_2; b=tmp_3; c=(a+b); shared_0[(tidx+0)]=c; __syncthreads(); if ((nidx<32768)) { float a; float b; float c; a=shared_0[(tidx+0)]; b=shared_0[(tidx+256)]; c=(a+b); shared_0[(tidx+0)]=c; } __syncthreads(); if ((nidx<16384)) { float a; float b; float c; a=shared_0[(tidx+0)]; b=shared_0[(tidx+128)]; c=(a+b); shared_0[(tidx+0)]=c; } __syncthreads(); if ((nidx<8192)) { float a; float b; float c; a=shared_0[(tidx+0)]; b=shared_0[(tidx+64)]; c=(a+b); shared_0[(tidx+0)]=c; } __syncthreads(); if ((nidx<4096)) { float a; float b; float c; a=shared_0[(tidx+0)]; b=shared_0[(tidx+32)]; c=(a+b); shared_0[(tidx+0)]=c; } __syncthreads(); if ((nidx<2048)) { float a; float b; float c; a=shared_0[(tidx+0)]; b=shared_0[(tidx+16)]; c=(a+b); { A[nidx]=c; } } } #define COALESCED_NUM 16 #define blockDimX 512 #define blockDimY 1 #define gridDimX (gridDim.x) #define gridDimY (gridDim.y) #define idx (blockIdx.x*blockDimX+threadIdx.x) #define idy (blockIdx.y*blockDimY+threadIdx.y) #define bidy (blockIdx.y) #define bidx (blockIdx.x) #define tidx (threadIdx.x) #define tidy (threadIdx.y) #define merger_y 1 #define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM) #define globalDimX 512 #define globalDimY 1 __global__ void reduction_opt_1(float * A, int size, int segSize) { #pragma gCompiler gValue segSize 262144 __shared__ float shared_1[512]; float tmp_4; float tmp_5; float a; float b; float c; { a=A[idx]; } { b=A[(idx+((262144/128)/2))]; } c=(a+b); tmp_4=c; { a=A[(idx+512)]; } { b=A[((idx+512)+((262144/128)/2))]; } c=(a+b); tmp_5=c; a=tmp_4; b=tmp_5; c=(a+b); shared_1[(tidx+0)]=c; __syncthreads(); if ((idx<256)) { float a; float b; float c; a=shared_1[(tidx+0)]; b=shared_1[(tidx+256)]; c=(a+b); shared_1[(tidx+0)]=c; } __syncthreads(); if ((idx<128)) { float a; float b; float c; a=shared_1[(tidx+0)]; b=shared_1[(tidx+128)]; c=(a+b); shared_1[(tidx+0)]=c; } __syncthreads(); if ((idx<64)) { float a; float b; float c; 
a=shared_1[(tidx+0)]; b=shared_1[(tidx+64)]; c=(a+b); shared_1[(tidx+0)]=c; } __syncthreads(); if ((idx<32)) { float a; float b; float c; a=shared_1[(tidx+0)]; b=shared_1[(tidx+32)]; c=(a+b); shared_1[(tidx+0)]=c; } __syncthreads(); if ((idx<16)) { float a; float b; float c; a=shared_1[(tidx+0)]; b=shared_1[(tidx+16)]; c=(a+b); shared_1[(tidx+0)]=c; } __syncthreads(); if ((idx<8)) { float a; float b; float c; a=shared_1[(tidx+0)]; b=shared_1[(tidx+8)]; c=(a+b); shared_1[(tidx+0)]=c; } __syncthreads(); if ((idx<4)) { float a; float b; float c; a=shared_1[(tidx+0)]; b=shared_1[(tidx+4)]; c=(a+b); shared_1[(tidx+0)]=c; } __syncthreads(); if ((idx<2)) { float a; float b; float c; a=shared_1[(tidx+0)]; b=shared_1[(tidx+2)]; c=(a+b); shared_1[(tidx+0)]=c; } __syncthreads(); if ((idx<1)) { float a; float b; float c; a=shared_1[(tidx+0)]; b=shared_1[(tidx+1)]; c=(a+b); { A[idx]=c; } } } #define COALESCED_NUM 16 #define blockDimX 512 #define blockDimY 1 #define gridDimX (gridDim.x) #define gridDimY (gridDim.y) #define idx (blockIdx.x*blockDimX+threadIdx.x) #define idy (blockIdx.y*blockDimY+threadIdx.y) #define bidy (blockIdx.y) #define bidx (blockIdx.x) #define tidx (threadIdx.x) #define tidy (threadIdx.y) #define merger_y 1 #define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM) #define globalDimX 65536 #define globalDimY 1 __global__ void reduction_complex_opt_0(float * A, float * B, int size, int segSize) { #pragma gCompiler gValue segSize 262144 int k; float sum; int nidx; __shared__ float shared_0[512]; nidx=((((tidx/16)*2048)+(idx&15))+((idx/512)*16)); float tmp_4; float tmp_5; float tmp_2; float tmp_3; sum=0; for (k=0; k<size; k=(k+262144)) { float real; float img; struct float2 * tmp_0; struct float2 tmp_1; tmp_0=((struct float2 * )A); tmp_1=tmp_0[(nidx+k)]; real=tmp_1.x; img=tmp_1.y; sum+=real; sum+=img; } tmp_2=sum; __syncthreads(); sum=0; for (k=0; k<size; k=(k+262144)) { float real; float img; struct float2 * tmp_0; struct float2 tmp_1; tmp_0=((struct float2 * )A); tmp_1=tmp_0[((nidx+131072)+k)]; real=tmp_1.x; img=tmp_1.y; sum+=real; sum+=img; } tmp_3=sum; __syncthreads(); float a; float b; float c; a=tmp_2; b=tmp_3; c=(a+b); tmp_4=c; sum=0; for (k=0; k<size; k=(k+262144)) { float real; float img; struct float2 * tmp_0; struct float2 tmp_1; tmp_0=((struct float2 * )A); tmp_1=tmp_0[((nidx+65536)+k)]; real=tmp_1.x; img=tmp_1.y; sum+=real; sum+=img; } tmp_2=sum; __syncthreads(); sum=0; for (k=0; k<size; k=(k+262144)) { float real; float img; struct float2 * tmp_0; struct float2 tmp_1; tmp_0=((struct float2 * )A); tmp_1=tmp_0[(((nidx+65536)+131072)+k)]; real=tmp_1.x; img=tmp_1.y; sum+=real; sum+=img; } tmp_3=sum; __syncthreads(); a=tmp_2; b=tmp_3; c=(a+b); tmp_5=c; a=tmp_4; b=tmp_5; c=(a+b); shared_0[(tidx+0)]=c; __syncthreads(); if ((nidx<32768)) { float a; float b; float c; a=shared_0[(tidx+0)]; b=shared_0[(tidx+256)]; c=(a+b); shared_0[(tidx+0)]=c; } __syncthreads(); if ((nidx<16384)) { float a; float b; float c; a=shared_0[(tidx+0)]; b=shared_0[(tidx+128)]; c=(a+b); shared_0[(tidx+0)]=c; } __syncthreads(); if ((nidx<8192)) { float a; float b; float c; a=shared_0[(tidx+0)]; b=shared_0[(tidx+64)]; c=(a+b); shared_0[(tidx+0)]=c; } __syncthreads(); if ((nidx<4096)) { float a; float b; float c; a=shared_0[(tidx+0)]; b=shared_0[(tidx+32)]; c=(a+b); shared_0[(tidx+0)]=c; } __syncthreads(); if ((nidx<2048)) { float a; float b; float c; a=shared_0[(tidx+0)]; b=shared_0[(tidx+16)]; c=(a+b); { B[nidx]=c; } } } #define COALESCED_NUM 16 #define blockDimX 512 #define blockDimY 
1 #define gridDimX (gridDim.x) #define gridDimY (gridDim.y) #define idx (blockIdx.x*blockDimX+threadIdx.x) #define idy (blockIdx.y*blockDimY+threadIdx.y) #define bidy (blockIdx.y) #define bidx (blockIdx.x) #define tidx (threadIdx.x) #define tidy (threadIdx.y) #define merger_y 1 #define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM) #define globalDimX 512 #define globalDimY 1 __global__ void reduction_complex_opt_1(float * A, float * B, int size, int segSize) { #pragma gCompiler gValue segSize 262144 __shared__ float shared_1[512]; float tmp_6; float tmp_7; float a; float b; float c; { a=B[idx]; } { b=B[(idx+((262144/128)/2))]; } c=(a+b); tmp_6=c; { a=B[(idx+512)]; } { b=B[((idx+512)+((262144/128)/2))]; } c=(a+b); tmp_7=c; a=tmp_6; b=tmp_7; c=(a+b); shared_1[(tidx+0)]=c; __syncthreads(); if ((idx<256)) { float a; float b; float c; a=shared_1[(tidx+0)]; b=shared_1[(tidx+256)]; c=(a+b); shared_1[(tidx+0)]=c; } __syncthreads(); if ((idx<128)) { float a; float b; float c; a=shared_1[(tidx+0)]; b=shared_1[(tidx+128)]; c=(a+b); shared_1[(tidx+0)]=c; } __syncthreads(); if ((idx<64)) { float a; float b; float c; a=shared_1[(tidx+0)]; b=shared_1[(tidx+64)]; c=(a+b); shared_1[(tidx+0)]=c; } __syncthreads(); if ((idx<32)) { float a; float b; float c; a=shared_1[(tidx+0)]; b=shared_1[(tidx+32)]; c=(a+b); shared_1[(tidx+0)]=c; } __syncthreads(); if ((idx<16)) { float a; float b; float c; a=shared_1[(tidx+0)]; b=shared_1[(tidx+16)]; c=(a+b); shared_1[(tidx+0)]=c; } __syncthreads(); if ((idx<8)) { float a; float b; float c; a=shared_1[(tidx+0)]; b=shared_1[(tidx+8)]; c=(a+b); shared_1[(tidx+0)]=c; } __syncthreads(); if ((idx<4)) { float a; float b; float c; a=shared_1[(tidx+0)]; b=shared_1[(tidx+4)]; c=(a+b); shared_1[(tidx+0)]=c; } __syncthreads(); if ((idx<2)) { float a; float b; float c; a=shared_1[(tidx+0)]; b=shared_1[(tidx+2)]; c=(a+b); shared_1[(tidx+0)]=c; } __syncthreads(); if ((idx<1)) { float a; float b; float c; a=shared_1[(tidx+0)]; b=shared_1[(tidx+1)]; c=(a+b); { B[idx]=c; } } } #endif // #ifndef _REDUCTION_KERNEL_H_
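// ---------------------------------------------------------------------------
// Editorial sketch (not part of the original file pair above): a host-side
// driver for reduction_naive. The optimized kernels hard-code their geometry
// (the blockDimX/globalDimX macros and the segSize pragma), but the naive
// kernel only folds the upper half of the input onto the lower half, so it has
// to be launched repeatedly. This driver is an assumption for illustration; it
// requires num_elements to be a power of two because the kernel has no bounds
// check, so every launched thread must map to a valid element.
// ---------------------------------------------------------------------------
// Assumes reduction_naive from the file above is in scope.
static void reduce_naive_host(float *d_data, int num_elements)
{
    for (int n = num_elements; n > 1; n /= 2) {
        int active = n / 2;                        // threads doing work this pass
        int threads = active < 512 ? active : 512;
        dim3 grid(active / threads, 1);            // exact: both are powers of two
        // In-place is safe within a pass: writers touch only indices < n/2,
        // and each thread reads only its own index and index + n/2.
        reduction_naive<<<grid, threads>>>(d_data, d_data, n);
    }
    // d_data[0] now holds the sum (error checking omitted for brevity).
}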
019570d51d421876aaa13a62d54a29aaab986dcf.hip
// !!! This is a file automatically generated by hipify!!! #include "image-processing.h" //#if defined(_MSC_VER) && (_MSC_VER >= 1600) //#pragma execution_character_set("utf-8") //#endif //#include <iostream> //#include <cmath> //#include <limits.h> //#include <hip/hip_runtime.h> //#include <opencv2/opencv.hpp> //using namespace cv; //using namespace std; //// cpu: //extern "C" void resizeImage(const Mat &src, Mat &dst, const Size &s); //// cpu: xlen ylen //extern "C" void transferImage(const Mat &src, Mat &dst, int xlen, int ylen); //// cpu: num = 0 | 1 //extern "C" void mirrorImage(const Mat &src, Mat &dst, int num); //// cpu: //extern "C" void rotateImage(const Mat &src, Mat &dst, int degree); //// //extern "C" void cutImage(const Mat &src, Mat &dst, int dir, int len); // cuda //extern "C" bool initCUDA(); //extern "C" void resizeImageGPU(const Mat &_src, Mat &_dst, const Size &s); //extern "C" void transferImageGPU(const Mat &_src, Mat &_dst, int xlen, int ylen); //extern "C" void mirrorImageGPU(const Mat &_src, Mat &_dst, int num); //extern "C" void rotateImageGPU(const Mat &src, Mat &dst, int degree); //extern "C" void cutImageGPU(const Mat &_src, Mat &_dst, int dir, int len); // cpu: extern "C" void resizeImage(const Mat &src, Mat &dst, const Size &s) { dst = Mat::zeros(s, CV_8UC3); double fRows = s.height / (float)src.rows; double fCols = s.width / (float)src.cols; int pX = 0; int pY = 0; for (int i = 0; i != dst.rows; ++i) { for (int j = 0; j != dst.cols; ++j) { pX = cvRound(i / (double)fRows); // pY = cvRound(j / (double)fCols); if (pX < src.rows && pX >= 0 && pY < src.cols && pY >= 0) { dst.at<Vec3b>(i, j)[0] = src.at<Vec3b>(pX, pY)[0]; // B dst.at<Vec3b>(i, j)[1] = src.at<Vec3b>(pX, pY)[1]; // G dst.at<Vec3b>(i, j)[2] = src.at<Vec3b>(pX, pY)[2]; // R } } } } // cpu: xlen ylen extern "C" void transferImage(const Mat &src, Mat &dst, int xlen, int ylen) { int width = src.cols, height = src.rows; width += abs(xlen); height += abs(ylen); dst = Mat::zeros(Size(width, height), CV_8UC3); int xadd = xlen < 0 ? 0 : abs(xlen); int yadd = ylen < 0 ? 
abs(ylen) : 0; for (int i = 0; i != src.rows; ++i) { for (int j = 0; j != src.cols; ++j) { dst.at<Vec3b>(i + yadd, j + xadd)[0] = src.at<Vec3b>(i, j)[0]; dst.at<Vec3b>(i + yadd, j + xadd)[1] = src.at<Vec3b>(i, j)[1]; dst.at<Vec3b>(i + yadd, j + xadd)[2] = src.at<Vec3b>(i, j)[2]; } } } // cpu: num = 0 | 1; (0:x1:y) extern "C" void mirrorImage(const Mat &src, Mat &dst, int num) { dst = Mat::zeros(Size(src.cols, src.rows), CV_8UC3); if (0 == num) { for (int i = 0, x = src.rows - 1; i != src.rows; ++i, --x) { for (int j = 0, y = 0; j != src.cols; ++j, ++y) { dst.at<Vec3b>(x, y)[0] = src.at<Vec3b>(i, j)[0]; dst.at<Vec3b>(x, y)[1] = src.at<Vec3b>(i, j)[1]; dst.at<Vec3b>(x, y)[2] = src.at<Vec3b>(i, j)[2]; } } } else { for (int i = 0, x = 0; i != src.rows; ++i, ++x) { for (int j = 0, y = src.cols - 1; j != src.cols; ++j, --y) { dst.at<Vec3b>(x, y)[0] = src.at<Vec3b>(i, j)[0]; dst.at<Vec3b>(x, y)[1] = src.at<Vec3b>(i, j)[1]; dst.at<Vec3b>(x, y)[2] = src.at<Vec3b>(i, j)[2]; } } } } // cpu: http://blog.csdn.net/ab1322583838/article/details/52102732 http://blog.csdn.net/fengbingchun/article/details/17713429 extern "C" void rotateImage(const Mat &src, Mat &dst, int degree) { degree = -degree; // double angle = degree * CV_PI / 180.; // double a = sin(angle), b = cos(angle); int width = src.cols, height = src.rows; // int width_rotate = int(height * fabs(a) + width * fabs(b)); int height_rotate = int(width * fabs(a) + height * fabs(b)); dst = Mat::zeros(Size(width_rotate, height_rotate), CV_8UC3); // map // [ m0 m1 m2 ] ===> [ A11 A12 b1 ] // [ m3 m4 m5 ] ===> [ A21 A22 b2 ] float map[6]; Mat map_matrix = Mat(2, 3, CV_32F, map); // CvPoint2D32f center = cvPoint2D32f(width / 2, height / 2); CvMat map_matrix2 = map_matrix; cv2DRotationMatrix(center, degree, 1.0, &map_matrix2); map[2] += (width_rotate - width) / 2; map[5] += (height_rotate - height) / 2; warpAffine(src, dst, map_matrix, Size(width_rotate, height_rotate), 0, 0, 0); // 0,0,0 1,0,0 // imshow("cpu", dst); } // extern "C" void cutImage(const Mat &src, Mat &dst, int dir, int len) { if (0 == dir) { dst = Mat(Size(src.cols + len, src.rows), CV_8UC3); uchar *src_data = src.data; uchar *dst_data = dst.data; double ratio = double(len) / double(dst.rows); for (int i = 0, x = 0; i < src.rows; i++, x++) { int start = (src.rows - i) * ratio; for (int j = start, y = 0; j < src.cols + start; j++, y++) { *(dst_data + (i*dst.cols + j) * 3 + 0) = *(src_data + (x*src.cols + y) * 3 + 0); *(dst_data + (i*dst.cols + j) * 3 + 1) = *(src_data + (x*src.cols + y) * 3 + 1); *(dst_data + (i*dst.cols + j) * 3 + 2) = *(src_data + (x*src.cols + y) * 3 + 2); } } } else { dst = Mat(Size(src.cols, src.rows + len), CV_8UC3); uchar *src_data = src.data; uchar *dst_data = dst.data; double ratio = double(len) / double(dst.cols); for (int j = 0, y = 0; j < src.cols; j++, y++) { int start = j * ratio; for (int i = start, x = 0; i < src.rows + start; i++, x++) { *(dst_data + (i*dst.cols + j) * 3 + 0) = *(src_data + (x*src.cols + y) * 3 + 0); *(dst_data + (i*dst.cols + j) * 3 + 1) = *(src_data + (x*src.cols + y) * 3 + 1); *(dst_data + (i*dst.cols + j) * 3 + 2) = *(src_data + (x*src.cols + y) * 3 + 2); } } } } ////////////////////////////////// // cuda extern "C" bool initCUDA() { int count; hipGetDeviceCount(&count); if (count == 0) { fprintf(stderr, "There is no device.\n"); return false; } int i; for (i = 0; i < count; i++) { hipDeviceProp_t prop; if (hipGetDeviceProperties(&prop, i) == hipSuccess) { if (prop.major >= 1) { break; } } } if (i == count) { fprintf(stderr, "There is 
no device supporting CUDA 1.x.\n"); return false; } hipSetDevice(i); return true; } //////////////////////////////// // gpu extern "C" __global__ void resizeKernel(uchar* _src_dev, uchar * _dst_dev, int _src_step, int _dst_step, int _src_rows, int _src_cols, int _dst_rows, int _dst_cols) { int i = blockIdx.x; int j = blockIdx.y; double fRows = _dst_rows / (float)_src_rows; double fCols = _dst_cols / (float)_src_cols; int pX = 0; int pY = 0; pX = (int)(i / fRows); pY = (int)(j / fCols); if (pX < _src_rows && pX >= 0 && pY < _src_cols && pY >= 0) { *(_dst_dev + i*_dst_step + 3 * j + 0) = *(_src_dev + pX*_src_step + 3 * pY); *(_dst_dev + i*_dst_step + 3 * j + 1) = *(_src_dev + pX*_src_step + 3 * pY + 1); *(_dst_dev + i*_dst_step + 3 * j + 2) = *(_src_dev + pX*_src_step + 3 * pY + 2); } } extern "C" void resizeImageGPU(const Mat &_src, Mat &_dst, const Size &s) { _dst = Mat(s, CV_8UC3); uchar *src_data = _src.data; int width = _src.cols; int height = _src.rows; uchar *src_dev, *dst_dev; hipMalloc((void**)&src_dev, 3 * width*height * sizeof(uchar)); hipMalloc((void**)&dst_dev, 3 * s.width * s.height * sizeof(uchar)); hipMemcpy(src_dev, src_data, 3 * width*height * sizeof(uchar), hipMemcpyHostToDevice); int src_step = _src.step; // _src int dst_step = _dst.step; // _dst dim3 grid(s.height, s.width); hipLaunchKernelGGL(( resizeKernel), dim3(grid), dim3(1) , 0, 0, src_dev, dst_dev, src_step, dst_step, height, width, s.height, s.width); hipMemcpy(_dst.data, dst_dev, 3 * s.width * s.height * sizeof(uchar), hipMemcpyDeviceToHost); } //////////////////////////////// // gpu extern "C" __global__ void transferKernel(uchar* _src_dev, uchar * _dst_dev, int width, int height, int _src_rows, int _src_cols, int xlen, int ylen) { int i = blockIdx.x; int j = blockIdx.y; int xadd = xlen < 0 ? 0 : abs(xlen); int yadd = ylen < 0 ? 
abs(ylen) : 0; int offset = i*gridDim.y + j; int tran_offset = (i + yadd) * width + j + xadd; if (i < gridDim.x && j >= 0 && j < gridDim.y && i >= 0) { *(_dst_dev + tran_offset * 3 + 0) = *(_src_dev + offset * 3 + 0); *(_dst_dev + tran_offset * 3 + 1) = *(_src_dev + offset * 3 + 1); *(_dst_dev + tran_offset * 3 + 2) = *(_src_dev + offset * 3 + 2); } } extern "C" void transferImageGPU(const Mat &_src, Mat &_dst, int xlen, int ylen) { int width = _src.cols, height = _src.rows; width += abs(xlen); height += abs(ylen); _dst = Mat::zeros(Size(width, height), CV_8UC3); uchar *src_data = _src.data; uchar *src_dev, *dst_dev; hipMalloc((void**)&src_dev, 3 * _src.rows * _src.cols * sizeof(uchar)); hipMalloc((void**)&dst_dev, 3 * width * height * sizeof(uchar)); hipMemcpy(src_dev, src_data, 3 * _src.rows * _src.cols * sizeof(uchar), hipMemcpyHostToDevice); hipMemset(dst_dev, 0, 3 * width * height * sizeof(uchar)); dim3 grid(_src.rows, _src.cols); // cout << _src.rows << " " << _src.cols << endl; hipLaunchKernelGGL(( transferKernel) , dim3(grid), dim3(1) , 0, 0, src_dev, dst_dev, width, height, _src.rows, _src.cols, xlen, ylen); // cout << width << " " << height << " " << _src.rows << " " << _src.cols << endl; hipMemcpy(_dst.data, dst_dev, 3 * width * height * sizeof(uchar), hipMemcpyDeviceToHost); } //////////////////////////////// // gpu extern "C" __global__ void mirrorKernel(uchar* _src_dev, uchar * _dst_dev, int height, int width, int num) { int i = blockIdx.x; int j = blockIdx.y; int offset = i*gridDim.y + j; int x, y; if (0 == num) { x = height - i - 1; y = j; } else { x = i; y = width - j - 1; } int mirror_offset = x*gridDim.y + y; if (i < gridDim.x && j >= 0 && j < gridDim.y && i >= 0) { *(_dst_dev + mirror_offset * 3 + 0) = *(_src_dev + offset * 3 + 0); *(_dst_dev + mirror_offset * 3 + 1) = *(_src_dev + offset * 3 + 1); *(_dst_dev + mirror_offset * 3 + 2) = *(_src_dev + offset * 3 + 2); } } extern "C" void mirrorImageGPU(const Mat &_src, Mat &_dst, int num) { _dst = Mat::zeros(Size(_src.cols, _src.rows), CV_8UC3); uchar *src_data = _src.data; uchar *src_dev, *dst_dev; hipMalloc((void**)&src_dev, 3 * _src.rows * _src.cols * sizeof(uchar)); hipMalloc((void**)&dst_dev, 3 * _src.rows * _src.cols * sizeof(uchar)); hipMemcpy(src_dev, src_data, 3 * _src.rows * _src.cols * sizeof(uchar), hipMemcpyHostToDevice); dim3 grid(_src.rows, _src.cols); hipLaunchKernelGGL(( mirrorKernel) , dim3(grid), dim3(1) , 0, 0, src_dev, dst_dev, _src.rows, _src.cols, num); hipMemcpy(_dst.data, dst_dev, 3 * _src.rows * _src.cols * sizeof(uchar), hipMemcpyDeviceToHost); } //////////////////////////////// // gpu extern "C" __device__ int saturateCast(double num) { return round(num); } __global__ void rotateKernel(uchar* _src_dev, uchar * _dst_dev, int width, int height, const double m0, const double m1, const double m2, const double m3, const double m4, const double m5, int round_delta) { int y = blockIdx.x; int x = blockIdx.y; // if (y < gridDim.x && y > 0 && x < gridDim.y && x > 0) { int adelta = saturateCast(m0 * x * 1024); int bdelta = saturateCast(m3 * x * 1024); int X0 = saturateCast((m1 * y + m2) * 1024) + round_delta; int Y0 = saturateCast((m4 * y + m5) * 1024) + round_delta; int X = (X0 + adelta) >> 10; int Y = (Y0 + bdelta) >> 10; if ((unsigned)X < width && (unsigned)Y < height) { *(_dst_dev + (y*gridDim.y + x) * 3 + 0) = *(_src_dev + (Y*width + X) * 3 + 0); *(_dst_dev + (y*gridDim.y + x) * 3 + 1) = *(_src_dev + (Y*width + X) * 3 + 1); *(_dst_dev + (y*gridDim.y + x) * 3 + 2) = *(_src_dev + (Y*width + X) * 3 + 2); 
} else { *(_dst_dev + (y*gridDim.y + x) * 3 + 0) = 0; *(_dst_dev + (y*gridDim.y + x) * 3 + 1) = 0; *(_dst_dev + (y*gridDim.y + x) * 3 + 2) = 0; } } } extern "C" void rotateImageGPU(const Mat &src, Mat &dst, int degree) { degree = -degree; double angle = degree * CV_PI / 180.; double alpha = cos(angle); double beta = sin(angle); int width = src.cols; int height = src.rows; int width_rotate = cvRound(width * fabs(alpha) + height * fabs(beta)); int height_rotate = cvRound(height * fabs(alpha) + width * fabs(beta)); double m[6]; m[0] = alpha; m[1] = beta; // m[2] = (1 - alpha) * width / 2. - beta * height / 2.; m[2] = height * -beta; // cout << width << " " << height << endl; // cout << width_rotate << " " << height_rotate << endl; // cout << alpha << " " << beta << endl; // cout << m[2] << endl; m[3] = -m[1]; m[4] = m[0]; // m[5] = beta * width / 2. + (1 - alpha) * height / 2.; m[5] = 0; // cout << "m[5] " << m[5] << endl; Mat M = Mat(2, 3, CV_64F, m); dst = Mat(cv::Size(width_rotate, height_rotate), src.type(), cv::Scalar::all(0)); double D = m[0] * m[4] - m[1] * m[3]; D = D != 0 ? 1. / D : 0; double A11 = m[4] * D, A22 = m[0] * D; m[0] = A11; m[1] *= -D; m[3] *= -D; m[4] = A22; double b1 = -m[0] * m[2] - m[1] * m[5]; double b2 = -m[3] * m[2] - m[4] * m[5]; m[2] = b1; m[5] = b2; int round_delta = 512; // // for (int y = 0; y < height_rotate; ++y) // { // for (int x = 0; x < width_rotate; ++x) // { // int adelta = cv::saturate_cast<int>(m[0] * x * 1024); // int bdelta = cv::saturate_cast<int>(m[3] * x * 1024); // int X0 = cv::saturate_cast<int>((m[1] * y + m[2]) * 1024) + round_delta; // int Y0 = cv::saturate_cast<int>((m[4] * y + m[5]) * 1024) + round_delta; // int X = (X0 + adelta) >> 10; // int Y = (Y0 + bdelta) >> 10; // if ((unsigned)X < width && (unsigned)Y < height) // { // // dst.at<cv::Vec3b>(y, x) = src.at<cv::Vec3b>(Y, X); // *(dst.data + (y*width_rotate+x)*3 + 0) = *(src.data + (Y*width+X)*3 + 0); // *(dst.data + (y*width_rotate+x)*3 + 1) = *(src.data + (Y*width+X)*3 + 1); // *(dst.data + (y*width_rotate+x)*3 + 2) = *(src.data + (Y*width+X)*3 + 2); // } // } // } // cout << saturate_cast<int>(-99999999999) << " **" << endl; // cout << INT_MAX << endl; uchar *src_data = src.data; uchar *src_dev, *dst_dev; hipMalloc((void**)&src_dev, 3 * src.rows * src.cols * sizeof(uchar)); hipMalloc((void**)&dst_dev, 3 * width_rotate * height_rotate * sizeof(uchar)); hipMemcpy(src_dev, src_data, 3 * src.rows * src.cols * sizeof(uchar), hipMemcpyHostToDevice); hipMemset(dst_dev, 0, width_rotate * height_rotate * sizeof(uchar)); dim3 grid(height_rotate, width_rotate); hipLaunchKernelGGL(( rotateKernel) , dim3(grid), dim3(1) , 0, 0, src_dev, dst_dev, width, height, m[0], m[1], m[2], m[3], m[4], m[5], round_delta); hipMemcpy(dst.data, dst_dev, 3 * width_rotate * height_rotate * sizeof(uchar), hipMemcpyDeviceToHost); } //////////////////////////////// // gpu extern "C" __global__ void cutKernel(uchar* _src_dev, uchar * _dst_dev, int width, double ratio, int dir) { int i = blockIdx.x; int j = blockIdx.y; int x = 0, y = 0; if (0 == dir) { y = (gridDim.x - i) * ratio; } else { x = j * ratio; } /* int start = (gridDim.x - i) * ratio; int y = start; */ int offset = i*gridDim.y + j; int tran_offset = (i + x) * width + j + y; if (i < gridDim.x && j >= 0 && j < gridDim.y && i >= 0) { *(_dst_dev + tran_offset * 3 + 0) = *(_src_dev + offset * 3 + 0); *(_dst_dev + tran_offset * 3 + 1) = *(_src_dev + offset * 3 + 1); *(_dst_dev + tran_offset * 3 + 2) = *(_src_dev + offset * 3 + 2); } } /*__global__ void 
cutKernel1(uchar* _src_dev, uchar * _dst_dev, int width, double ratio) { int i = blockIdx.x; int j = blockIdx.y; int start = j * ratio; int x = start; int offset = i*gridDim.y + j; int tran_offset = (i+x) * width + j; if (i < gridDim.x && j >= 0 && j < gridDim.y && i >= 0) { *(_dst_dev + tran_offset*3 + 0) = *(_src_dev + offset*3 + 0); *(_dst_dev + tran_offset*3 + 1) = *(_src_dev + offset*3 + 1); *(_dst_dev + tran_offset*3 + 2) = *(_src_dev + offset*3 + 2); } }*/ extern "C" void cutImageGPU(const Mat &_src, Mat &_dst, int dir, int len) { int width = _src.cols, height = _src.rows; /* if (0 == dir) { width += len; _dst = Mat::zeros(Size(width, height), CV_8UC3); uchar *src_data = _src.data; uchar *src_dev , *dst_dev; hipMalloc( (void**)&src_dev, 3 * _src.rows * _src.cols * sizeof(uchar) ); hipMalloc( (void**)&dst_dev, 3 * width * height * sizeof(uchar) ); hipMemcpy(src_dev, src_data, 3 * _src.rows * _src.cols * sizeof(uchar), hipMemcpyHostToDevice); hipMemset(dst_dev, 0, 3 * width * height * sizeof(uchar)); double ratio = (double)len / _dst.rows; dim3 grid(_src.rows, _src.cols); hipLaunchKernelGGL(( cutKernel) , dim3(grid), dim3(1) , 0, 0, src_dev, dst_dev, width, ratio, dir); hipMemcpy(_dst.data, dst_dev, 3 * width * height * sizeof(uchar), hipMemcpyDeviceToHost); } else { height += len; _dst = Mat::zeros(Size(width, height), CV_8UC3); uchar *src_data = _src.data; uchar *src_dev , *dst_dev; hipMalloc( (void**)&src_dev, 3 * _src.rows * _src.cols * sizeof(uchar) ); hipMalloc( (void**)&dst_dev, 3 * width * height * sizeof(uchar) ); hipMemcpy(src_dev, src_data, 3 * _src.rows * _src.cols * sizeof(uchar), hipMemcpyHostToDevice); hipMemset(dst_dev, 0, 3 * width * height * sizeof(uchar)); double ratio = (double)len / _dst.cols; dim3 grid(_src.rows, _src.cols); cutKernel1 <<< grid, 1 >>>(src_dev, dst_dev, width, ratio, dir); hipMemcpy(_dst.data, dst_dev, 3 * width * height * sizeof(uchar), hipMemcpyDeviceToHost); }*/ double ratio; if (0 == dir) { width += len; ratio = (double)len / height; } else { height += len; ratio = (double)len / width; } _dst = Mat::zeros(Size(width, height), CV_8UC3); uchar *src_data = _src.data; uchar *src_dev, *dst_dev; hipMalloc((void**)&src_dev, 3 * _src.rows * _src.cols * sizeof(uchar)); hipMalloc((void**)&dst_dev, 3 * width * height * sizeof(uchar)); hipMemcpy(src_dev, src_data, 3 * _src.rows * _src.cols * sizeof(uchar), hipMemcpyHostToDevice); hipMemset(dst_dev, 0, 3 * width * height * sizeof(uchar)); dim3 grid(_src.rows, _src.cols); hipLaunchKernelGGL(( cutKernel) , dim3(grid), dim3(1) , 0, 0, src_dev, dst_dev, width, ratio, dir); hipMemcpy(_dst.data, dst_dev, 3 * width * height * sizeof(uchar), hipMemcpyDeviceToHost); } //int main() //{ // Mat src = cv::imread("f.bmp" , 1); // load the image // Mat dst_scale_cpu; // Mat dst_scale_gpu; // Mat dst_trans_cpu; // Mat dst_trans_gpu; // Mat dst_mirror_cpu; // Mat dst_mirror_gpu; // Mat dst_rotate_cpu; // Mat dst_rotate_gpu; // Mat dst_cut_cpu; // Mat dst_cut_gpu; ///* // struct timeval start; // struct timeval end; // unsigned long timer; // gettimeofday(&start, NULL); // start timing // resizeImage(src, dst_scale_cpu, Size(src.cols * 2, src.rows * 2)); // CPU image scaling; result stored in dst_cpu; the third parameter is the target size // gettimeofday(&end, NULL); // stop timing // timer = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec; // cout << "cpu scaling time: " << timer << "us\n"; //*/ // struct timeval start; // struct timeval end; // unsigned long timer; // gettimeofday(&start, NULL); // start timing ///////////////////////////// // resizeImage(src, dst_scale_cpu, Size(src.cols * 2, src.rows * 2)); // transferImage(src,
dst_trans_cpu, 100, -100); // mirrorImage(src, dst_mirror_cpu, 1); // rotateImage(src, dst_rotate_cpu, 30); // cutImage(src, dst_cut_cpu, 0, 50); ///////////////////////////// // gettimeofday(&end, NULL); // stop timing // timer = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec; // cout << "cpu time: " << timer << "us\n"; // initCUDA(); // gettimeofday(&start, NULL); ///////////////////////////// // resizeImageGPU(src, dst_scale_gpu, Size(src.cols * 2, src.rows * 2)); // transferImageGPU(src, dst_trans_gpu, 100, -100); // mirrorImageGPU(src, dst_mirror_gpu, 1); // rotateImageGPU(src, dst_rotate_gpu, 30); // cutImageGPU(src, dst_cut_gpu, 0, 50); ///////////////////////////// // gettimeofday(&end, NULL); // timer = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec; // cout << "gpu time: " << timer << "us\n"; //////////////////////////// // imshow("original", src); // imshow("scale_cpu", dst_scale_cpu); // imshow("scale_gpu", dst_scale_gpu); // imshow("translate_cpu", dst_trans_cpu); // imshow("translate_gpu", dst_trans_gpu); // imshow("mirror_cpu", dst_mirror_cpu); // imshow("mirror_gpu", dst_mirror_gpu); // imshow("rotate_cpu", dst_rotate_cpu); // imshow("rotate_gpu", dst_rotate_gpu); // imshow("shear_cpu", dst_cut_cpu); // imshow("shear_gpu", dst_cut_gpu); // // transferImage(src, dst_trans_cpu, 100, -100); // // imshow("cpu_trans", dst_trans_cpu); // // transferImageGPU(src, dst_trans_gpu, 100, -100); // // imshow("gpu_trans", dst_trans_gpu); // // mirrorImage(src, dst_mirror_cpu, 1); // // mirrorImageGPU(src, dst_mirror_gpu, 1); // // imshow("gpu", dst_mirror_gpu); // // rotateImage(src, dst_rotate_cpu, 30); // // rotateImageGPU(src, dst_rotate_gpu, 30); // // imshow("gpu", dst_rotate_gpu); // // cutImage(src, dst_cut_cpu, 0, 50); // // imshow("cpu", dst_cut_cpu); // // cutImageGPU(src, dst_cut_gpu, 0, 50); // // imshow("gpu", dst_cut_gpu); ///* // initCUDA(); // Mat dst_gpu; // gettimeofday(&start, NULL); // resizeImageGPU(src, dst_gpu, Size(src.cols * 2, src.rows * 2)); //// imshow("src", src); //// imshow(" ", dst_gpu); // gettimeofday(&end, NULL); // timer = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec; // cout << "gpu scaling time: " << timer << "us\n"; //// imshow("Demo", dst_gpu); //*/ // waitKey(0); // return 0; //}
019570d51d421876aaa13a62d54a29aaab986dcf.cu
#include "image-processing.h" //#if defined(_MSC_VER) && (_MSC_VER >= 1600) //#pragma execution_character_set("utf-8") //#endif //#include <iostream> //#include <cmath> //#include <limits.h> //#include <cuda.h> //#include <opencv2/opencv.hpp> //using namespace cv; //using namespace std; //// cpu: 对图像进行缩放 //extern "C" void resizeImage(const Mat &src, Mat &dst, const Size &s); //// cpu: 对图像进行平移 xlen左右 ylen上下 //extern "C" void transferImage(const Mat &src, Mat &dst, int xlen, int ylen); //// cpu: 对图像镜面变换 num = 0 | 1 //extern "C" void mirrorImage(const Mat &src, Mat &dst, int num); //// cpu: 对图像旋转变换 //extern "C" void rotateImage(const Mat &src, Mat &dst, int degree); //// 对图像进行错切 //extern "C" void cutImage(const Mat &src, Mat &dst, int dir, int len); // cuda 设备检测 //extern "C" bool initCUDA(); //extern "C" void resizeImageGPU(const Mat &_src, Mat &_dst, const Size &s); //extern "C" void transferImageGPU(const Mat &_src, Mat &_dst, int xlen, int ylen); //extern "C" void mirrorImageGPU(const Mat &_src, Mat &_dst, int num); //extern "C" void rotateImageGPU(const Mat &src, Mat &dst, int degree); //extern "C" void cutImageGPU(const Mat &_src, Mat &_dst, int dir, int len); // cpu: 对图像进行缩放 extern "C" void resizeImage(const Mat &src, Mat &dst, const Size &s) { dst = Mat::zeros(s, CV_8UC3); double fRows = s.height / (float)src.rows; double fCols = s.width / (float)src.cols; int pX = 0; int pY = 0; for (int i = 0; i != dst.rows; ++i) { for (int j = 0; j != dst.cols; ++j) { pX = cvRound(i / (double)fRows); // 四舍五入 pY = cvRound(j / (double)fCols); if (pX < src.rows && pX >= 0 && pY < src.cols && pY >= 0) { dst.at<Vec3b>(i, j)[0] = src.at<Vec3b>(pX, pY)[0]; // B dst.at<Vec3b>(i, j)[1] = src.at<Vec3b>(pX, pY)[1]; // G dst.at<Vec3b>(i, j)[2] = src.at<Vec3b>(pX, pY)[2]; // R } } } } // cpu: 对图像进行平移 xlen左右 ylen上下 extern "C" void transferImage(const Mat &src, Mat &dst, int xlen, int ylen) { int width = src.cols, height = src.rows; width += abs(xlen); height += abs(ylen); dst = Mat::zeros(Size(width, height), CV_8UC3); int xadd = xlen < 0 ? 0 : abs(xlen); int yadd = ylen < 0 ? 
abs(ylen) : 0; for (int i = 0; i != src.rows; ++i) { for (int j = 0; j != src.cols; ++j) { dst.at<Vec3b>(i + yadd, j + xadd)[0] = src.at<Vec3b>(i, j)[0]; dst.at<Vec3b>(i + yadd, j + xadd)[1] = src.at<Vec3b>(i, j)[1]; dst.at<Vec3b>(i + yadd, j + xadd)[2] = src.at<Vec3b>(i, j)[2]; } } } // cpu: mirror the image, num = 0 | 1; (0: flip about the x axis; 1: flip about the y axis) extern "C" void mirrorImage(const Mat &src, Mat &dst, int num) { dst = Mat::zeros(Size(src.cols, src.rows), CV_8UC3); if (0 == num) { for (int i = 0, x = src.rows - 1; i != src.rows; ++i, --x) { for (int j = 0, y = 0; j != src.cols; ++j, ++y) { dst.at<Vec3b>(x, y)[0] = src.at<Vec3b>(i, j)[0]; dst.at<Vec3b>(x, y)[1] = src.at<Vec3b>(i, j)[1]; dst.at<Vec3b>(x, y)[2] = src.at<Vec3b>(i, j)[2]; } } } else { for (int i = 0, x = 0; i != src.rows; ++i, ++x) { for (int j = 0, y = src.cols - 1; j != src.cols; ++j, --y) { dst.at<Vec3b>(x, y)[0] = src.at<Vec3b>(i, j)[0]; dst.at<Vec3b>(x, y)[1] = src.at<Vec3b>(i, j)[1]; dst.at<Vec3b>(x, y)[2] = src.at<Vec3b>(i, j)[2]; } } } } // cpu: rotate the image http://blog.csdn.net/ab1322583838/article/details/52102732 http://blog.csdn.net/fengbingchun/article/details/17713429 extern "C" void rotateImage(const Mat &src, Mat &dst, int degree) { degree = -degree; // original is counter-clockwise; negate to rotate clockwise double angle = degree * CV_PI / 180.; // convert to radians double a = sin(angle), b = cos(angle); int width = src.cols, height = src.rows; // size of the rotated image int width_rotate = int(height * fabs(a) + width * fabs(b)); int height_rotate = int(width * fabs(a) + height * fabs(b)); dst = Mat::zeros(Size(width_rotate, height_rotate), CV_8UC3); // rotation map array // [ m0 m1 m2 ] ===> [ A11 A12 b1 ] // [ m3 m4 m5 ] ===> [ A21 A22 b2 ] float map[6]; Mat map_matrix = Mat(2, 3, CV_32F, map); // rotation center CvPoint2D32f center = cvPoint2D32f(width / 2, height / 2); CvMat map_matrix2 = map_matrix; cv2DRotationMatrix(center, degree, 1.0, &map_matrix2); map[2] += (width_rotate - width) / 2; map[5] += (height_rotate - height) / 2; warpAffine(src, dst, map_matrix, Size(width_rotate, height_rotate), 0, 0, 0); // 0,0,0: nearest-neighbor interpolation; 1,0,0: bilinear interpolation // imshow("cpu", dst); } // shear the image extern "C" void cutImage(const Mat &src, Mat &dst, int dir, int len) { if (0 == dir) { dst = Mat(Size(src.cols + len, src.rows), CV_8UC3); uchar *src_data = src.data; uchar *dst_data = dst.data; double ratio = double(len) / double(dst.rows); for (int i = 0, x = 0; i < src.rows; i++, x++) { int start = (src.rows - i) * ratio; for (int j = start, y = 0; j < src.cols + start; j++, y++) { *(dst_data + (i*dst.cols + j) * 3 + 0) = *(src_data + (x*src.cols + y) * 3 + 0); *(dst_data + (i*dst.cols + j) * 3 + 1) = *(src_data + (x*src.cols + y) * 3 + 1); *(dst_data + (i*dst.cols + j) * 3 + 2) = *(src_data + (x*src.cols + y) * 3 + 2); } } } else { dst = Mat(Size(src.cols, src.rows + len), CV_8UC3); uchar *src_data = src.data; uchar *dst_data = dst.data; double ratio = double(len) / double(dst.cols); for (int j = 0, y = 0; j < src.cols; j++, y++) { int start = j * ratio; for (int i = start, x = 0; i < src.rows + start; i++, x++) { *(dst_data + (i*dst.cols + j) * 3 + 0) = *(src_data + (x*src.cols + y) * 3 + 0); *(dst_data + (i*dst.cols + j) * 3 + 1) = *(src_data + (x*src.cols + y) * 3 + 1); *(dst_data + (i*dst.cols + j) * 3 + 2) = *(src_data + (x*src.cols + y) * 3 + 2); } } } } ////////////////////////////////// // CUDA device detection extern "C" bool initCUDA() { int count; cudaGetDeviceCount(&count); if (count == 0) { fprintf(stderr, "There is no device.\n"); return false; } int i; for (i = 0; i < count; i++) { cudaDeviceProp prop; if (cudaGetDeviceProperties(&prop, i) == cudaSuccess)
{ if (prop.major >= 1) { break; } } } if (i == count) { fprintf(stderr, "There is no device supporting CUDA 1.x.\n"); return false; } cudaSetDevice(i); return true; } //////////////////////////////// // gpu scaling extern "C" __global__ void resizeKernel(uchar* _src_dev, uchar * _dst_dev, int _src_step, int _dst_step, int _src_rows, int _src_cols, int _dst_rows, int _dst_cols) { int i = blockIdx.x; int j = blockIdx.y; double fRows = _dst_rows / (float)_src_rows; double fCols = _dst_cols / (float)_src_cols; int pX = 0; int pY = 0; pX = (int)(i / fRows); pY = (int)(j / fCols); if (pX < _src_rows && pX >= 0 && pY < _src_cols && pY >= 0) { *(_dst_dev + i*_dst_step + 3 * j + 0) = *(_src_dev + pX*_src_step + 3 * pY); *(_dst_dev + i*_dst_step + 3 * j + 1) = *(_src_dev + pX*_src_step + 3 * pY + 1); *(_dst_dev + i*_dst_step + 3 * j + 2) = *(_src_dev + pX*_src_step + 3 * pY + 2); } } extern "C" void resizeImageGPU(const Mat &_src, Mat &_dst, const Size &s) { _dst = Mat(s, CV_8UC3); uchar *src_data = _src.data; int width = _src.cols; int height = _src.rows; uchar *src_dev, *dst_dev; cudaMalloc((void**)&src_dev, 3 * width*height * sizeof(uchar)); cudaMalloc((void**)&dst_dev, 3 * s.width * s.height * sizeof(uchar)); cudaMemcpy(src_dev, src_data, 3 * width*height * sizeof(uchar), cudaMemcpyHostToDevice); int src_step = _src.step; // bytes per row of _src int dst_step = _dst.step; // bytes per row of _dst dim3 grid(s.height, s.width); resizeKernel<<< grid, 1 >>>(src_dev, dst_dev, src_step, dst_step, height, width, s.height, s.width); cudaMemcpy(_dst.data, dst_dev, 3 * s.width * s.height * sizeof(uchar), cudaMemcpyDeviceToHost); } //////////////////////////////// // gpu translation extern "C" __global__ void transferKernel(uchar* _src_dev, uchar * _dst_dev, int width, int height, int _src_rows, int _src_cols, int xlen, int ylen) { int i = blockIdx.x; int j = blockIdx.y; int xadd = xlen < 0 ? 0 : abs(xlen); int yadd = ylen < 0 ?
abs(ylen) : 0; int offset = i*gridDim.y + j; int tran_offset = (i + yadd) * width + j + xadd; if (i < gridDim.x && j >= 0 && j < gridDim.y && i >= 0) { *(_dst_dev + tran_offset * 3 + 0) = *(_src_dev + offset * 3 + 0); *(_dst_dev + tran_offset * 3 + 1) = *(_src_dev + offset * 3 + 1); *(_dst_dev + tran_offset * 3 + 2) = *(_src_dev + offset * 3 + 2); } } extern "C" void transferImageGPU(const Mat &_src, Mat &_dst, int xlen, int ylen) { int width = _src.cols, height = _src.rows; width += abs(xlen); height += abs(ylen); _dst = Mat::zeros(Size(width, height), CV_8UC3); uchar *src_data = _src.data; uchar *src_dev, *dst_dev; cudaMalloc((void**)&src_dev, 3 * _src.rows * _src.cols * sizeof(uchar)); cudaMalloc((void**)&dst_dev, 3 * width * height * sizeof(uchar)); cudaMemcpy(src_dev, src_data, 3 * _src.rows * _src.cols * sizeof(uchar), cudaMemcpyHostToDevice); cudaMemset(dst_dev, 0, 3 * width * height * sizeof(uchar)); dim3 grid(_src.rows, _src.cols); // cout << _src.rows << " " << _src.cols << endl; transferKernel <<< grid, 1 >>>(src_dev, dst_dev, width, height, _src.rows, _src.cols, xlen, ylen); // cout << width << " " << height << " " << _src.rows << " " << _src.cols << endl; cudaMemcpy(_dst.data, dst_dev, 3 * width * height * sizeof(uchar), cudaMemcpyDeviceToHost); } //////////////////////////////// // gpu mirroring extern "C" __global__ void mirrorKernel(uchar* _src_dev, uchar * _dst_dev, int height, int width, int num) { int i = blockIdx.x; int j = blockIdx.y; int offset = i*gridDim.y + j; int x, y; if (0 == num) { x = height - i - 1; y = j; } else { x = i; y = width - j - 1; } int mirror_offset = x*gridDim.y + y; if (i < gridDim.x && j >= 0 && j < gridDim.y && i >= 0) { *(_dst_dev + mirror_offset * 3 + 0) = *(_src_dev + offset * 3 + 0); *(_dst_dev + mirror_offset * 3 + 1) = *(_src_dev + offset * 3 + 1); *(_dst_dev + mirror_offset * 3 + 2) = *(_src_dev + offset * 3 + 2); } } extern "C" void mirrorImageGPU(const Mat &_src, Mat &_dst, int num) { _dst = Mat::zeros(Size(_src.cols, _src.rows), CV_8UC3); uchar *src_data = _src.data; uchar *src_dev, *dst_dev; cudaMalloc((void**)&src_dev, 3 * _src.rows * _src.cols * sizeof(uchar)); cudaMalloc((void**)&dst_dev, 3 * _src.rows * _src.cols * sizeof(uchar)); cudaMemcpy(src_dev, src_data, 3 * _src.rows * _src.cols * sizeof(uchar), cudaMemcpyHostToDevice); dim3 grid(_src.rows, _src.cols); mirrorKernel <<< grid, 1 >>>(src_dev, dst_dev, _src.rows, _src.cols, num); cudaMemcpy(_dst.data, dst_dev, 3 * _src.rows * _src.cols * sizeof(uchar), cudaMemcpyDeviceToHost); } //////////////////////////////// // gpu rotation extern "C" __device__ int saturateCast(double num) { return round(num); } __global__ void rotateKernel(uchar* _src_dev, uchar * _dst_dev, int width, int height, const double m0, const double m1, const double m2, const double m3, const double m4, const double m5, int round_delta) { int y = blockIdx.x; int x = blockIdx.y; // if (y < gridDim.x && y > 0 && x < gridDim.y && x > 0) { int adelta = saturateCast(m0 * x * 1024); int bdelta = saturateCast(m3 * x * 1024); int X0 = saturateCast((m1 * y + m2) * 1024) + round_delta; int Y0 = saturateCast((m4 * y + m5) * 1024) + round_delta; int X = (X0 + adelta) >> 10; int Y = (Y0 + bdelta) >> 10; if ((unsigned)X < width && (unsigned)Y < height) { *(_dst_dev + (y*gridDim.y + x) * 3 + 0) = *(_src_dev + (Y*width + X) * 3 + 0); *(_dst_dev + (y*gridDim.y + x) * 3 + 1) = *(_src_dev + (Y*width + X) * 3 + 1); *(_dst_dev + (y*gridDim.y + x) * 3 + 2) = *(_src_dev + (Y*width + X) * 3 + 2); } else { *(_dst_dev + (y*gridDim.y + x) * 3 + 0)
= 0; *(_dst_dev + (y*gridDim.y + x) * 3 + 1) = 0; *(_dst_dev + (y*gridDim.y + x) * 3 + 2) = 0; } } } extern "C" void rotateImageGPU(const Mat &src, Mat &dst, int degree) { degree = -degree; double angle = degree * CV_PI / 180.; double alpha = cos(angle); double beta = sin(angle); int width = src.cols; int height = src.rows; int width_rotate = cvRound(width * fabs(alpha) + height * fabs(beta)); int height_rotate = cvRound(height * fabs(alpha) + width * fabs(beta)); double m[6]; m[0] = alpha; m[1] = beta; // m[2] = (1 - alpha) * width / 2. - beta * height / 2.; m[2] = height * -beta; // cout << width << " " << height << endl; // cout << width_rotate << " " << height_rotate << endl; // cout << alpha << " " << beta << endl; // cout << m[2] << endl; m[3] = -m[1]; m[4] = m[0]; // m[5] = beta * width / 2. + (1 - alpha) * height / 2.; m[5] = 0; // cout << "m[5] " << m[5] << endl; Mat M = Mat(2, 3, CV_64F, m); dst = Mat(cv::Size(width_rotate, height_rotate), src.type(), cv::Scalar::all(0)); double D = m[0] * m[4] - m[1] * m[3]; D = D != 0 ? 1. / D : 0; double A11 = m[4] * D, A22 = m[0] * D; m[0] = A11; m[1] *= -D; m[3] *= -D; m[4] = A22; double b1 = -m[0] * m[2] - m[1] * m[5]; double b2 = -m[3] * m[2] - m[4] * m[5]; m[2] = b1; m[5] = b2; int round_delta = 512; // nearest-neighbor interpolation // for (int y = 0; y < height_rotate; ++y) // { // for (int x = 0; x < width_rotate; ++x) // { // int adelta = cv::saturate_cast<int>(m[0] * x * 1024); // int bdelta = cv::saturate_cast<int>(m[3] * x * 1024); // int X0 = cv::saturate_cast<int>((m[1] * y + m[2]) * 1024) + round_delta; // int Y0 = cv::saturate_cast<int>((m[4] * y + m[5]) * 1024) + round_delta; // int X = (X0 + adelta) >> 10; // int Y = (Y0 + bdelta) >> 10; // if ((unsigned)X < width && (unsigned)Y < height) // { // // dst.at<cv::Vec3b>(y, x) = src.at<cv::Vec3b>(Y, X); // *(dst.data + (y*width_rotate+x)*3 + 0) = *(src.data + (Y*width+X)*3 + 0); // *(dst.data + (y*width_rotate+x)*3 + 1) = *(src.data + (Y*width+X)*3 + 1); // *(dst.data + (y*width_rotate+x)*3 + 2) = *(src.data + (Y*width+X)*3 + 2); // } // } // } // cout << saturate_cast<int>(-99999999999) << " **" << endl; // cout << INT_MAX << endl; uchar *src_data = src.data; uchar *src_dev, *dst_dev; cudaMalloc((void**)&src_dev, 3 * src.rows * src.cols * sizeof(uchar)); cudaMalloc((void**)&dst_dev, 3 * width_rotate * height_rotate * sizeof(uchar)); cudaMemcpy(src_dev, src_data, 3 * src.rows * src.cols * sizeof(uchar), cudaMemcpyHostToDevice); cudaMemset(dst_dev, 0, width_rotate * height_rotate * sizeof(uchar)); dim3 grid(height_rotate, width_rotate); rotateKernel <<< grid, 1 >>>(src_dev, dst_dev, width, height, m[0], m[1], m[2], m[3], m[4], m[5], round_delta); cudaMemcpy(dst.data, dst_dev, 3 * width_rotate * height_rotate * sizeof(uchar), cudaMemcpyDeviceToHost); } //////////////////////////////// // gpu shearing extern "C" __global__ void cutKernel(uchar* _src_dev, uchar * _dst_dev, int width, double ratio, int dir) { int i = blockIdx.x; int j = blockIdx.y; int x = 0, y = 0; if (0 == dir) { y = (gridDim.x - i) * ratio; } else { x = j * ratio; } /* int start = (gridDim.x - i) * ratio; int y = start; */ int offset = i*gridDim.y + j; int tran_offset = (i + x) * width + j + y; if (i < gridDim.x && j >= 0 && j < gridDim.y && i >= 0) { *(_dst_dev + tran_offset * 3 + 0) = *(_src_dev + offset * 3 + 0); *(_dst_dev + tran_offset * 3 + 1) = *(_src_dev + offset * 3 + 1); *(_dst_dev + tran_offset * 3 + 2) = *(_src_dev + offset * 3 + 2); } } /*__global__ void cutKernel1(uchar* _src_dev, uchar * _dst_dev, int width, double ratio) { int
i = blockIdx.x; int j = blockIdx.y; int start = j * ratio; int x = start; int offset = i*gridDim.y + j; int tran_offset = (i+x) * width + j; if (i < gridDim.x && j >= 0 && j < gridDim.y && i >= 0) { *(_dst_dev + tran_offset*3 + 0) = *(_src_dev + offset*3 + 0); *(_dst_dev + tran_offset*3 + 1) = *(_src_dev + offset*3 + 1); *(_dst_dev + tran_offset*3 + 2) = *(_src_dev + offset*3 + 2); } }*/ extern "C" void cutImageGPU(const Mat &_src, Mat &_dst, int dir, int len) { int width = _src.cols, height = _src.rows; /* if (0 == dir) { width += len; _dst = Mat::zeros(Size(width, height), CV_8UC3); uchar *src_data = _src.data; uchar *src_dev , *dst_dev; cudaMalloc( (void**)&src_dev, 3 * _src.rows * _src.cols * sizeof(uchar) ); cudaMalloc( (void**)&dst_dev, 3 * width * height * sizeof(uchar) ); cudaMemcpy(src_dev, src_data, 3 * _src.rows * _src.cols * sizeof(uchar), cudaMemcpyHostToDevice); cudaMemset(dst_dev, 0, 3 * width * height * sizeof(uchar)); double ratio = (double)len / _dst.rows; dim3 grid(_src.rows, _src.cols); cutKernel <<< grid, 1 >>>(src_dev, dst_dev, width, ratio, dir); cudaMemcpy(_dst.data, dst_dev, 3 * width * height * sizeof(uchar), cudaMemcpyDeviceToHost); } else { height += len; _dst = Mat::zeros(Size(width, height), CV_8UC3); uchar *src_data = _src.data; uchar *src_dev , *dst_dev; cudaMalloc( (void**)&src_dev, 3 * _src.rows * _src.cols * sizeof(uchar) ); cudaMalloc( (void**)&dst_dev, 3 * width * height * sizeof(uchar) ); cudaMemcpy(src_dev, src_data, 3 * _src.rows * _src.cols * sizeof(uchar), cudaMemcpyHostToDevice); cudaMemset(dst_dev, 0, 3 * width * height * sizeof(uchar)); double ratio = (double)len / _dst.cols; dim3 grid(_src.rows, _src.cols); cutKernel1 <<< grid, 1 >>>(src_dev, dst_dev, width, ratio, dir); cudaMemcpy(_dst.data, dst_dev, 3 * width * height * sizeof(uchar), cudaMemcpyDeviceToHost); }*/ double ratio; if (0 == dir) { width += len; ratio = (double)len / height; } else { height += len; ratio = (double)len / width; } _dst = Mat::zeros(Size(width, height), CV_8UC3); uchar *src_data = _src.data; uchar *src_dev, *dst_dev; cudaMalloc((void**)&src_dev, 3 * _src.rows * _src.cols * sizeof(uchar)); cudaMalloc((void**)&dst_dev, 3 * width * height * sizeof(uchar)); cudaMemcpy(src_dev, src_data, 3 * _src.rows * _src.cols * sizeof(uchar), cudaMemcpyHostToDevice); cudaMemset(dst_dev, 0, 3 * width * height * sizeof(uchar)); dim3 grid(_src.rows, _src.cols); cutKernel <<< grid, 1 >>>(src_dev, dst_dev, width, ratio, dir); cudaMemcpy(_dst.data, dst_dev, 3 * width * height * sizeof(uchar), cudaMemcpyDeviceToHost); } //int main() //{ // Mat src = cv::imread("f.bmp" , 1); // load the image // Mat dst_scale_cpu; // Mat dst_scale_gpu; // Mat dst_trans_cpu; // Mat dst_trans_gpu; // Mat dst_mirror_cpu; // Mat dst_mirror_gpu; // Mat dst_rotate_cpu; // Mat dst_rotate_gpu; // Mat dst_cut_cpu; // Mat dst_cut_gpu; ///* // struct timeval start; // struct timeval end; // unsigned long timer; // gettimeofday(&start, NULL); // start timing // resizeImage(src, dst_scale_cpu, Size(src.cols * 2, src.rows * 2)); // CPU image scaling; result stored in dst_cpu; the third parameter is the target size // gettimeofday(&end, NULL); // stop timing // timer = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec; // cout << "cpu scaling time: " << timer << "us\n"; //*/ // struct timeval start; // struct timeval end; // unsigned long timer; // gettimeofday(&start, NULL); // start timing ///////////////////////////// // resizeImage(src, dst_scale_cpu, Size(src.cols * 2, src.rows * 2)); // transferImage(src, dst_trans_cpu, 100, -100); // mirrorImage(src, dst_mirror_cpu, 1); //
rotateImage(src, dst_rotate_cpu, 30); // cutImage(src, dst_cut_cpu, 0, 50); ///////////////////////////// // gettimeofday(&end, NULL); // stop timing // timer = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec; // cout << "cpu time: " << timer << "us\n"; // initCUDA(); // gettimeofday(&start, NULL); ///////////////////////////// // resizeImageGPU(src, dst_scale_gpu, Size(src.cols * 2, src.rows * 2)); // transferImageGPU(src, dst_trans_gpu, 100, -100); // mirrorImageGPU(src, dst_mirror_gpu, 1); // rotateImageGPU(src, dst_rotate_gpu, 30); // cutImageGPU(src, dst_cut_gpu, 0, 50); ///////////////////////////// // gettimeofday(&end, NULL); // timer = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec; // cout << "gpu time: " << timer << "us\n"; //////////////////////////// // imshow("original", src); // imshow("scale_cpu", dst_scale_cpu); // imshow("scale_gpu", dst_scale_gpu); // imshow("translate_cpu", dst_trans_cpu); // imshow("translate_gpu", dst_trans_gpu); // imshow("mirror_cpu", dst_mirror_cpu); // imshow("mirror_gpu", dst_mirror_gpu); // imshow("rotate_cpu", dst_rotate_cpu); // imshow("rotate_gpu", dst_rotate_gpu); // imshow("shear_cpu", dst_cut_cpu); // imshow("shear_gpu", dst_cut_gpu); // // transferImage(src, dst_trans_cpu, 100, -100); // // imshow("cpu_trans", dst_trans_cpu); // // transferImageGPU(src, dst_trans_gpu, 100, -100); // // imshow("gpu_trans", dst_trans_gpu); // // mirrorImage(src, dst_mirror_cpu, 1); // // mirrorImageGPU(src, dst_mirror_gpu, 1); // // imshow("gpu", dst_mirror_gpu); // // rotateImage(src, dst_rotate_cpu, 30); // // rotateImageGPU(src, dst_rotate_gpu, 30); // // imshow("gpu", dst_rotate_gpu); // // cutImage(src, dst_cut_cpu, 0, 50); // // imshow("cpu", dst_cut_cpu); // // cutImageGPU(src, dst_cut_gpu, 0, 50); // // imshow("gpu", dst_cut_gpu); ///* // initCUDA(); // Mat dst_gpu; // gettimeofday(&start, NULL); // resizeImageGPU(src, dst_gpu, Size(src.cols * 2, src.rows * 2)); //// imshow("src", src); //// imshow(" ", dst_gpu); // gettimeofday(&end, NULL); // timer = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec; // cout << "gpu scaling time: " << timer << "us\n"; //// imshow("Demo", dst_gpu); //*/ // waitKey(0); // return 0; //}
82f9b4afef03c169ad9ecb9141da398cc08b6e27.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cfloat> #include <Render.h> #include <Hitable.h> #include <Material.h> #include <Objects/Sphere.h> __global__ void render_init(int max_x, int max_y, hiprandState_t *rand_state) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if((i >= max_x) || (j >= max_y)) return; int pixel_index = j*max_x + i; //Each thread gets same seed, a different sequence number, no offset hiprand_init(1984, pixel_index, 0, &rand_state[pixel_index]); } #define RND (hiprand_uniform(&local_rand_state)) __global__ void create_world(hitable **d_list, hitable **d_world,hiprandState_t *rand_state) { if (threadIdx.x == 0 && blockIdx.x == 0) { hiprandState_t local_rand_state = *rand_state; *d_world = new hitable_list(d_list, 22*22+1+3); d_list[0] = new sphere(vec3(0,-1000.0f,-1), 1000, new lambertian(vec3(float(0x87), float(0xd3), float(0x7c))/vec3(256.0f,256.0f,256.0f))); int i = 1; for(int a = -11 ; a < 11 ; a++) { for(int b = -11 ; b < 11 ; b++ ) { float chose_mat = RND; vec3 center (a+RND,0.2,b+RND); if(chose_mat < 0.8f) { d_list[i] = new sphere(center, 0.2, new lambertian(vec3(RND*RND, RND*RND, RND*RND))); } else if(chose_mat < 0.95f) { d_list[i] = new sphere(center, 0.2, new metal(vec3(0.5f*(1.0f+RND), 0.5f*(1.0f+RND), 0.5f*(1.0f+RND)), 0.5f*RND)); } else { d_list[i] = new sphere(center, 0.2, new dielectric(1.5)); } i+=1; } } d_list[i++] = new sphere(vec3(0, 1,0), 1.0, new dielectric(1.5)); d_list[i++] = new sphere(vec3(-4, 1, 0), 1.0, new lambertian(vec3(0.4, 0.2, 0.1))); d_list[i++] = new sphere(vec3(4, 1, 0), 1.0, new metal(vec3(0.7, 0.6, 0.5), 0.0)); } } __global__ void init_cam(camera **d_camera,unsigned int nx,unsigned int ny,unsigned int step,unsigned int total) { if (threadIdx.x == 0 && blockIdx.x == 0) { vec3 lookfrom = vec3(sin(float(step)/float(total)*2.0f*float(M_PI))*15,2,cos(float(step)/float(total)*2.0f*float(M_PI))*15); vec3 lookat = vec3(0,0,0); float dist_to_focus = (lookfrom-lookat).length(); float aperture = 0.1f; *d_camera = new camera(lookfrom, lookat, vec3(0,1,0), 30.0f, float(nx)/float(ny), aperture, dist_to_focus); } } __global__ void delete_cam(camera **d_camera) { if (threadIdx.x == 0 && blockIdx.x == 0) { delete *d_camera; } } __global__ void free_world(hitable **d_list, hitable **d_world) { for(int i=0; i<22*22+3+1; i++) { delete ((sphere*)d_list[i])->mat_ptr; delete d_list[i]; } delete *d_world; } __device__ vec3 color(const ray& r, hitable **world,hiprandState_t *local_rand_state) { ray cur_ray = r; vec3 cur_attenuation(1.0f,1.0f,1.0f); for(int i = 0; i < 50; i++) { // Here 50 bounces of ray is max hit_record rec; if ((*world)->hit(cur_ray, 0.001f, FLT_MAX, rec)) { ray scattered; vec3 attenuation; if(rec.mat_ptr->scatter(cur_ray,rec,attenuation,scattered,local_rand_state)){ cur_attenuation *= attenuation; cur_ray = scattered; } else { return vec3(0.0f,0.0f,0.0f); } } else { vec3 unit_direction = unit_vector(cur_ray.direction()); float t = 0.5f*(unit_direction.y() + 1.0f); vec3 c = (1.0f-t)*vec3(1.0f, 1.0f, 1.0f) + t*vec3(0.5f, 0.7f, 1.0f); return cur_attenuation * c; } } return vec3(0.0,0.0,0.0); // exceeded recursion } __global__ void render(vec3 *fb, int max_x, int max_y, int ns, camera **cam, hitable **world, hiprandState_t *rand_state) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if((i >= max_x) || (j >= max_y)) return; int pixel_index = j*max_x + i; hiprandState_t local_rand_state = 
rand_state[pixel_index]; vec3 col(0,0,0); for(int s=0; s < ns; s++) { float u = float(i + hiprand_uniform(&local_rand_state)) / float(max_x); float v = float(j + hiprand_uniform(&local_rand_state)) / float(max_y); ray r = (*cam)->get_ray(u,v,&local_rand_state); col += color(r, world,&local_rand_state); } fb[pixel_index] = col/float(ns); }
82f9b4afef03c169ad9ecb9141da398cc08b6e27.cu
#include <cfloat> #include <Render.h> #include <Hitable.h> #include <Material.h> #include <Objects/Sphere.h> __global__ void render_init(int max_x, int max_y, curandState *rand_state) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if((i >= max_x) || (j >= max_y)) return; int pixel_index = j*max_x + i; //Each thread gets same seed, a different sequence number, no offset curand_init(1984, pixel_index, 0, &rand_state[pixel_index]); } #define RND (curand_uniform(&local_rand_state)) __global__ void create_world(hitable **d_list, hitable **d_world,curandState *rand_state) { if (threadIdx.x == 0 && blockIdx.x == 0) { curandState local_rand_state = *rand_state; *d_world = new hitable_list(d_list, 22*22+1+3); d_list[0] = new sphere(vec3(0,-1000.0f,-1), 1000, new lambertian(vec3(float(0x87), float(0xd3), float(0x7c))/vec3(256.0f,256.0f,256.0f))); int i = 1; for(int a = -11 ; a < 11 ; a++) { for(int b = -11 ; b < 11 ; b++ ) { float chose_mat = RND; vec3 center (a+RND,0.2,b+RND); if(chose_mat < 0.8f) { d_list[i] = new sphere(center, 0.2, new lambertian(vec3(RND*RND, RND*RND, RND*RND))); } else if(chose_mat < 0.95f) { d_list[i] = new sphere(center, 0.2, new metal(vec3(0.5f*(1.0f+RND), 0.5f*(1.0f+RND), 0.5f*(1.0f+RND)), 0.5f*RND)); } else { d_list[i] = new sphere(center, 0.2, new dielectric(1.5)); } i+=1; } } d_list[i++] = new sphere(vec3(0, 1,0), 1.0, new dielectric(1.5)); d_list[i++] = new sphere(vec3(-4, 1, 0), 1.0, new lambertian(vec3(0.4, 0.2, 0.1))); d_list[i++] = new sphere(vec3(4, 1, 0), 1.0, new metal(vec3(0.7, 0.6, 0.5), 0.0)); } } __global__ void init_cam(camera **d_camera,unsigned int nx,unsigned int ny,unsigned int step,unsigned int total) { if (threadIdx.x == 0 && blockIdx.x == 0) { vec3 lookfrom = vec3(sin(float(step)/float(total)*2.0f*float(M_PI))*15,2,cos(float(step)/float(total)*2.0f*float(M_PI))*15); vec3 lookat = vec3(0,0,0); float dist_to_focus = (lookfrom-lookat).length(); float aperture = 0.1f; *d_camera = new camera(lookfrom, lookat, vec3(0,1,0), 30.0f, float(nx)/float(ny), aperture, dist_to_focus); } } __global__ void delete_cam(camera **d_camera) { if (threadIdx.x == 0 && blockIdx.x == 0) { delete *d_camera; } } __global__ void free_world(hitable **d_list, hitable **d_world) { for(int i=0; i<22*22+3+1; i++) { delete ((sphere*)d_list[i])->mat_ptr; delete d_list[i]; } delete *d_world; } __device__ vec3 color(const ray& r, hitable **world,curandState *local_rand_state) { ray cur_ray = r; vec3 cur_attenuation(1.0f,1.0f,1.0f); for(int i = 0; i < 50; i++) { // Here 50 bounces of ray is max hit_record rec; if ((*world)->hit(cur_ray, 0.001f, FLT_MAX, rec)) { ray scattered; vec3 attenuation; if(rec.mat_ptr->scatter(cur_ray,rec,attenuation,scattered,local_rand_state)){ cur_attenuation *= attenuation; cur_ray = scattered; } else { return vec3(0.0f,0.0f,0.0f); } } else { vec3 unit_direction = unit_vector(cur_ray.direction()); float t = 0.5f*(unit_direction.y() + 1.0f); vec3 c = (1.0f-t)*vec3(1.0f, 1.0f, 1.0f) + t*vec3(0.5f, 0.7f, 1.0f); return cur_attenuation * c; } } return vec3(0.0,0.0,0.0); // exceeded recursion } __global__ void render(vec3 *fb, int max_x, int max_y, int ns, camera **cam, hitable **world, curandState *rand_state) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if((i >= max_x) || (j >= max_y)) return; int pixel_index = j*max_x + i; curandState local_rand_state = rand_state[pixel_index]; vec3 col(0,0,0); for(int s=0; s < ns; s++) { float u = float(i + 
curand_uniform(&local_rand_state)) / float(max_x); float v = float(j + curand_uniform(&local_rand_state)) / float(max_y); ray r = (*cam)->get_ray(u,v,&local_rand_state); col += color(r, world,&local_rand_state); } fb[pixel_index] = col/float(ns); }
f373a999b01370dc58f28f55110d4a87104fca84.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/ceil_div.h> #include <ATen/hip/Atomic.cuh> #include <ATen/hip/HIPContext.h> #include <ATen/hip/DeviceUtils.cuh> #include <ATen/TensorUtils.h> #include <ATen/NativeFunctions.h> #include <ATen/AccumulateType.h> #include <ATen/hip/cub.cuh> #include <ATen/native/hip/SortingCommon.cuh> #include <ATen/native/hip/EmbeddingBackwardKernel.cuh> #include <ATen/native/hip/KernelUtils.cuh> #include <c10/macros/Macros.h> namespace at { namespace native { template<typename index_t> void embedding_dense_backward_cuda_scan(Tensor &sorted_indices, Tensor &count); namespace { constexpr int MODE_SUM = 0; constexpr int MODE_MEAN = 1; constexpr int MODE_MAX = 2; std::pair<Tensor, Tensor> promoteIndicesAndOffsets( const Tensor& indices, const Tensor& offsets) { const auto commonType = promoteTypes(offsets.scalar_type(), indices.scalar_type()); return { indices.scalar_type() == commonType ? indices : indices.toType(commonType), offsets.scalar_type() == commonType ? offsets : offsets.toType(commonType)}; } // This kernel assumes that all input tensors except `weight` and // per_sample_weights are contiguous. template <typename scalar_t, typename index_t> __global__ void EmbeddingBag_updateOutputKernel_max( index_t *input, index_t *offsets, scalar_t *weight, scalar_t *output, index_t *offset2bag, int64_t numIndices, int64_t numBags, int64_t featureSize, int64_t weight_stride0, int64_t weight_stride1, index_t *bag_size, index_t *max_indices, index_t padding_idx) { // the strategy here is that each bag x feature is handled by a single thread int64_t chunksPerBag = ceil_div(featureSize, (int64_t)blockDim.x); int64_t numChunks = numBags * chunksPerBag; int64_t chunkOffset = blockIdx.x * blockDim.y + threadIdx.y; int64_t chunkStride = gridDim.x * blockDim.y; for (int64_t chunk = chunkOffset; chunk < numChunks; chunk += chunkStride) { int64_t featureDim = (chunk % chunksPerBag) * blockDim.x + threadIdx.x; if (featureDim < featureSize) { int64_t bag = chunk / chunksPerBag; scalar_t *weightFeat = weight + featureDim * weight_stride1; int64_t begin = bag == 0 ? 0 : offsets[bag]; // forces first offset to be 0 instead of asserting on it int64_t end = (bag < numBags - 1) ? (offsets[bag + 1]) : numIndices; CUDA_KERNEL_ASSERT(end >= begin); scalar_t weightFeatMax = 0; int64_t bag_size_ = 0; int64_t maxWord = -1; for (int64_t emb = begin; emb < end; emb++) { bool pad = (input[emb] == padding_idx); const int64_t weightRow = input[emb] * weight_stride0; scalar_t weightValue = weightFeat[weightRow]; if (bag_size_ == 0 || weightValue > weightFeatMax) { weightFeatMax = pad ? weightFeatMax : weightValue; maxWord = pad ? maxWord : input[emb]; } bag_size_ += pad ? 0 : 1; if (featureDim == 0) { offset2bag[emb] = bag; } } bag_size[bag] = bag_size_; max_indices[bag * featureSize + featureDim] = maxWord; output[bag * featureSize + featureDim] = weightFeatMax; } } } // This kernel assumes that all input tensors except `weight` and // per_sample_weights are contiguous. 
template <typename scalar_t, typename index_t> __global__ void EmbeddingBag_updateOutputKernel_sum_mean( index_t *input, index_t *offsets, scalar_t *weight, scalar_t *output, index_t *offset2bag, int64_t numIndices, int64_t numBags, int64_t featureSize, int64_t weight_stride0, int64_t weight_stride1, int mode, index_t *bag_size, scalar_t* per_sample_weights, int64_t per_sample_weights_stride, index_t padding_idx) { // the strategy here is that each bag x feature is handled by a single thread using accscalar_t = acc_type<scalar_t, true>; int64_t chunksPerBag = ceil_div(featureSize, (int64_t)blockDim.x); int64_t numChunks = numBags * chunksPerBag; int64_t chunkOffset = blockIdx.x * blockDim.y + threadIdx.y; int64_t chunkStride = gridDim.x * blockDim.y; for (int64_t chunk = chunkOffset; chunk < numChunks; chunk += chunkStride) { int64_t featureDim = (chunk % chunksPerBag) * blockDim.x + threadIdx.x; if (featureDim < featureSize) { int64_t bag = chunk / chunksPerBag; scalar_t *weightFeat = weight + featureDim * weight_stride1; int64_t begin = bag == 0 ? 0 : offsets[bag]; // forces first offset to be 0 instead of asserting on it int64_t end = (bag < numBags - 1) ? (offsets[bag + 1]) : numIndices; CUDA_KERNEL_ASSERT(end >= begin); accscalar_t weightFeatSum = 0; int64_t bag_size_ = 0; for (int64_t emb = begin; emb < end; emb++) { bool pad = (input[emb] == padding_idx); const int64_t weightRow = input[emb] * weight_stride0; scalar_t weightValue = weightFeat[weightRow]; weightValue = pad ? static_cast<scalar_t>(0) : weightValue; if (per_sample_weights) { accscalar_t scaleWeightBy = static_cast<accscalar_t>( per_sample_weights[emb * per_sample_weights_stride]); weightFeatSum += scaleWeightBy * static_cast<accscalar_t>(weightValue); } else { weightFeatSum += static_cast<accscalar_t>(weightValue); } bag_size_ += pad ? 
0 : 1; if (featureDim == 0) { offset2bag[emb] = bag; } } if (mode == MODE_MEAN) { if (bag_size_ != 0) { weightFeatSum = weightFeatSum / static_cast<accscalar_t>(bag_size_); } } bag_size[bag] = bag_size_; output[bag * featureSize + featureDim] = static_cast<scalar_t>(weightFeatSum); } } } Tensor embedding_bag_backward_cuda_sum_avg( const Tensor &grad, const Tensor &indices_, const Tensor &offset2bag, const Tensor &bag_size, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const Tensor& per_sample_weights, int64_t padding_idx) { auto indices = indices_.contiguous(); auto grad_weight = at::zeros({num_weights, grad.size(1)}, grad.options()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); ptrdiff_t num_indices = indices.numel(); if (num_indices == 0) { // all empty bags return at::zeros({num_weights, grad.size(1)}, grad.options()); } int64_t stride = grad_weight.stride(0); auto sorted_indices = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto orig_indices = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT); Tensor count; AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_bag_backward_cuda_sum_avg", [&] () { auto range = at::arange(num_indices, indices.options()); int64_t nbits = cuda::cub::get_num_bits(num_weights); cuda::cub::sort_pairs( indices.data_ptr<index_t>(), sorted_indices.data_ptr<index_t>(), range.data_ptr<index_t>(), orig_indices.data_ptr<index_t>(), num_indices, false/*, 0, nbits*/); if (scale_grad_by_freq) { count = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT); embedding_dense_backward_cuda_scan<index_t>(sorted_indices, count); } }); return embedding_backward_cuda_kernel(grad, orig_indices, sorted_indices, count, num_weights, padding_idx, mode == MODE_MEAN, offset2bag, bag_size, per_sample_weights); } template <typename scalar_t, typename index_t> __global__ void EmbeddingBag_accGradParametersKernel_max( index_t *max_indices, scalar_t *gradOutput, scalar_t *gradWeight, int64_t stride, int64_t numBags, index_t padding_idx, const index_t numel) { using accscalar_t = acc_type<scalar_t, true>; int64_t chunksPerBag = ceil_div(stride, (int64_t)blockDim.x); int64_t numChunks = numBags * chunksPerBag; int64_t chunkOffset = blockIdx.x * blockDim.y + threadIdx.y; int64_t chunkStride = gridDim.x * blockDim.y; for (int64_t chunk = chunkOffset; chunk < numChunks; chunk += chunkStride) { int64_t featureDim = (chunk % chunksPerBag) * blockDim.x + threadIdx.x; if (featureDim < stride) { int64_t bag = chunk / chunksPerBag; index_t word_idx = max_indices[bag * stride + featureDim]; if (word_idx >= 0 && word_idx != padding_idx) { // If bag is empty, we have max_indices[idx] set to -1 in forward. 
fastAtomicAdd( gradWeight, static_cast<index_t>(word_idx * stride + featureDim), numel, gradOutput[bag * stride + featureDim], true); } } } } Tensor embedding_bag_backward_cuda_max(const Tensor &grad, const Tensor &max_indices, int64_t num_weights, int64_t padding_idx) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("embedding_bag_backward_cuda_max"); auto grad_weight = at::zeros({num_weights, grad.size(1)}, grad.options()); int64_t stride = grad_weight.stride(0); int64_t numBags = grad.size(0); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); #if defined(USE_ROCM) dim3 block = dim3(64, 4); #else dim3 block = dim3(32, 8); #endif int grid = 1024; AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad.scalar_type(), "embedding_bag_backward_cuda_max", [&] { AT_DISPATCH_INDEX_TYPES(max_indices.scalar_type(), "embedding_bag_backward_cuda_max", [&] () { hipLaunchKernelGGL(( EmbeddingBag_accGradParametersKernel_max< scalar_t, index_t>), dim3(grid), dim3(block), 0, stream, max_indices.data_ptr<index_t>(), grad.data_ptr<scalar_t>(), grad_weight.data_ptr<scalar_t>(), stride, numBags, padding_idx, grad_weight.numel()); C10_HIP_KERNEL_LAUNCH_CHECK(); }); }); return grad_weight; } } // Assumes all input tensors are contiguous. // See NOTE [ embedding_bag Native Functions ] in native_functions.yaml for details std::tuple<Tensor, Tensor, Tensor, Tensor> _embedding_bag_forward_only_cuda(const Tensor &weight, const Tensor &indices, const Tensor &offsets, const bool scale_grad_by_freq, const int64_t mode, bool sparse, const c10::optional<Tensor>& per_sample_weights_opt, bool include_last_offset, int64_t padding_idx) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> per_sample_weights_maybe_owned = at::borrow_from_optional_tensor(per_sample_weights_opt); const Tensor& per_sample_weights = *per_sample_weights_maybe_owned; return _embedding_bag_cuda( weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx); } // Assumes all input tensors are contiguous. 
// See NOTE [ embedding_bag Native Functions ] in native_functions.yaml for details std::tuple<Tensor, Tensor, Tensor, Tensor> _embedding_bag_cuda(const Tensor &weight, const Tensor &indices_, const Tensor &offsets_, const bool scale_grad_by_freq, const int64_t mode, bool sparse, const c10::optional<Tensor>& per_sample_weights_opt, bool include_last_offset, int64_t padding_idx) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> per_sample_weights_maybe_owned = at::borrow_from_optional_tensor(per_sample_weights_opt); const Tensor& per_sample_weights = *per_sample_weights_maybe_owned; Tensor indices, offsets; std::tie(indices, offsets) = promoteIndicesAndOffsets(indices_, offsets_); auto indices_arg = TensorArg(indices, "indices", 1); checkScalarTypes("embedding_bag_cuda", indices_arg, {kLong, kInt}); auto offsets_arg = TensorArg(offsets, "offsets", 1); checkScalarTypes("embedding_bag_cuda", offsets_arg, {kLong, kInt}); checkSameType("embedding_bag_cuda", indices_arg, offsets_arg); auto weight_arg = TensorArg(weight, "weight", 1); checkSameGPU("embedding_bag_cuda", weight_arg, indices_arg); checkSameGPU("embedding_bag_cuda", weight_arg, offsets_arg); int64_t numIndices = indices.size(0); int64_t numBags = offsets.size(0); if (include_last_offset) { // Check https://github.com/pytorch/pytorch/issues/29019 // We plan to add one more element in offsets, which is equal to the size of // indices. Currently for cuda devices, we still use the legacy // implementation even this flag is enabled. TORCH_CHECK( numBags >= 1, "include_last_offset: numBags should be at least 1"); numBags -= 1; } int64_t featureSize = weight.size(1); auto bag_size = at::empty(offsets.sizes(), indices.options()); auto offset2bag = at::empty({indices.size(0)}, indices.options()); // offset2bag = [0 0 0 0 0] hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto output = at::empty({numBags, featureSize}, weight.options()); Tensor max_indices; if (mode == MODE_MAX) { max_indices = at::empty({numBags, featureSize}, indices.options()); } else { // No need to allocate if we aren't doing a backwards pass max_indices = at::empty({0}, indices.options()); } #if defined(USE_ROCM) dim3 block = dim3(64, 4); #else dim3 block = dim3(32, 8); #endif int grid = 1024; AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, weight.scalar_type(), "embedding_bag_cuda", [&] { AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_bag_cuda", [&] () { if (mode == MODE_MAX) { hipLaunchKernelGGL(( EmbeddingBag_updateOutputKernel_max<scalar_t, index_t>), dim3(grid), dim3(block), 0, stream, indices.data_ptr<index_t>(), offsets.data_ptr<index_t>(), weight.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), offset2bag.data_ptr<index_t>(), numIndices, numBags, featureSize, weight.stride(0), weight.stride(1), bag_size.data_ptr<index_t>(), max_indices.data_ptr<index_t>(), padding_idx); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { hipLaunchKernelGGL(( EmbeddingBag_updateOutputKernel_sum_mean<scalar_t, index_t>), dim3(grid), dim3(block), 0, stream, indices.data_ptr<index_t>(), offsets.data_ptr<index_t>(), weight.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), offset2bag.data_ptr<index_t>(), numIndices, numBags, featureSize, weight.stride(0), weight.stride(1), mode, bag_size.data_ptr<index_t>(), per_sample_weights.defined() ? per_sample_weights.data_ptr<scalar_t>() : NULL, per_sample_weights.defined() ? 
per_sample_weights.stride(0) : 0, padding_idx); C10_HIP_KERNEL_LAUNCH_CHECK(); } }); }); return std::tuple<Tensor, Tensor, Tensor, Tensor>(output, offset2bag, bag_size, max_indices); } Tensor _embedding_bag_dense_backward_cuda(const Tensor &grad_, const Tensor &indices, const Tensor &offset2bag, const Tensor &bag_size_, const Tensor &max_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<Tensor>& per_sample_weights_opt, int64_t padding_idx) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> per_sample_weights_maybe_owned = at::borrow_from_optional_tensor(per_sample_weights_opt); const Tensor& per_sample_weights = *per_sample_weights_maybe_owned; // indices, offsets and offset2bag are assumed having correct dtypes and // contiguous here due to the checks in _embedding_bag_backward in // EmbeddingBag.cpp. // Also see NOTE [ embedding_bag Native Functions ] in native_functions.yaml // for more details. Tensor grad = grad_.contiguous(); auto indices_arg = TensorArg(indices, "indices", 1); auto grad_arg = TensorArg(grad, "grad", 1); checkSameGPU("embedding_bag_cuda", grad_arg, indices_arg); switch (mode) { case MODE_SUM: case MODE_MEAN: if (mode == MODE_MEAN) AT_ASSERT(!per_sample_weights.defined()); return embedding_bag_backward_cuda_sum_avg(grad, indices, offset2bag, bag_size_, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx); case MODE_MAX: AT_ASSERT(!per_sample_weights.defined()); return embedding_bag_backward_cuda_max(grad, max_indices, num_weights, padding_idx); default: AT_ERROR( "Unknown mode for embedding_bag_backward_cuda ", mode); } } template <typename scalar_t> __inline__ __device__ static scalar_t warpReduceSum(scalar_t val) { for (int offset = C10_WARP_SIZE/2; offset > 0; offset /= 2) val += WARP_SHFL_DOWN(val, offset); return val; } template <typename scalar_t, typename index_t> __global__ static void _embedding_bag_per_sample_weights_backward_kernel( const scalar_t* grad, int64_t grad_stride0, int64_t grad_stride1, const scalar_t* weight, int64_t weight_stride0, int64_t weight_stride1, const index_t* indices, // contiguous const index_t* offset2bag, // contiguous int64_t num_samples, int64_t embedding_features, scalar_t* output, index_t padding_idx) { using accscalar_t = acc_type<scalar_t, true>; const int idx = threadIdx.x + blockIdx.x * blockDim.x; const int warp = idx / C10_WARP_SIZE; const int thread_in_warp = idx % C10_WARP_SIZE; const int num_warps = blockDim.x * gridDim.x / C10_WARP_SIZE; // Each warp is responsible for the accumulation of one sample. // This involves doing one dot product between grad[bag_idx] and weight[embedding_idx]. 
for (int sample_idx = warp; sample_idx < num_samples; sample_idx += num_warps) { accscalar_t result = 0.; const int bag_idx = (int)offset2bag[sample_idx]; const int embedding_idx = (int)indices[sample_idx]; if (embedding_idx != padding_idx) { for (int feature_idx = thread_in_warp; feature_idx < embedding_features; feature_idx += C10_WARP_SIZE) { result += grad[grad_stride0 * bag_idx + grad_stride1 * feature_idx] * weight[weight_stride0 * embedding_idx + weight_stride1 * feature_idx]; } } result = warpReduceSum<accscalar_t>(result); if (thread_in_warp == 0) { output[sample_idx] = result; } } } Tensor _embedding_bag_per_sample_weights_backward_cuda( const Tensor& grad, const Tensor& weight, // NB: embedding table, not per_sample_weights const Tensor& indices_, const Tensor& offsets_, const Tensor& offset2bag, int64_t mode, int64_t padding_idx) { TORCH_CHECK( mode == MODE_SUM, "embedding_bag_backward: per_sample_weights only supported for mode='sum'"); AT_ASSERT(grad.dim() == 2); auto embedding_features = grad.size(1); Tensor indices, offsets; std::tie(indices, offsets) = promoteIndicesAndOffsets(indices_, offsets_); AT_ASSERT(indices.dim() == 1); auto num_samples = indices.size(0); AT_ASSERT(weight.dim() == 2); AT_ASSERT(weight.size(1) == embedding_features); const int threads_per_block = 512; const int warps_per_block = threads_per_block / C10_WARP_SIZE; dim3 block(threads_per_block); dim3 grid((num_samples + warps_per_block - 1) / warps_per_block); auto output = at::empty({num_samples}, grad.options()); // Early return when there is no samples in the batch. This saves unnecesary kernel // launch, but also prevents hipGetLastError() to complain about invalid launch args if (num_samples == 0) { return output; } AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad.scalar_type(), "_embedding_bag_per_sample_weights_backward_cuda", [&]() { AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "_embedding_bag_per_sample_weights_backward_cuda", [&]() { hipLaunchKernelGGL(( _embedding_bag_per_sample_weights_backward_kernel<scalar_t, index_t>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad.data_ptr<scalar_t>(), grad.stride(0), grad.stride(1), weight.data_ptr<scalar_t>(), weight.stride(0), weight.stride(1), indices.data_ptr<index_t>(), offset2bag.data_ptr<index_t>(), num_samples, embedding_features, output.data_ptr<scalar_t>(), padding_idx); C10_HIP_KERNEL_LAUNCH_CHECK(); }); } ); return output; } } }
f373a999b01370dc58f28f55110d4a87104fca84.cu
#include <ATen/ATen.h> #include <ATen/ceil_div.h> #include <ATen/cuda/Atomic.cuh> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/DeviceUtils.cuh> #include <ATen/TensorUtils.h> #include <ATen/NativeFunctions.h> #include <ATen/AccumulateType.h> #include <ATen/cuda/cub.cuh> #include <ATen/native/cuda/SortingCommon.cuh> #include <ATen/native/cuda/EmbeddingBackwardKernel.cuh> #include <ATen/native/cuda/KernelUtils.cuh> #include <c10/macros/Macros.h> namespace at { namespace native { template<typename index_t> void embedding_dense_backward_cuda_scan(Tensor &sorted_indices, Tensor &count); namespace { constexpr int MODE_SUM = 0; constexpr int MODE_MEAN = 1; constexpr int MODE_MAX = 2; std::pair<Tensor, Tensor> promoteIndicesAndOffsets( const Tensor& indices, const Tensor& offsets) { const auto commonType = promoteTypes(offsets.scalar_type(), indices.scalar_type()); return { indices.scalar_type() == commonType ? indices : indices.toType(commonType), offsets.scalar_type() == commonType ? offsets : offsets.toType(commonType)}; } // This kernel assumes that all input tensors except `weight` and // per_sample_weights are contiguous. template <typename scalar_t, typename index_t> __global__ void EmbeddingBag_updateOutputKernel_max( index_t *input, index_t *offsets, scalar_t *weight, scalar_t *output, index_t *offset2bag, int64_t numIndices, int64_t numBags, int64_t featureSize, int64_t weight_stride0, int64_t weight_stride1, index_t *bag_size, index_t *max_indices, index_t padding_idx) { // the strategy here is that each bag x feature is handled by a single thread int64_t chunksPerBag = ceil_div(featureSize, (int64_t)blockDim.x); int64_t numChunks = numBags * chunksPerBag; int64_t chunkOffset = blockIdx.x * blockDim.y + threadIdx.y; int64_t chunkStride = gridDim.x * blockDim.y; for (int64_t chunk = chunkOffset; chunk < numChunks; chunk += chunkStride) { int64_t featureDim = (chunk % chunksPerBag) * blockDim.x + threadIdx.x; if (featureDim < featureSize) { int64_t bag = chunk / chunksPerBag; scalar_t *weightFeat = weight + featureDim * weight_stride1; int64_t begin = bag == 0 ? 0 : offsets[bag]; // forces first offset to be 0 instead of asserting on it int64_t end = (bag < numBags - 1) ? (offsets[bag + 1]) : numIndices; CUDA_KERNEL_ASSERT(end >= begin); scalar_t weightFeatMax = 0; int64_t bag_size_ = 0; int64_t maxWord = -1; for (int64_t emb = begin; emb < end; emb++) { bool pad = (input[emb] == padding_idx); const int64_t weightRow = input[emb] * weight_stride0; scalar_t weightValue = weightFeat[weightRow]; if (bag_size_ == 0 || weightValue > weightFeatMax) { weightFeatMax = pad ? weightFeatMax : weightValue; maxWord = pad ? maxWord : input[emb]; } bag_size_ += pad ? 0 : 1; if (featureDim == 0) { offset2bag[emb] = bag; } } bag_size[bag] = bag_size_; max_indices[bag * featureSize + featureDim] = maxWord; output[bag * featureSize + featureDim] = weightFeatMax; } } } // This kernel assumes that all input tensors except `weight` and // per_sample_weights are contiguous. 
template <typename scalar_t, typename index_t> __global__ void EmbeddingBag_updateOutputKernel_sum_mean( index_t *input, index_t *offsets, scalar_t *weight, scalar_t *output, index_t *offset2bag, int64_t numIndices, int64_t numBags, int64_t featureSize, int64_t weight_stride0, int64_t weight_stride1, int mode, index_t *bag_size, scalar_t* per_sample_weights, int64_t per_sample_weights_stride, index_t padding_idx) { // the strategy here is that each bag x feature is handled by a single thread using accscalar_t = acc_type<scalar_t, true>; int64_t chunksPerBag = ceil_div(featureSize, (int64_t)blockDim.x); int64_t numChunks = numBags * chunksPerBag; int64_t chunkOffset = blockIdx.x * blockDim.y + threadIdx.y; int64_t chunkStride = gridDim.x * blockDim.y; for (int64_t chunk = chunkOffset; chunk < numChunks; chunk += chunkStride) { int64_t featureDim = (chunk % chunksPerBag) * blockDim.x + threadIdx.x; if (featureDim < featureSize) { int64_t bag = chunk / chunksPerBag; scalar_t *weightFeat = weight + featureDim * weight_stride1; int64_t begin = bag == 0 ? 0 : offsets[bag]; // forces first offset to be 0 instead of asserting on it int64_t end = (bag < numBags - 1) ? (offsets[bag + 1]) : numIndices; CUDA_KERNEL_ASSERT(end >= begin); accscalar_t weightFeatSum = 0; int64_t bag_size_ = 0; for (int64_t emb = begin; emb < end; emb++) { bool pad = (input[emb] == padding_idx); const int64_t weightRow = input[emb] * weight_stride0; scalar_t weightValue = weightFeat[weightRow]; weightValue = pad ? static_cast<scalar_t>(0) : weightValue; if (per_sample_weights) { accscalar_t scaleWeightBy = static_cast<accscalar_t>( per_sample_weights[emb * per_sample_weights_stride]); weightFeatSum += scaleWeightBy * static_cast<accscalar_t>(weightValue); } else { weightFeatSum += static_cast<accscalar_t>(weightValue); } bag_size_ += pad ? 
0 : 1; if (featureDim == 0) { offset2bag[emb] = bag; } } if (mode == MODE_MEAN) { if (bag_size_ != 0) { weightFeatSum = weightFeatSum / static_cast<accscalar_t>(bag_size_); } } bag_size[bag] = bag_size_; output[bag * featureSize + featureDim] = static_cast<scalar_t>(weightFeatSum); } } } Tensor embedding_bag_backward_cuda_sum_avg( const Tensor &grad, const Tensor &indices_, const Tensor &offset2bag, const Tensor &bag_size, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const Tensor& per_sample_weights, int64_t padding_idx) { auto indices = indices_.contiguous(); auto grad_weight = at::zeros({num_weights, grad.size(1)}, grad.options()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); ptrdiff_t num_indices = indices.numel(); if (num_indices == 0) { // all empty bags return at::zeros({num_weights, grad.size(1)}, grad.options()); } int64_t stride = grad_weight.stride(0); auto sorted_indices = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto orig_indices = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT); Tensor count; AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_bag_backward_cuda_sum_avg", [&] () { auto range = at::arange(num_indices, indices.options()); int64_t nbits = cuda::cub::get_num_bits(num_weights); cuda::cub::sort_pairs( indices.data_ptr<index_t>(), sorted_indices.data_ptr<index_t>(), range.data_ptr<index_t>(), orig_indices.data_ptr<index_t>(), num_indices, false/*, 0, nbits*/); if (scale_grad_by_freq) { count = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT); embedding_dense_backward_cuda_scan<index_t>(sorted_indices, count); } }); return embedding_backward_cuda_kernel(grad, orig_indices, sorted_indices, count, num_weights, padding_idx, mode == MODE_MEAN, offset2bag, bag_size, per_sample_weights); } template <typename scalar_t, typename index_t> __global__ void EmbeddingBag_accGradParametersKernel_max( index_t *max_indices, scalar_t *gradOutput, scalar_t *gradWeight, int64_t stride, int64_t numBags, index_t padding_idx, const index_t numel) { using accscalar_t = acc_type<scalar_t, true>; int64_t chunksPerBag = ceil_div(stride, (int64_t)blockDim.x); int64_t numChunks = numBags * chunksPerBag; int64_t chunkOffset = blockIdx.x * blockDim.y + threadIdx.y; int64_t chunkStride = gridDim.x * blockDim.y; for (int64_t chunk = chunkOffset; chunk < numChunks; chunk += chunkStride) { int64_t featureDim = (chunk % chunksPerBag) * blockDim.x + threadIdx.x; if (featureDim < stride) { int64_t bag = chunk / chunksPerBag; index_t word_idx = max_indices[bag * stride + featureDim]; if (word_idx >= 0 && word_idx != padding_idx) { // If bag is empty, we have max_indices[idx] set to -1 in forward. 
fastAtomicAdd( gradWeight, static_cast<index_t>(word_idx * stride + featureDim), numel, gradOutput[bag * stride + featureDim], true); } } } } Tensor embedding_bag_backward_cuda_max(const Tensor &grad, const Tensor &max_indices, int64_t num_weights, int64_t padding_idx) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("embedding_bag_backward_cuda_max"); auto grad_weight = at::zeros({num_weights, grad.size(1)}, grad.options()); int64_t stride = grad_weight.stride(0); int64_t numBags = grad.size(0); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); #if defined(USE_ROCM) dim3 block = dim3(64, 4); #else dim3 block = dim3(32, 8); #endif int grid = 1024; AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad.scalar_type(), "embedding_bag_backward_cuda_max", [&] { AT_DISPATCH_INDEX_TYPES(max_indices.scalar_type(), "embedding_bag_backward_cuda_max", [&] () { EmbeddingBag_accGradParametersKernel_max< scalar_t, index_t><<<grid, block, 0, stream>>>( max_indices.data_ptr<index_t>(), grad.data_ptr<scalar_t>(), grad_weight.data_ptr<scalar_t>(), stride, numBags, padding_idx, grad_weight.numel()); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); }); return grad_weight; } } // Assumes all input tensors are contiguous. // See NOTE [ embedding_bag Native Functions ] in native_functions.yaml for details std::tuple<Tensor, Tensor, Tensor, Tensor> _embedding_bag_forward_only_cuda(const Tensor &weight, const Tensor &indices, const Tensor &offsets, const bool scale_grad_by_freq, const int64_t mode, bool sparse, const c10::optional<Tensor>& per_sample_weights_opt, bool include_last_offset, int64_t padding_idx) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> per_sample_weights_maybe_owned = at::borrow_from_optional_tensor(per_sample_weights_opt); const Tensor& per_sample_weights = *per_sample_weights_maybe_owned; return _embedding_bag_cuda( weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx); } // Assumes all input tensors are contiguous. 
// See NOTE [ embedding_bag Native Functions ] in native_functions.yaml for details std::tuple<Tensor, Tensor, Tensor, Tensor> _embedding_bag_cuda(const Tensor &weight, const Tensor &indices_, const Tensor &offsets_, const bool scale_grad_by_freq, const int64_t mode, bool sparse, const c10::optional<Tensor>& per_sample_weights_opt, bool include_last_offset, int64_t padding_idx) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> per_sample_weights_maybe_owned = at::borrow_from_optional_tensor(per_sample_weights_opt); const Tensor& per_sample_weights = *per_sample_weights_maybe_owned; Tensor indices, offsets; std::tie(indices, offsets) = promoteIndicesAndOffsets(indices_, offsets_); auto indices_arg = TensorArg(indices, "indices", 1); checkScalarTypes("embedding_bag_cuda", indices_arg, {kLong, kInt}); auto offsets_arg = TensorArg(offsets, "offsets", 1); checkScalarTypes("embedding_bag_cuda", offsets_arg, {kLong, kInt}); checkSameType("embedding_bag_cuda", indices_arg, offsets_arg); auto weight_arg = TensorArg(weight, "weight", 1); checkSameGPU("embedding_bag_cuda", weight_arg, indices_arg); checkSameGPU("embedding_bag_cuda", weight_arg, offsets_arg); int64_t numIndices = indices.size(0); int64_t numBags = offsets.size(0); if (include_last_offset) { // Check https://github.com/pytorch/pytorch/issues/29019 // We plan to add one more element in offsets, which is equal to the size of // indices. Currently for cuda devices, we still use the legacy // implementation even this flag is enabled. TORCH_CHECK( numBags >= 1, "include_last_offset: numBags should be at least 1"); numBags -= 1; } int64_t featureSize = weight.size(1); auto bag_size = at::empty(offsets.sizes(), indices.options()); auto offset2bag = at::empty({indices.size(0)}, indices.options()); // offset2bag = [0 0 0 0 0] cudaStream_t stream = at::cuda::getCurrentCUDAStream(); auto output = at::empty({numBags, featureSize}, weight.options()); Tensor max_indices; if (mode == MODE_MAX) { max_indices = at::empty({numBags, featureSize}, indices.options()); } else { // No need to allocate if we aren't doing a backwards pass max_indices = at::empty({0}, indices.options()); } #if defined(USE_ROCM) dim3 block = dim3(64, 4); #else dim3 block = dim3(32, 8); #endif int grid = 1024; AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, weight.scalar_type(), "embedding_bag_cuda", [&] { AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_bag_cuda", [&] () { if (mode == MODE_MAX) { EmbeddingBag_updateOutputKernel_max<scalar_t, index_t><<<grid, block, 0, stream>>>( indices.data_ptr<index_t>(), offsets.data_ptr<index_t>(), weight.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), offset2bag.data_ptr<index_t>(), numIndices, numBags, featureSize, weight.stride(0), weight.stride(1), bag_size.data_ptr<index_t>(), max_indices.data_ptr<index_t>(), padding_idx); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { EmbeddingBag_updateOutputKernel_sum_mean<scalar_t, index_t><<<grid, block, 0, stream>>>( indices.data_ptr<index_t>(), offsets.data_ptr<index_t>(), weight.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), offset2bag.data_ptr<index_t>(), numIndices, numBags, featureSize, weight.stride(0), weight.stride(1), mode, bag_size.data_ptr<index_t>(), per_sample_weights.defined() ? per_sample_weights.data_ptr<scalar_t>() : NULL, per_sample_weights.defined() ? 
per_sample_weights.stride(0) : 0, padding_idx); C10_CUDA_KERNEL_LAUNCH_CHECK(); } }); }); return std::tuple<Tensor, Tensor, Tensor, Tensor>(output, offset2bag, bag_size, max_indices); } Tensor _embedding_bag_dense_backward_cuda(const Tensor &grad_, const Tensor &indices, const Tensor &offset2bag, const Tensor &bag_size_, const Tensor &max_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<Tensor>& per_sample_weights_opt, int64_t padding_idx) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> per_sample_weights_maybe_owned = at::borrow_from_optional_tensor(per_sample_weights_opt); const Tensor& per_sample_weights = *per_sample_weights_maybe_owned; // indices, offsets and offset2bag are assumed having correct dtypes and // contiguous here due to the checks in _embedding_bag_backward in // EmbeddingBag.cpp. // Also see NOTE [ embedding_bag Native Functions ] in native_functions.yaml // for more details. Tensor grad = grad_.contiguous(); auto indices_arg = TensorArg(indices, "indices", 1); auto grad_arg = TensorArg(grad, "grad", 1); checkSameGPU("embedding_bag_cuda", grad_arg, indices_arg); switch (mode) { case MODE_SUM: case MODE_MEAN: if (mode == MODE_MEAN) AT_ASSERT(!per_sample_weights.defined()); return embedding_bag_backward_cuda_sum_avg(grad, indices, offset2bag, bag_size_, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx); case MODE_MAX: AT_ASSERT(!per_sample_weights.defined()); return embedding_bag_backward_cuda_max(grad, max_indices, num_weights, padding_idx); default: AT_ERROR( "Unknown mode for embedding_bag_backward_cuda ", mode); } } template <typename scalar_t> __inline__ __device__ static scalar_t warpReduceSum(scalar_t val) { for (int offset = C10_WARP_SIZE/2; offset > 0; offset /= 2) val += WARP_SHFL_DOWN(val, offset); return val; } template <typename scalar_t, typename index_t> __global__ static void _embedding_bag_per_sample_weights_backward_kernel( const scalar_t* grad, int64_t grad_stride0, int64_t grad_stride1, const scalar_t* weight, int64_t weight_stride0, int64_t weight_stride1, const index_t* indices, // contiguous const index_t* offset2bag, // contiguous int64_t num_samples, int64_t embedding_features, scalar_t* output, index_t padding_idx) { using accscalar_t = acc_type<scalar_t, true>; const int idx = threadIdx.x + blockIdx.x * blockDim.x; const int warp = idx / C10_WARP_SIZE; const int thread_in_warp = idx % C10_WARP_SIZE; const int num_warps = blockDim.x * gridDim.x / C10_WARP_SIZE; // Each warp is responsible for the accumulation of one sample. // This involves doing one dot product between grad[bag_idx] and weight[embedding_idx]. 
for (int sample_idx = warp; sample_idx < num_samples; sample_idx += num_warps) { accscalar_t result = 0.; const int bag_idx = (int)offset2bag[sample_idx]; const int embedding_idx = (int)indices[sample_idx]; if (embedding_idx != padding_idx) { for (int feature_idx = thread_in_warp; feature_idx < embedding_features; feature_idx += C10_WARP_SIZE) { result += grad[grad_stride0 * bag_idx + grad_stride1 * feature_idx] * weight[weight_stride0 * embedding_idx + weight_stride1 * feature_idx]; } } result = warpReduceSum<accscalar_t>(result); if (thread_in_warp == 0) { output[sample_idx] = result; } } } Tensor _embedding_bag_per_sample_weights_backward_cuda( const Tensor& grad, const Tensor& weight, // NB: embedding table, not per_sample_weights const Tensor& indices_, const Tensor& offsets_, const Tensor& offset2bag, int64_t mode, int64_t padding_idx) { TORCH_CHECK( mode == MODE_SUM, "embedding_bag_backward: per_sample_weights only supported for mode='sum'"); AT_ASSERT(grad.dim() == 2); auto embedding_features = grad.size(1); Tensor indices, offsets; std::tie(indices, offsets) = promoteIndicesAndOffsets(indices_, offsets_); AT_ASSERT(indices.dim() == 1); auto num_samples = indices.size(0); AT_ASSERT(weight.dim() == 2); AT_ASSERT(weight.size(1) == embedding_features); const int threads_per_block = 512; const int warps_per_block = threads_per_block / C10_WARP_SIZE; dim3 block(threads_per_block); dim3 grid((num_samples + warps_per_block - 1) / warps_per_block); auto output = at::empty({num_samples}, grad.options()); // Early return when there are no samples in the batch. This saves an unnecessary kernel // launch, and also prevents cudaGetLastError() from complaining about invalid launch args if (num_samples == 0) { return output; } AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad.scalar_type(), "_embedding_bag_per_sample_weights_backward_cuda", [&]() { AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "_embedding_bag_per_sample_weights_backward_cuda", [&]() { _embedding_bag_per_sample_weights_backward_kernel<scalar_t, index_t> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( grad.data_ptr<scalar_t>(), grad.stride(0), grad.stride(1), weight.data_ptr<scalar_t>(), weight.stride(0), weight.stride(1), indices.data_ptr<index_t>(), offset2bag.data_ptr<index_t>(), num_samples, embedding_features, output.data_ptr<scalar_t>(), padding_idx); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); } ); return output; } } }
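A hedged aside (not part of the ATen source above): the EmbeddingBag forward and max-backward kernels split their work into numBags * chunksPerBag chunks, where each chunk is blockDim.x consecutive feature columns of one bag and thread blocks walk the chunk list with a stride of gridDim.x * blockDim.y. The small host-only C++ sketch below replays that index arithmetic for made-up sizes (numBags, featureSize, blockDimX are illustrative, not taken from the file) so the chunk -> (bag, featureDim) mapping is easy to check by eye.

// Standalone sketch of the chunk indexing used by EmbeddingBag_updateOutputKernel_*.
// All sizes here are hypothetical; only the arithmetic mirrors the kernels above.
#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  const int64_t numBags = 3, featureSize = 70;   // made-up problem size
  const int64_t blockDimX = 32;                  // threads along the feature axis
  const int64_t chunksPerBag = (featureSize + blockDimX - 1) / blockDimX;  // ceil_div
  const int64_t numChunks = numBags * chunksPerBag;

  for (int64_t chunk = 0; chunk < numChunks; ++chunk) {
    const int64_t bag = chunk / chunksPerBag;                        // which bag this chunk serves
    const int64_t firstFeature = (chunk % chunksPerBag) * blockDimX; // first feature column of the chunk
    // In the kernel, threadIdx.x in [0, blockDimX) is added to firstFeature and
    // lanes that fall past featureSize simply skip the body.
    std::printf("chunk %2lld -> bag %lld, features [%lld, %lld)\n",
                (long long)chunk, (long long)bag, (long long)firstFeature,
                (long long)std::min<int64_t>(firstFeature + blockDimX, featureSize));
  }
  return 0;
}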
48e7aee58abcf7d8fe1425348b72493b7b473472.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Indexing: idx = pixel/voxel index in cartesian coordinates cidx = center index in cartesian coordinates linear_idx = pixel/voxel index in flat array linear_cidx = center index in flat array Center Stride: c_stride = number_of_features + image_dimention center_addr = linear_cidx * c_stride Image Stride: z_stride = image_shape.y * image_shape.x y_stride = image_shape.x x_stride = 1 Transformations 3D: linear_idx = idx.z * z_stride + idx.y * y_stride + idx.x pixel_addr = linear_idx * n_features idx.z = linear_idx / z_stride plane_idx = linear_idx % z_stride idx.y = plane_idx / y_stride idx.x = plane_idx % y_stride Transformations 2D: linear_idx = idx.y * y_stride + idx.x pixel_addr = linear_idx * n_features idx.y = linear_idx / y_stride idx.x = linear_idx % y_stride */ #define DLIMIT 99999999 #define N_FEATURES {{ n_features }} #define N_CLUSTERS {{ n_clusters }} #define SS {{ SS }}f #define __min(a, b) (((a) < (b)) ? (a) : (b)) #define __max(a, b) (((a) >= (b)) ? (a) : (b)) #define sp_grid_x {{ sp_grid[0] }} #define sp_grid_y {{ sp_grid[1] }} #define sp_grid_z {{ sp_grid[2] }} #define sp_shape_x {{ sp_shape[0] }} #define sp_shape_y {{ sp_shape[1] }} #define sp_shape_z {{ sp_shape[2] }} #define im_shape_x {{ im_shape[0] }} #define im_shape_y {{ im_shape[1] }} #define im_shape_z {{ im_shape[2] }} #define spacing_x {{ spacing[0] }} #define spacing_y {{ spacing[1] }} #define spacing_z {{ spacing[2] }} __device__ float slic_distance(const int3 idx, const float* pixel, const long center_addr, const float* centers ) { // Color diff float color_diff = 0; for ( int w = 0; w < N_FEATURES; w++ ) { float d = pixel[w] - centers[center_addr + w]; color_diff += d * d; } // Position diff float3 pd; pd.z = (idx.z - centers[center_addr + N_FEATURES + 0]) * spacing_z; pd.y = (idx.y - centers[center_addr + N_FEATURES + 1]) * spacing_y; pd.x = (idx.x - centers[center_addr + N_FEATURES + 2]) * spacing_x; float position_diff = pd.z * pd.z + pd.y * pd.y + pd.x * pd.x; float dist = color_diff + position_diff / (SS); return dist; } __global__ void init_clusters(const float* data, float* centers ) { const long linear_cidx = threadIdx.x + (blockIdx.x * blockDim.x); if ( linear_cidx >= N_CLUSTERS ) { return; } // calculating the (0,0,0) index of each superpixel block // using linear to cartesian index transformation int3 cidx; int plane_size = sp_grid_y * sp_grid_x; cidx.z = linear_cidx / plane_size; int plane_idx = linear_cidx % plane_size; cidx.y = plane_idx / sp_grid_x; cidx.x = plane_idx % sp_grid_x; // centering index into middle of suprepixel block cidx.z = cidx.z * sp_shape_z + sp_shape_z / 2; cidx.y = cidx.y * sp_shape_y + sp_shape_y / 2; cidx.x = cidx.x * sp_shape_x + sp_shape_x / 2; //saving cluster center positions // note: the color is not initialized, but is kept at zero. 
const int c_stride = N_FEATURES + 3; centers[linear_cidx * c_stride + N_FEATURES + 0] = cidx.z; centers[linear_cidx * c_stride + N_FEATURES + 1] = cidx.y; centers[linear_cidx * c_stride + N_FEATURES + 2] = cidx.x; } __global__ void expectation(const float* data, const float* centers, unsigned int* labels ) { int3 idx; idx.z = threadIdx.x + (blockIdx.x * blockDim.x); idx.y = threadIdx.y + (blockIdx.y * blockDim.y); idx.x = threadIdx.z + (blockIdx.z * blockDim.z); if ( idx.x >= im_shape_x || idx.y >= im_shape_y || idx.z >= im_shape_z ) { return; } long z_stride = im_shape_x * im_shape_y; long y_stride = im_shape_x; const long linear_idx = idx.z * z_stride + idx.y * y_stride + idx.x; const long pixel_addr = linear_idx * N_FEATURES; float pixel[N_FEATURES]; for ( int w = 0; w < N_FEATURES; w++ ) { pixel[w] = data[pixel_addr + w]; } int4 cidx, iter_cidx; long iter_linear_cidx; long closest_linear_cidx = 0; // approx center grid positoin cidx.z = __max(0, __min(idx.z / sp_shape_z, sp_grid_z - 1)); cidx.y = __max(0, __min(idx.y / sp_shape_y, sp_grid_y - 1)); cidx.x = __max(0, __min(idx.x / sp_shape_x, sp_grid_x - 1)); const int c_stride = N_FEATURES + 3; float minimum_distance = DLIMIT; const int R = 2; for ( int k = -R; k <= R; k++ ) { for ( int j = -R; j <= R; j++ ) { for ( int i = -R; i <= R; i++ ) { iter_cidx.z = cidx.z + k; iter_cidx.y = cidx.y + j; iter_cidx.x = cidx.x + i; if ( iter_cidx.y < 0 || iter_cidx.y >= sp_grid_y || iter_cidx.z < 0 || iter_cidx.z >= sp_grid_z || iter_cidx.x < 0 || iter_cidx.x >= sp_grid_x ) {continue;} iter_linear_cidx = iter_cidx.z * sp_grid_y * sp_grid_x + iter_cidx.y * sp_grid_x + iter_cidx.x; long iter_center_addr = iter_linear_cidx * c_stride; if ( centers[iter_center_addr] == DLIMIT ) { continue; } float dist = slic_distance(idx, pixel, iter_center_addr, centers); // Wrapup if ( dist < minimum_distance ) { minimum_distance = dist; closest_linear_cidx = iter_linear_cidx; } } } } labels[linear_idx] = closest_linear_cidx + 1; } __global__ void maximization(const float* data, const unsigned int* labels, float* centers ) { const long linear_cidx = threadIdx.x + (blockIdx.x * blockDim.x); const int c_stride = N_FEATURES + 3; const long center_addr = linear_cidx * c_stride; if ( linear_cidx >= N_CLUSTERS ) { return; } int3 cidx; cidx.z = (int) centers[center_addr + N_FEATURES + 0]; cidx.y = (int) centers[center_addr + N_FEATURES + 1]; cidx.x = (int) centers[center_addr + N_FEATURES + 2]; float ratio = 2.0f; int3 from; from.z = __max(cidx.z - sp_shape_z * ratio, 0); from.y = __max(cidx.y - sp_shape_y * ratio, 0); from.x = __max(cidx.x - sp_shape_x * ratio, 0); int3 to; to.z = __min(cidx.z + sp_shape_z * ratio, im_shape_z); to.y = __min(cidx.y + sp_shape_y * ratio, im_shape_y); to.x = __min(cidx.x + sp_shape_x * ratio, im_shape_x); float f[c_stride]; for ( int k = 0; k < c_stride; k++ ) {f[k] = 0;} long z_stride = im_shape_x * im_shape_y; long y_stride = im_shape_x; long count = 0; int3 p; for ( p.z = from.z; p.z < to.z; p.z++ ) { for ( p.y = from.y; p.y < to.y; p.y++ ) { for ( p.x = from.x; p.x < to.x; p.x++ ) { long linear_idx = p.z * z_stride + p.y * y_stride + p.x; long pixel_addr = linear_idx * N_FEATURES; if ( labels[linear_idx] == linear_cidx + 1 ) { for ( int w = 0; w < N_FEATURES; w++ ) { f[w] += data[pixel_addr + w]; } f[N_FEATURES + 0] += p.z; f[N_FEATURES + 1] += p.y; f[N_FEATURES + 2] += p.x; count += 1; } } } } if ( count > 0 ) { for ( int w = 0; w < c_stride; w++ ) { centers[center_addr + w] = f[w] / count; } } else { centers[center_addr] = DLIMIT; } }
48e7aee58abcf7d8fe1425348b72493b7b473472.cu
/* Indexing: idx = pixel/voxel index in cartesian coordinates cidx = center index in cartesian coordinates linear_idx = pixel/voxel index in flat array linear_cidx = center index in flat array Center Stride: c_stride = number_of_features + image_dimention center_addr = linear_cidx * c_stride Image Stride: z_stride = image_shape.y * image_shape.x y_stride = image_shape.x x_stride = 1 Transformations 3D: linear_idx = idx.z * z_stride + idx.y * y_stride + idx.x pixel_addr = linear_idx * n_features idx.z = linear_idx / z_stride plane_idx = linear_idx % z_stride idx.y = plane_idx / y_stride idx.x = plane_idx % y_stride Transformations 2D: linear_idx = idx.y * y_stride + idx.x pixel_addr = linear_idx * n_features idx.y = linear_idx / y_stride idx.x = linear_idx % y_stride */ #define DLIMIT 99999999 #define N_FEATURES {{ n_features }} #define N_CLUSTERS {{ n_clusters }} #define SS {{ SS }}f #define __min(a, b) (((a) < (b)) ? (a) : (b)) #define __max(a, b) (((a) >= (b)) ? (a) : (b)) #define sp_grid_x {{ sp_grid[0] }} #define sp_grid_y {{ sp_grid[1] }} #define sp_grid_z {{ sp_grid[2] }} #define sp_shape_x {{ sp_shape[0] }} #define sp_shape_y {{ sp_shape[1] }} #define sp_shape_z {{ sp_shape[2] }} #define im_shape_x {{ im_shape[0] }} #define im_shape_y {{ im_shape[1] }} #define im_shape_z {{ im_shape[2] }} #define spacing_x {{ spacing[0] }} #define spacing_y {{ spacing[1] }} #define spacing_z {{ spacing[2] }} __device__ float slic_distance(const int3 idx, const float* pixel, const long center_addr, const float* centers ) { // Color diff float color_diff = 0; for ( int w = 0; w < N_FEATURES; w++ ) { float d = pixel[w] - centers[center_addr + w]; color_diff += d * d; } // Position diff float3 pd; pd.z = (idx.z - centers[center_addr + N_FEATURES + 0]) * spacing_z; pd.y = (idx.y - centers[center_addr + N_FEATURES + 1]) * spacing_y; pd.x = (idx.x - centers[center_addr + N_FEATURES + 2]) * spacing_x; float position_diff = pd.z * pd.z + pd.y * pd.y + pd.x * pd.x; float dist = color_diff + position_diff / (SS); return dist; } __global__ void init_clusters(const float* data, float* centers ) { const long linear_cidx = threadIdx.x + (blockIdx.x * blockDim.x); if ( linear_cidx >= N_CLUSTERS ) { return; } // calculating the (0,0,0) index of each superpixel block // using linear to cartesian index transformation int3 cidx; int plane_size = sp_grid_y * sp_grid_x; cidx.z = linear_cidx / plane_size; int plane_idx = linear_cidx % plane_size; cidx.y = plane_idx / sp_grid_x; cidx.x = plane_idx % sp_grid_x; // centering index into middle of suprepixel block cidx.z = cidx.z * sp_shape_z + sp_shape_z / 2; cidx.y = cidx.y * sp_shape_y + sp_shape_y / 2; cidx.x = cidx.x * sp_shape_x + sp_shape_x / 2; //saving cluster center positions // note: the color is not initialized, but is kept at zero. 
const int c_stride = N_FEATURES + 3; centers[linear_cidx * c_stride + N_FEATURES + 0] = cidx.z; centers[linear_cidx * c_stride + N_FEATURES + 1] = cidx.y; centers[linear_cidx * c_stride + N_FEATURES + 2] = cidx.x; } __global__ void expectation(const float* data, const float* centers, unsigned int* labels ) { int3 idx; idx.z = threadIdx.x + (blockIdx.x * blockDim.x); idx.y = threadIdx.y + (blockIdx.y * blockDim.y); idx.x = threadIdx.z + (blockIdx.z * blockDim.z); if ( idx.x >= im_shape_x || idx.y >= im_shape_y || idx.z >= im_shape_z ) { return; } long z_stride = im_shape_x * im_shape_y; long y_stride = im_shape_x; const long linear_idx = idx.z * z_stride + idx.y * y_stride + idx.x; const long pixel_addr = linear_idx * N_FEATURES; float pixel[N_FEATURES]; for ( int w = 0; w < N_FEATURES; w++ ) { pixel[w] = data[pixel_addr + w]; } int4 cidx, iter_cidx; long iter_linear_cidx; long closest_linear_cidx = 0; // approx center grid positoin cidx.z = __max(0, __min(idx.z / sp_shape_z, sp_grid_z - 1)); cidx.y = __max(0, __min(idx.y / sp_shape_y, sp_grid_y - 1)); cidx.x = __max(0, __min(idx.x / sp_shape_x, sp_grid_x - 1)); const int c_stride = N_FEATURES + 3; float minimum_distance = DLIMIT; const int R = 2; for ( int k = -R; k <= R; k++ ) { for ( int j = -R; j <= R; j++ ) { for ( int i = -R; i <= R; i++ ) { iter_cidx.z = cidx.z + k; iter_cidx.y = cidx.y + j; iter_cidx.x = cidx.x + i; if ( iter_cidx.y < 0 || iter_cidx.y >= sp_grid_y || iter_cidx.z < 0 || iter_cidx.z >= sp_grid_z || iter_cidx.x < 0 || iter_cidx.x >= sp_grid_x ) {continue;} iter_linear_cidx = iter_cidx.z * sp_grid_y * sp_grid_x + iter_cidx.y * sp_grid_x + iter_cidx.x; long iter_center_addr = iter_linear_cidx * c_stride; if ( centers[iter_center_addr] == DLIMIT ) { continue; } float dist = slic_distance(idx, pixel, iter_center_addr, centers); // Wrapup if ( dist < minimum_distance ) { minimum_distance = dist; closest_linear_cidx = iter_linear_cidx; } } } } labels[linear_idx] = closest_linear_cidx + 1; } __global__ void maximization(const float* data, const unsigned int* labels, float* centers ) { const long linear_cidx = threadIdx.x + (blockIdx.x * blockDim.x); const int c_stride = N_FEATURES + 3; const long center_addr = linear_cidx * c_stride; if ( linear_cidx >= N_CLUSTERS ) { return; } int3 cidx; cidx.z = (int) centers[center_addr + N_FEATURES + 0]; cidx.y = (int) centers[center_addr + N_FEATURES + 1]; cidx.x = (int) centers[center_addr + N_FEATURES + 2]; float ratio = 2.0f; int3 from; from.z = __max(cidx.z - sp_shape_z * ratio, 0); from.y = __max(cidx.y - sp_shape_y * ratio, 0); from.x = __max(cidx.x - sp_shape_x * ratio, 0); int3 to; to.z = __min(cidx.z + sp_shape_z * ratio, im_shape_z); to.y = __min(cidx.y + sp_shape_y * ratio, im_shape_y); to.x = __min(cidx.x + sp_shape_x * ratio, im_shape_x); float f[c_stride]; for ( int k = 0; k < c_stride; k++ ) {f[k] = 0;} long z_stride = im_shape_x * im_shape_y; long y_stride = im_shape_x; long count = 0; int3 p; for ( p.z = from.z; p.z < to.z; p.z++ ) { for ( p.y = from.y; p.y < to.y; p.y++ ) { for ( p.x = from.x; p.x < to.x; p.x++ ) { long linear_idx = p.z * z_stride + p.y * y_stride + p.x; long pixel_addr = linear_idx * N_FEATURES; if ( labels[linear_idx] == linear_cidx + 1 ) { for ( int w = 0; w < N_FEATURES; w++ ) { f[w] += data[pixel_addr + w]; } f[N_FEATURES + 0] += p.z; f[N_FEATURES + 1] += p.y; f[N_FEATURES + 2] += p.x; count += 1; } } } } if ( count > 0 ) { for ( int w = 0; w < c_stride; w++ ) { centers[center_addr + w] = f[w] / count; } } else { centers[center_addr] = DLIMIT; } }
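A hedged aside (not part of the generated kernel source above): the three SLIC kernels are clearly meant to be driven by a host-side EM-style loop, seeding the centers once and then alternating assignment and re-estimation. The sketch below shows one plausible driver, assuming the {{ ... }} template placeholders have already been substituted, that the kernel definitions are in scope, and that d_data, d_centers and d_labels are device buffers sized by the caller; the grid/block shapes, thread count and iteration count are illustrative choices, not values taken from the file.

// Plausible host driver for the kernels above (assumes their definitions are visible here).
#include <cuda_runtime.h>

void run_slic(const float* d_data, float* d_centers, unsigned int* d_labels,
              int n_clusters, dim3 im_blocks, dim3 im_threads, int n_iter) {
  // One thread per cluster for the center-side kernels.
  const int threads = 256;
  const int cluster_blocks = (n_clusters + threads - 1) / threads;

  init_clusters<<<cluster_blocks, threads>>>(d_data, d_centers);

  for (int it = 0; it < n_iter; ++it) {
    // Assign each voxel to the best center in its 5x5x5 neighbourhood of the center grid...
    // (note: expectation maps threadIdx.x -> z, .y -> y, .z -> x, so im_blocks / im_threads
    // must be laid out to cover the volume in that order)
    expectation<<<im_blocks, im_threads>>>(d_data, d_centers, d_labels);
    // ...then recompute every center from the voxels currently labelled with it.
    maximization<<<cluster_blocks, threads>>>(d_data, d_labels, d_centers);
  }
  cudaDeviceSynchronize();
}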
2cf7582314423d6d72062862d337dc65738054eb.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/hip/HIPBlas.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/native/hip/vol2col.cuh> namespace at { namespace native { namespace { static inline void slow_conv_transpose3d_shape_check( const Tensor& input, const Tensor& grad_output, const Tensor& weight, const Tensor& bias, int kernel_depth, int kernel_width, int kernel_height, int stride_depth, int stride_width, int stride_height, int padding_depth, int padding_width, int padding_height, int dilation_depth, int dilation_width, int dilation_height, int output_padding_depth, int output_padding_width, int output_padding_height, int weight_nullable) { TORCH_CHECK( input.numel() != 0 && (input.dim() == 4 || input.dim() == 5), "non-empty 4D or 5D (batch mode) tensor expected for input, but got: ", input.sizes()); TORCH_CHECK( stride_depth > 0 && stride_width > 0 && stride_height > 0, "stride should be greater than zero, but got stride_depth: ", stride_depth, " stride_height: ", stride_height, " stride_width: ", stride_width); TORCH_CHECK( dilation_depth > 0 && dilation_width > 0 && dilation_height > 0, "dilation should be greater than zero, but got dilation_depth: ", dilation_depth, ", dilation_height: ", dilation_height, ", dilation_width: ", dilation_width); TORCH_CHECK( (output_padding_depth < stride_depth || output_padding_depth < dilation_depth) && (output_padding_width < stride_width || output_padding_width < dilation_width) && (output_padding_height < stride_height || output_padding_height < dilation_height), "output padding must be smaller than either stride or dilation,", " but got output_padding_depth: ", output_padding_depth, " output_padding_height: ", output_padding_height, " output_padding_width: ", output_padding_width, " stride_depth: ", stride_depth, " stride_height: ", stride_height, " stride_width: ", stride_width, " dilation_depth: ", dilation_depth, " dilation_height: ", dilation_height, " dilation_width: ", dilation_width); // number of input & output planes and kernel size is indirectly defined by // the weight tensor if (weight.defined()) { TORCH_CHECK( weight.numel() != 0 && weight.dim() == 5, "non-empty 5D (n_output_plane x n_input_plane ", "x kernel_depth x kernel_height x kernel_width) tensor ", "expected for weight, but got: ", weight.sizes()); if (bias.defined()) { check_dim_size(bias, 1, 0, weight.size(1)); } } else if (!weight_nullable) { AT_ERROR("weight tensor is expected to be non-nullable"); } int ndim = input.dim(); int dimf = 0; int dimd = 1; int dimh = 2; int dimw = 3; if (ndim == 5) { dimf++; dimd++; dimh++; dimw++; } if (weight.defined()) { const int64_t n_input_plane = weight.size(0); check_dim_size(input, ndim, dimf, n_input_plane); } int64_t input_width = input.size(dimw); int64_t input_height = input.size(dimh); int64_t input_depth = input.size(dimd); int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth + (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth; int64_t output_height = (input_height - 1) * stride_height - 2 * padding_height + (dilation_height * (kernel_height - 1) + 1) + output_padding_height; int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width + (dilation_width * (kernel_width - 1) + 1) + output_padding_width; if (output_depth < 1 || output_width < 1 || output_height < 1) { 
AT_ERROR( "Given input size per channel: (", input_depth, " x ", input_height, " x ", input_width, "). Calculated output size per channel: (", output_depth, " x ", output_height, " x ", output_width, "). Output size is too small"); } if (grad_output.defined()) { if (weight.defined()) { const int64_t n_output_plane = weight.size(1); check_dim_size(grad_output, ndim, dimf, n_output_plane); } else if (bias.defined()) { const int64_t n_output_plane = bias.size(0); check_dim_size(grad_output, ndim, dimf, n_output_plane); } check_dim_size(grad_output, ndim, dimd, output_depth); check_dim_size(grad_output, ndim, dimh, output_height); check_dim_size(grad_output, ndim, dimw, output_width); } } void slow_conv_transpose3d_out_cuda_template( Tensor& output, const Tensor& input_, const Tensor& weight_, IntArrayRef kernel_size, const Tensor& bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, Tensor& finput, Tensor& fgrad_input) { TORCH_CHECK( kernel_size.size() == 3, "It is expected kernel_size equals to 3, but got size ", kernel_size.size()); TORCH_CHECK( dilation.size() == 3, "It is expected dilation equals to 3, but got size ", dilation.size()); TORCH_CHECK( padding.size() == 3, "It is expected padding equals to 3, but got size ", padding.size()); TORCH_CHECK( stride.size() == 3, "It is expected stride equals to 3, but got size ", stride.size()); TORCH_CHECK( output_padding.size() == 3, "It is expected stride equals to 3, but got size ", output_padding.size()); int64_t kernel_depth = kernel_size[0]; int64_t kernel_height = kernel_size[1]; int64_t kernel_width = kernel_size[2]; int64_t dilation_depth = dilation[0]; int64_t dilation_height = dilation[1]; int64_t dilation_width = dilation[2]; int64_t padding_depth = padding[0]; int64_t padding_height = padding[1]; int64_t padding_width = padding[2]; int64_t stride_depth = stride[0]; int64_t stride_height = stride[1]; int64_t stride_width = stride[2]; int64_t output_padding_depth = output_padding[0]; int64_t output_padding_height = output_padding[1]; int64_t output_padding_width = output_padding[2]; Tensor columns = finput; Tensor ones = fgrad_input; int n_input_plane = weight_.size(0); int n_output_plane = weight_.size(1); TensorArg input_arg{input_, "input", 1}, output_arg{output, "output", 2}, weight_arg{weight_, "weight", 3}, bias_arg{bias, "bias", 4}, columns_arg{columns, "columns", 5}, ones_arg{ones, "ones", 6}; checkAllSameGPU( "slow_conv_transpose3d_out_cuda", {input_arg, output_arg, weight_arg, bias_arg, columns_arg, ones_arg}); slow_conv_transpose3d_shape_check( input_, Tensor(), weight_, bias, kernel_depth, kernel_width, kernel_height, stride_depth, stride_width, stride_height, padding_depth, padding_width, padding_height, dilation_depth, dilation_width, dilation_height, output_padding_depth, output_padding_width, output_padding_height, 0); TORCH_CHECK( !bias.defined() || bias.is_contiguous(), "bias tensor has to be contiguous"); Tensor input = input_.contiguous(); Tensor weight = weight_.contiguous(); int is_batch = false; if (input.dim() == 4) { // Force batch is_batch = true; input.resize_( {1, input.size(0), input.size(1), input.size(2), input.size(3)}); } int64_t input_width = input.size(4); int64_t input_height = input.size(3); int64_t input_depth = input.size(2); int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth + (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth; int64_t output_height = (input_height - 1) * stride_height - 2 * padding_height + 
(dilation_height * (kernel_height - 1) + 1) + output_padding_height; int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width + (dilation_width * (kernel_width - 1) + 1) + output_padding_width; // Batch size + input planes int64_t batch_size = input.size(0); // Resize output output.resize_( {batch_size, n_output_plane, output_depth, output_height, output_width}); // Resize temporary columns columns.resize_({n_output_plane * kernel_width * kernel_height * kernel_depth, input_depth * input_height * input_width}); // Define a buffer of ones, for bias accumulation // Note: this buffer can be shared with other modules, it only ever gets // increased, and always contains ones. if (ones.dim() != 3 || ones.size(0) * ones.size(1) * ones.size(2) < output_depth * output_height * output_width) { // Resize plane and fill with ones... ones.resize_({output_depth, output_height, output_width}); ones.fill_(1); } AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "slow_conv_transpose3d_out_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; // Helpers Tensor input_n; Tensor output_n; // For each elt in batch, do: for (int elt = 0; elt < batch_size; elt++) { // Matrix mulitply per output: input_n = input.select(0, elt); output_n = output.select(0, elt); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m = weight.size(1) * weight.size(2) * weight.size(3) * weight.size(4); int64_t n = columns.size(1); int64_t k = weight.size(0); // Do GEMM (note: this is a bit confusing because gemm assumes // column-major matrices) at::cuda::blas::gemm<scalar_t>( at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), 'n', 't', n, m, k, static_cast<scalar_t>(1), input_n.data_ptr<scalar_t>(), n, weight.data_ptr<scalar_t>(), m, static_cast<scalar_t>(0), columns.data_ptr<scalar_t>(), n); // Unpack columns back into input: at::native::col2vol<scalar_t, accscalar_t>( at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), columns.data_ptr<scalar_t>(), n_output_plane, output_depth, output_height, output_width, input_depth, input_height, input_width, kernel_depth, kernel_height, kernel_width, padding_depth, padding_height, padding_width, stride_depth, stride_height, stride_width, dilation_depth, dilation_height, dilation_width, output_n.data_ptr<scalar_t>()); // Do Bias after: // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m_ = n_output_plane; int64_t n_ = output_depth * output_height * output_width; int64_t k_ = 1; // Do GEMM (note: this is a bit confusing because gemm assumes // column-major matrices) if (bias.defined()) { at::cuda::blas::gemm<scalar_t>( at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), 't', 'n', n_, m_, k_, static_cast<scalar_t>(1), ones.data_ptr<scalar_t>(), k_, bias.data_ptr<scalar_t>(), k_, static_cast<scalar_t>(1), output_n.data_ptr<scalar_t>(), n_); } } // Resize output if (is_batch) { output.resize_( {n_output_plane, output_depth, output_height, output_width}); input.resize_( {n_input_plane, input_depth, input_height, input_width}); } }); } void slow_conv_transpose3d_backward_out_cuda_template( const Tensor& input_, const Tensor& grad_output_, Tensor& grad_input, const Tensor& weight_, const Tensor& finput, const Tensor& fgrad_input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) { TORCH_CHECK( kernel_size.size() == 3, "It is expected kernel_size equals to 3, but got size ", kernel_size.size()); 
TORCH_CHECK( dilation.size() == 3, "It is expected dilation equals to 3, but got size ", dilation.size()); TORCH_CHECK( padding.size() == 3, "It is expected padding equals to 3, but got size ", padding.size()); TORCH_CHECK( stride.size() == 3, "It is expected stride equals to 3, but got size ", stride.size()); TORCH_CHECK( output_padding.size() == 3, "It is expected stride equals to 3, but got size ", output_padding.size()); Tensor grad_columns = finput; int n_input_plane = weight_.size(0); int n_output_plane = weight_.size(1); int64_t kernel_depth = kernel_size[0]; int64_t kernel_height = kernel_size[1]; int64_t kernel_width = kernel_size[2]; int64_t dilation_depth = dilation[0]; int64_t dilation_height = dilation[1]; int64_t dilation_width = dilation[2]; int64_t padding_depth = padding[0]; int64_t padding_height = padding[1]; int64_t padding_width = padding[2]; int64_t stride_depth = stride[0]; int64_t stride_height = stride[1]; int64_t stride_width = stride[2]; int64_t output_padding_depth = output_padding[0]; int64_t output_padding_height = output_padding[1]; int64_t output_padding_width = output_padding[2]; TensorArg input_arg{input_, "input", 1}, grad_output_arg{grad_output_, "grad_output", 2}, weight_arg{weight_, "weight", 3}, grad_columns_arg{grad_columns, "grad_columns", 4}, grad_input_arg{grad_input, "grad_input", 5}; checkAllSameGPU( "slow_conv_transpose3d_backward_out_cuda", {input_arg, grad_output_arg, weight_arg, grad_columns_arg, grad_input_arg}); slow_conv_transpose3d_shape_check( input_, grad_output_, weight_, Tensor(), kernel_depth, kernel_width, kernel_height, stride_depth, stride_width, stride_height, padding_depth, padding_width, padding_height, dilation_depth, dilation_width, dilation_height, output_padding_depth, output_padding_width, output_padding_height, 0); Tensor input = input_.contiguous(); Tensor grad_output = grad_output_.contiguous(); Tensor weight = weight_.contiguous(); bool is_batch = false; if (input.dim() == 4) { // Force batch is_batch = true; input.resize_( {1, input.size(0), input.size(1), input.size(2), input.size(3)}); grad_output.resize_({1, grad_output.size(0), grad_output.size(1), grad_output.size(2), grad_output.size(3)}); } int64_t input_width = input.size(4); int64_t input_height = input.size(3); int64_t input_depth = input.size(2); int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth + (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth; int64_t output_height = (input_height - 1) * stride_height - 2 * padding_height + (dilation_height * (kernel_height - 1) + 1) + output_padding_height; int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width + (dilation_width * (kernel_width - 1) + 1) + output_padding_width; // Batch size + input planes int64_t batch_size = input.size(0); // Resize output grad_input.resize_( {batch_size, n_input_plane, input_depth, input_height, input_width}); // Resize temporary columns grad_columns.resize_( {n_output_plane * kernel_width * kernel_height * kernel_depth, input_depth * input_height * input_width}); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "slow_conv_transpose3d_backward_out_cuda", [&] { // Helpers Tensor grad_input_n; Tensor grad_output_n; // For each elt in batch, do: for (int elt = 0; elt < batch_size; elt++) { // Matrix mulitply per sample: grad_input_n = grad_input.select(0, elt); grad_output_n = grad_output.select(0, elt); // Extract columns: at::native::vol2col<scalar_t>( at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), 
grad_output_n.data_ptr<scalar_t>(), n_output_plane, output_depth, output_height, output_width, input_depth, input_height, input_width, kernel_depth, kernel_height, kernel_width, padding_depth, padding_height, padding_width, stride_depth, stride_height, stride_width, dilation_depth, dilation_height, dilation_width, grad_columns.data_ptr<scalar_t>()); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m = weight.size(0); int64_t n = grad_columns.size(1); int64_t k = weight.size(1) * weight.size(2) * weight.size(3) * weight.size(4); // Do GEMM (note: this is a bit confusing because gemm assumes // column-major matrices) at::cuda::blas::gemm<scalar_t>( at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), 'n', 'n', n, m, k, static_cast<scalar_t>(1), grad_columns.data_ptr<scalar_t>(), n, weight.data_ptr<scalar_t>(), k, static_cast<scalar_t>(0), grad_input_n.data_ptr<scalar_t>(), n); } // Resize output if (is_batch) { grad_output.resize_( {n_output_plane, output_depth, output_height, output_width}); input.resize_( {n_input_plane, input_depth, input_height, input_width}); grad_input.resize_( {n_input_plane, input_depth, input_height, input_width}); } }); } void slow_conv_transpose3d_acc_grad_parameters_cuda( const Tensor& input_, const Tensor& grad_output_, Tensor& grad_weight, Tensor& grad_bias, const Tensor& finput, const Tensor& fgrad_input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, int scale_) { TORCH_CHECK( kernel_size.size() == 3, "It is expected kernel_size equals to 3, but got size ", kernel_size.size()); TORCH_CHECK( dilation.size() == 3, "It is expected dilation equals to 3, but got size ", dilation.size()); TORCH_CHECK( padding.size() == 3, "It is expected padding equals to 3, but got size ", padding.size()); TORCH_CHECK( stride.size() == 3, "It is expected stride equals to 3, but got size ", stride.size()); TORCH_CHECK( output_padding.size() == 3, "It is expected stride equals to 3, but got size ", output_padding.size()); int64_t kernel_depth = kernel_size[0]; int64_t kernel_height = kernel_size[1]; int64_t kernel_width = kernel_size[2]; int64_t dilation_depth = dilation[0]; int64_t dilation_height = dilation[1]; int64_t dilation_width = dilation[2]; int64_t padding_depth = padding[0]; int64_t padding_height = padding[1]; int64_t padding_width = padding[2]; int64_t stride_depth = stride[0]; int64_t stride_height = stride[1]; int64_t stride_width = stride[2]; int64_t output_padding_depth = output_padding[0]; int64_t output_padding_height = output_padding[1]; int64_t output_padding_width = output_padding[2]; Tensor columns = finput; Tensor ones = fgrad_input; TensorArg input_arg{input_, "input", 1}, grad_output_arg{grad_output_, "grad_output", 2}, grad_weight_arg{grad_weight, "grad_weight", 3}, grad_bias_arg{grad_bias, "grad_bias", 4}, columns_arg{columns, "columns", 5}, ones_arg{ones, "ones", 6}; checkAllSameGPU( "slow_conv_transpose3d_acc_grad_parameters_cuda", {input_arg, grad_output_arg, grad_weight_arg, grad_bias_arg, columns_arg, ones_arg}); slow_conv_transpose3d_shape_check( input_, grad_output_, grad_weight, grad_bias, kernel_depth, kernel_width, kernel_height, stride_depth, stride_width, stride_height, padding_depth, padding_width, padding_height, dilation_depth, dilation_width, dilation_height, output_padding_depth, output_padding_width, output_padding_height, 1); int n_output_plane; if (grad_weight.defined()) { n_output_plane = grad_weight.size(1); } 
else if (grad_bias.defined()) { n_output_plane = grad_bias.size(0); } else { return; } if (grad_weight.defined()) { TORCH_CHECK( grad_weight.is_contiguous(), "grad_weight needs to be contiguous"); } if (grad_bias.defined()) { TORCH_CHECK(grad_bias.is_contiguous(), "grad_bias needs to be contiguous"); TORCH_CHECK(ones.is_contiguous(), "ones needs to be contiguous"); } Tensor input = input_.contiguous(); Tensor grad_output = grad_output_.contiguous(); bool is_batch = false; if (input.dim() == 4) { // Force batch is_batch = true; input.resize_( {1, input.size(0), input.size(1), input.size(2), input.size(3)}); grad_output.resize_({1, grad_output.size(0), grad_output.size(1), grad_output.size(2), grad_output.size(3)}); } int64_t input_width = input.size(4); int64_t input_height = input.size(3); int64_t input_depth = input.size(2); int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth + (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth; int64_t output_height = (input_height - 1) * stride_height - 2 * padding_height + (dilation_height * (kernel_height - 1) + 1) + output_padding_height; int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width + (dilation_width * (kernel_width - 1) + 1) + output_padding_width; // Batch size + input planes int64_t batch_size = input.size(0); // Define a buffer of ones, for bias accumulation if (ones.dim() != 3 || ones.size(0) * ones.size(1) * ones.size(2) < output_depth * output_height * output_width) { // Resize plane and fill with ones... ones.resize_({output_depth, output_height, output_width}); ones.fill_(1); } // Resize temporary columns columns.resize_({n_output_plane * kernel_width * kernel_height * kernel_depth, input_depth * input_height * input_width}); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "slow_conv_transpose3d_acc_grad_parameters_cuda", [&] { // Helpers Tensor input_n; Tensor grad_output_n; scalar_t scale = static_cast<scalar_t>(scale_); // For each elt in batch, do: for (int elt = 0; elt < batch_size; elt++) { // Matrix mulitply per output: grad_output_n = grad_output.select(0, elt); // Do Weight: if (grad_weight.defined()) { // Matrix mulitply per output: input_n = input.select(0, elt); // Extract columns: at::native::vol2col<scalar_t>( at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_output_n.data_ptr<scalar_t>(), n_output_plane, output_depth, output_height, output_width, input_depth, input_height, input_width, kernel_depth, kernel_height, kernel_width, padding_depth, padding_height, padding_width, stride_depth, stride_height, stride_width, dilation_depth, dilation_height, dilation_width, columns.data_ptr<scalar_t>()); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t n = columns.size(0); // n_output_plane * kt * kh * kw int64_t m = input_n.size(0); // n_input_plane int64_t k = columns.size(1); // input_height * input_width // Do GEMM (note: this is a bit confusing because gemm assumes // column-major matrices) at::cuda::blas::gemm<scalar_t>( at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), 't', 'n', n, m, k, scale, columns.data_ptr<scalar_t>(), k, input_n.data_ptr<scalar_t>(), k, static_cast<scalar_t>(1), grad_weight.data_ptr<scalar_t>(), n); } // Do Bias: if (grad_bias.defined()) { // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m_ = n_output_plane; int64_t k_ = output_depth * output_height * output_width; // Do GEMV (note: this is a bit confusing 
because gemv assumes // column-major matrices) at::cuda::blas::gemv<scalar_t>( at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), 't', k_, m_, scale, grad_output_n.data_ptr<scalar_t>(), k_, ones.data_ptr<scalar_t>(), 1, static_cast<scalar_t>(1), grad_bias.data_ptr<scalar_t>(), 1); } } // Resize if (is_batch) { grad_output.resize_( {n_output_plane, output_depth, output_height, output_width}); input.resize_( {input.size(1), input_depth, input_height, input_width}); } }); } } // namespace Tensor& slow_conv_transpose3d_out_cuda( Tensor& output, const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, const Tensor& bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) { Tensor finput = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); Tensor fgrad = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); slow_conv_transpose3d_out_cuda_template( output, input, weight, kernel_size, bias, stride, padding, output_padding, dilation, finput, fgrad); return output; } Tensor slow_conv_transpose3d_cuda( const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, const Tensor& bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) { Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); Tensor finput = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); Tensor fgrad = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); slow_conv_transpose3d_out_cuda_template( output, input, weight, kernel_size, bias, stride, padding, output_padding, dilation, finput, fgrad); return output; } std::tuple<Tensor&, Tensor&, Tensor&> slow_conv_transpose3d_backward_out_cuda( Tensor& grad_input, Tensor& grad_weight, Tensor& grad_bias, const Tensor& grad_output, const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor& finput, const Tensor& fgrad) { if (grad_input.defined()) { slow_conv_transpose3d_backward_out_cuda_template( input, grad_output, grad_input, weight, finput, fgrad, kernel_size, stride, padding, output_padding, dilation); } if (grad_weight.defined()) { grad_weight.resize_(weight.sizes()); grad_weight.zero_(); } if (grad_bias.defined()) { grad_bias.resize_({weight.size(1)}); grad_bias.zero_(); } if (grad_weight.defined() || grad_bias.defined()) { slow_conv_transpose3d_acc_grad_parameters_cuda( input, grad_output, grad_weight, grad_bias, finput, fgrad, kernel_size, stride, padding, output_padding, dilation, 1); } return std::tuple<Tensor&, Tensor&, Tensor&>( grad_input, grad_weight, grad_bias); } std::tuple<Tensor, Tensor, Tensor> slow_conv_transpose3d_backward_cuda( const Tensor& grad_output, const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor& finput, const Tensor& fgrad, std::array<bool, 3> output_mask) { Tensor grad_input; Tensor grad_weight; Tensor grad_bias; if (output_mask[0]) { grad_input = at::empty({0}, grad_output.options()); } else { grad_input = Tensor(); } if (output_mask[1]) { grad_weight = at::empty({0}, grad_output.options()); } else { grad_weight = Tensor(); } if (output_mask[2]) { grad_bias = at::empty({0}, grad_output.options()); } else { grad_bias = Tensor(); } if (grad_input.defined()) { slow_conv_transpose3d_backward_out_cuda_template( input, grad_output, grad_input, weight, finput, fgrad, kernel_size, stride, padding, 
output_padding, dilation); } if (grad_weight.defined()) { grad_weight.resize_(weight.sizes()); grad_weight.zero_(); } if (grad_bias.defined()) { grad_bias.resize_({weight.size(1)}); grad_bias.zero_(); } if (grad_weight.defined() || grad_bias.defined()) { slow_conv_transpose3d_acc_grad_parameters_cuda( input, grad_output, grad_weight, grad_bias, finput, fgrad, kernel_size, stride, padding, output_padding, dilation, 1); } return std::tuple<Tensor, Tensor, Tensor>(grad_input, grad_weight, grad_bias); } } // namespace native } // namespace at
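A hedged aside (not part of the file above): slow_conv_transpose3d_shape_check and the forward/backward paths all recompute the same per-dimension output extent, (input - 1) * stride - 2 * padding + (dilation * (kernel - 1) + 1) + output_padding. The standalone helper below simply restates that formula with one worked example; the numbers in main() are made up for illustration.

// Output-extent formula applied per spatial dimension by the code above.
#include <cstdint>
#include <cstdio>

int64_t conv_transpose_out_size(int64_t input, int64_t stride, int64_t padding,
                                int64_t dilation, int64_t kernel,
                                int64_t output_padding) {
  return (input - 1) * stride - 2 * padding +
         (dilation * (kernel - 1) + 1) + output_padding;
}

int main() {
  // e.g. depth axis: input 8, stride 2, padding 1, dilation 1, kernel 3, output_padding 1
  std::printf("output_depth = %lld\n",
              (long long)conv_transpose_out_size(8, 2, 1, 1, 3, 1));  // prints 16
  return 0;
}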
2cf7582314423d6d72062862d337dc65738054eb.cu
#include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/cuda/CUDABlas.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/native/cuda/vol2col.cuh> namespace at { namespace native { namespace { static inline void slow_conv_transpose3d_shape_check( const Tensor& input, const Tensor& grad_output, const Tensor& weight, const Tensor& bias, int kernel_depth, int kernel_width, int kernel_height, int stride_depth, int stride_width, int stride_height, int padding_depth, int padding_width, int padding_height, int dilation_depth, int dilation_width, int dilation_height, int output_padding_depth, int output_padding_width, int output_padding_height, int weight_nullable) { TORCH_CHECK( input.numel() != 0 && (input.dim() == 4 || input.dim() == 5), "non-empty 4D or 5D (batch mode) tensor expected for input, but got: ", input.sizes()); TORCH_CHECK( stride_depth > 0 && stride_width > 0 && stride_height > 0, "stride should be greater than zero, but got stride_depth: ", stride_depth, " stride_height: ", stride_height, " stride_width: ", stride_width); TORCH_CHECK( dilation_depth > 0 && dilation_width > 0 && dilation_height > 0, "dilation should be greater than zero, but got dilation_depth: ", dilation_depth, ", dilation_height: ", dilation_height, ", dilation_width: ", dilation_width); TORCH_CHECK( (output_padding_depth < stride_depth || output_padding_depth < dilation_depth) && (output_padding_width < stride_width || output_padding_width < dilation_width) && (output_padding_height < stride_height || output_padding_height < dilation_height), "output padding must be smaller than either stride or dilation,", " but got output_padding_depth: ", output_padding_depth, " output_padding_height: ", output_padding_height, " output_padding_width: ", output_padding_width, " stride_depth: ", stride_depth, " stride_height: ", stride_height, " stride_width: ", stride_width, " dilation_depth: ", dilation_depth, " dilation_height: ", dilation_height, " dilation_width: ", dilation_width); // number of input & output planes and kernel size is indirectly defined by // the weight tensor if (weight.defined()) { TORCH_CHECK( weight.numel() != 0 && weight.dim() == 5, "non-empty 5D (n_output_plane x n_input_plane ", "x kernel_depth x kernel_height x kernel_width) tensor ", "expected for weight, but got: ", weight.sizes()); if (bias.defined()) { check_dim_size(bias, 1, 0, weight.size(1)); } } else if (!weight_nullable) { AT_ERROR("weight tensor is expected to be non-nullable"); } int ndim = input.dim(); int dimf = 0; int dimd = 1; int dimh = 2; int dimw = 3; if (ndim == 5) { dimf++; dimd++; dimh++; dimw++; } if (weight.defined()) { const int64_t n_input_plane = weight.size(0); check_dim_size(input, ndim, dimf, n_input_plane); } int64_t input_width = input.size(dimw); int64_t input_height = input.size(dimh); int64_t input_depth = input.size(dimd); int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth + (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth; int64_t output_height = (input_height - 1) * stride_height - 2 * padding_height + (dilation_height * (kernel_height - 1) + 1) + output_padding_height; int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width + (dilation_width * (kernel_width - 1) + 1) + output_padding_width; if (output_depth < 1 || output_width < 1 || output_height < 1) { AT_ERROR( "Given input size per channel: (", 
input_depth, " x ", input_height, " x ", input_width, "). Calculated output size per channel: (", output_depth, " x ", output_height, " x ", output_width, "). Output size is too small"); } if (grad_output.defined()) { if (weight.defined()) { const int64_t n_output_plane = weight.size(1); check_dim_size(grad_output, ndim, dimf, n_output_plane); } else if (bias.defined()) { const int64_t n_output_plane = bias.size(0); check_dim_size(grad_output, ndim, dimf, n_output_plane); } check_dim_size(grad_output, ndim, dimd, output_depth); check_dim_size(grad_output, ndim, dimh, output_height); check_dim_size(grad_output, ndim, dimw, output_width); } } void slow_conv_transpose3d_out_cuda_template( Tensor& output, const Tensor& input_, const Tensor& weight_, IntArrayRef kernel_size, const Tensor& bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, Tensor& finput, Tensor& fgrad_input) { TORCH_CHECK( kernel_size.size() == 3, "It is expected kernel_size equals to 3, but got size ", kernel_size.size()); TORCH_CHECK( dilation.size() == 3, "It is expected dilation equals to 3, but got size ", dilation.size()); TORCH_CHECK( padding.size() == 3, "It is expected padding equals to 3, but got size ", padding.size()); TORCH_CHECK( stride.size() == 3, "It is expected stride equals to 3, but got size ", stride.size()); TORCH_CHECK( output_padding.size() == 3, "It is expected stride equals to 3, but got size ", output_padding.size()); int64_t kernel_depth = kernel_size[0]; int64_t kernel_height = kernel_size[1]; int64_t kernel_width = kernel_size[2]; int64_t dilation_depth = dilation[0]; int64_t dilation_height = dilation[1]; int64_t dilation_width = dilation[2]; int64_t padding_depth = padding[0]; int64_t padding_height = padding[1]; int64_t padding_width = padding[2]; int64_t stride_depth = stride[0]; int64_t stride_height = stride[1]; int64_t stride_width = stride[2]; int64_t output_padding_depth = output_padding[0]; int64_t output_padding_height = output_padding[1]; int64_t output_padding_width = output_padding[2]; Tensor columns = finput; Tensor ones = fgrad_input; int n_input_plane = weight_.size(0); int n_output_plane = weight_.size(1); TensorArg input_arg{input_, "input", 1}, output_arg{output, "output", 2}, weight_arg{weight_, "weight", 3}, bias_arg{bias, "bias", 4}, columns_arg{columns, "columns", 5}, ones_arg{ones, "ones", 6}; checkAllSameGPU( "slow_conv_transpose3d_out_cuda", {input_arg, output_arg, weight_arg, bias_arg, columns_arg, ones_arg}); slow_conv_transpose3d_shape_check( input_, Tensor(), weight_, bias, kernel_depth, kernel_width, kernel_height, stride_depth, stride_width, stride_height, padding_depth, padding_width, padding_height, dilation_depth, dilation_width, dilation_height, output_padding_depth, output_padding_width, output_padding_height, 0); TORCH_CHECK( !bias.defined() || bias.is_contiguous(), "bias tensor has to be contiguous"); Tensor input = input_.contiguous(); Tensor weight = weight_.contiguous(); int is_batch = false; if (input.dim() == 4) { // Force batch is_batch = true; input.resize_( {1, input.size(0), input.size(1), input.size(2), input.size(3)}); } int64_t input_width = input.size(4); int64_t input_height = input.size(3); int64_t input_depth = input.size(2); int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth + (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth; int64_t output_height = (input_height - 1) * stride_height - 2 * padding_height + (dilation_height * (kernel_height - 1) + 1) + 
output_padding_height; int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width + (dilation_width * (kernel_width - 1) + 1) + output_padding_width; // Batch size + input planes int64_t batch_size = input.size(0); // Resize output output.resize_( {batch_size, n_output_plane, output_depth, output_height, output_width}); // Resize temporary columns columns.resize_({n_output_plane * kernel_width * kernel_height * kernel_depth, input_depth * input_height * input_width}); // Define a buffer of ones, for bias accumulation // Note: this buffer can be shared with other modules, it only ever gets // increased, and always contains ones. if (ones.dim() != 3 || ones.size(0) * ones.size(1) * ones.size(2) < output_depth * output_height * output_width) { // Resize plane and fill with ones... ones.resize_({output_depth, output_height, output_width}); ones.fill_(1); } AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "slow_conv_transpose3d_out_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; // Helpers Tensor input_n; Tensor output_n; // For each elt in batch, do: for (int elt = 0; elt < batch_size; elt++) { // Matrix mulitply per output: input_n = input.select(0, elt); output_n = output.select(0, elt); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m = weight.size(1) * weight.size(2) * weight.size(3) * weight.size(4); int64_t n = columns.size(1); int64_t k = weight.size(0); // Do GEMM (note: this is a bit confusing because gemm assumes // column-major matrices) at::cuda::blas::gemm<scalar_t>( at::cuda::getCurrentCUDAStream(), 'n', 't', n, m, k, static_cast<scalar_t>(1), input_n.data_ptr<scalar_t>(), n, weight.data_ptr<scalar_t>(), m, static_cast<scalar_t>(0), columns.data_ptr<scalar_t>(), n); // Unpack columns back into input: at::native::col2vol<scalar_t, accscalar_t>( at::cuda::getCurrentCUDAStream(), columns.data_ptr<scalar_t>(), n_output_plane, output_depth, output_height, output_width, input_depth, input_height, input_width, kernel_depth, kernel_height, kernel_width, padding_depth, padding_height, padding_width, stride_depth, stride_height, stride_width, dilation_depth, dilation_height, dilation_width, output_n.data_ptr<scalar_t>()); // Do Bias after: // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m_ = n_output_plane; int64_t n_ = output_depth * output_height * output_width; int64_t k_ = 1; // Do GEMM (note: this is a bit confusing because gemm assumes // column-major matrices) if (bias.defined()) { at::cuda::blas::gemm<scalar_t>( at::cuda::getCurrentCUDAStream(), 't', 'n', n_, m_, k_, static_cast<scalar_t>(1), ones.data_ptr<scalar_t>(), k_, bias.data_ptr<scalar_t>(), k_, static_cast<scalar_t>(1), output_n.data_ptr<scalar_t>(), n_); } } // Resize output if (is_batch) { output.resize_( {n_output_plane, output_depth, output_height, output_width}); input.resize_( {n_input_plane, input_depth, input_height, input_width}); } }); } void slow_conv_transpose3d_backward_out_cuda_template( const Tensor& input_, const Tensor& grad_output_, Tensor& grad_input, const Tensor& weight_, const Tensor& finput, const Tensor& fgrad_input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) { TORCH_CHECK( kernel_size.size() == 3, "It is expected kernel_size equals to 3, but got size ", kernel_size.size()); TORCH_CHECK( dilation.size() == 3, "It is expected dilation equals to 3, but got size ", 
dilation.size()); TORCH_CHECK( padding.size() == 3, "It is expected padding equals to 3, but got size ", padding.size()); TORCH_CHECK( stride.size() == 3, "It is expected stride equals to 3, but got size ", stride.size()); TORCH_CHECK( output_padding.size() == 3, "It is expected stride equals to 3, but got size ", output_padding.size()); Tensor grad_columns = finput; int n_input_plane = weight_.size(0); int n_output_plane = weight_.size(1); int64_t kernel_depth = kernel_size[0]; int64_t kernel_height = kernel_size[1]; int64_t kernel_width = kernel_size[2]; int64_t dilation_depth = dilation[0]; int64_t dilation_height = dilation[1]; int64_t dilation_width = dilation[2]; int64_t padding_depth = padding[0]; int64_t padding_height = padding[1]; int64_t padding_width = padding[2]; int64_t stride_depth = stride[0]; int64_t stride_height = stride[1]; int64_t stride_width = stride[2]; int64_t output_padding_depth = output_padding[0]; int64_t output_padding_height = output_padding[1]; int64_t output_padding_width = output_padding[2]; TensorArg input_arg{input_, "input", 1}, grad_output_arg{grad_output_, "grad_output", 2}, weight_arg{weight_, "weight", 3}, grad_columns_arg{grad_columns, "grad_columns", 4}, grad_input_arg{grad_input, "grad_input", 5}; checkAllSameGPU( "slow_conv_transpose3d_backward_out_cuda", {input_arg, grad_output_arg, weight_arg, grad_columns_arg, grad_input_arg}); slow_conv_transpose3d_shape_check( input_, grad_output_, weight_, Tensor(), kernel_depth, kernel_width, kernel_height, stride_depth, stride_width, stride_height, padding_depth, padding_width, padding_height, dilation_depth, dilation_width, dilation_height, output_padding_depth, output_padding_width, output_padding_height, 0); Tensor input = input_.contiguous(); Tensor grad_output = grad_output_.contiguous(); Tensor weight = weight_.contiguous(); bool is_batch = false; if (input.dim() == 4) { // Force batch is_batch = true; input.resize_( {1, input.size(0), input.size(1), input.size(2), input.size(3)}); grad_output.resize_({1, grad_output.size(0), grad_output.size(1), grad_output.size(2), grad_output.size(3)}); } int64_t input_width = input.size(4); int64_t input_height = input.size(3); int64_t input_depth = input.size(2); int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth + (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth; int64_t output_height = (input_height - 1) * stride_height - 2 * padding_height + (dilation_height * (kernel_height - 1) + 1) + output_padding_height; int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width + (dilation_width * (kernel_width - 1) + 1) + output_padding_width; // Batch size + input planes int64_t batch_size = input.size(0); // Resize output grad_input.resize_( {batch_size, n_input_plane, input_depth, input_height, input_width}); // Resize temporary columns grad_columns.resize_( {n_output_plane * kernel_width * kernel_height * kernel_depth, input_depth * input_height * input_width}); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "slow_conv_transpose3d_backward_out_cuda", [&] { // Helpers Tensor grad_input_n; Tensor grad_output_n; // For each elt in batch, do: for (int elt = 0; elt < batch_size; elt++) { // Matrix mulitply per sample: grad_input_n = grad_input.select(0, elt); grad_output_n = grad_output.select(0, elt); // Extract columns: at::native::vol2col<scalar_t>( at::cuda::getCurrentCUDAStream(), grad_output_n.data_ptr<scalar_t>(), n_output_plane, output_depth, output_height, output_width, input_depth, 
input_height, input_width, kernel_depth, kernel_height, kernel_width, padding_depth, padding_height, padding_width, stride_depth, stride_height, stride_width, dilation_depth, dilation_height, dilation_width, grad_columns.data_ptr<scalar_t>()); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m = weight.size(0); int64_t n = grad_columns.size(1); int64_t k = weight.size(1) * weight.size(2) * weight.size(3) * weight.size(4); // Do GEMM (note: this is a bit confusing because gemm assumes // column-major matrices) at::cuda::blas::gemm<scalar_t>( at::cuda::getCurrentCUDAStream(), 'n', 'n', n, m, k, static_cast<scalar_t>(1), grad_columns.data_ptr<scalar_t>(), n, weight.data_ptr<scalar_t>(), k, static_cast<scalar_t>(0), grad_input_n.data_ptr<scalar_t>(), n); } // Resize output if (is_batch) { grad_output.resize_( {n_output_plane, output_depth, output_height, output_width}); input.resize_( {n_input_plane, input_depth, input_height, input_width}); grad_input.resize_( {n_input_plane, input_depth, input_height, input_width}); } }); } void slow_conv_transpose3d_acc_grad_parameters_cuda( const Tensor& input_, const Tensor& grad_output_, Tensor& grad_weight, Tensor& grad_bias, const Tensor& finput, const Tensor& fgrad_input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, int scale_) { TORCH_CHECK( kernel_size.size() == 3, "It is expected kernel_size equals to 3, but got size ", kernel_size.size()); TORCH_CHECK( dilation.size() == 3, "It is expected dilation equals to 3, but got size ", dilation.size()); TORCH_CHECK( padding.size() == 3, "It is expected padding equals to 3, but got size ", padding.size()); TORCH_CHECK( stride.size() == 3, "It is expected stride equals to 3, but got size ", stride.size()); TORCH_CHECK( output_padding.size() == 3, "It is expected stride equals to 3, but got size ", output_padding.size()); int64_t kernel_depth = kernel_size[0]; int64_t kernel_height = kernel_size[1]; int64_t kernel_width = kernel_size[2]; int64_t dilation_depth = dilation[0]; int64_t dilation_height = dilation[1]; int64_t dilation_width = dilation[2]; int64_t padding_depth = padding[0]; int64_t padding_height = padding[1]; int64_t padding_width = padding[2]; int64_t stride_depth = stride[0]; int64_t stride_height = stride[1]; int64_t stride_width = stride[2]; int64_t output_padding_depth = output_padding[0]; int64_t output_padding_height = output_padding[1]; int64_t output_padding_width = output_padding[2]; Tensor columns = finput; Tensor ones = fgrad_input; TensorArg input_arg{input_, "input", 1}, grad_output_arg{grad_output_, "grad_output", 2}, grad_weight_arg{grad_weight, "grad_weight", 3}, grad_bias_arg{grad_bias, "grad_bias", 4}, columns_arg{columns, "columns", 5}, ones_arg{ones, "ones", 6}; checkAllSameGPU( "slow_conv_transpose3d_acc_grad_parameters_cuda", {input_arg, grad_output_arg, grad_weight_arg, grad_bias_arg, columns_arg, ones_arg}); slow_conv_transpose3d_shape_check( input_, grad_output_, grad_weight, grad_bias, kernel_depth, kernel_width, kernel_height, stride_depth, stride_width, stride_height, padding_depth, padding_width, padding_height, dilation_depth, dilation_width, dilation_height, output_padding_depth, output_padding_width, output_padding_height, 1); int n_output_plane; if (grad_weight.defined()) { n_output_plane = grad_weight.size(1); } else if (grad_bias.defined()) { n_output_plane = grad_bias.size(0); } else { return; } if (grad_weight.defined()) { 
TORCH_CHECK( grad_weight.is_contiguous(), "grad_weight needs to be contiguous"); } if (grad_bias.defined()) { TORCH_CHECK(grad_bias.is_contiguous(), "grad_bias needs to be contiguous"); TORCH_CHECK(ones.is_contiguous(), "ones needs to be contiguous"); } Tensor input = input_.contiguous(); Tensor grad_output = grad_output_.contiguous(); bool is_batch = false; if (input.dim() == 4) { // Force batch is_batch = true; input.resize_( {1, input.size(0), input.size(1), input.size(2), input.size(3)}); grad_output.resize_({1, grad_output.size(0), grad_output.size(1), grad_output.size(2), grad_output.size(3)}); } int64_t input_width = input.size(4); int64_t input_height = input.size(3); int64_t input_depth = input.size(2); int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth + (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth; int64_t output_height = (input_height - 1) * stride_height - 2 * padding_height + (dilation_height * (kernel_height - 1) + 1) + output_padding_height; int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width + (dilation_width * (kernel_width - 1) + 1) + output_padding_width; // Batch size + input planes int64_t batch_size = input.size(0); // Define a buffer of ones, for bias accumulation if (ones.dim() != 3 || ones.size(0) * ones.size(1) * ones.size(2) < output_depth * output_height * output_width) { // Resize plane and fill with ones... ones.resize_({output_depth, output_height, output_width}); ones.fill_(1); } // Resize temporary columns columns.resize_({n_output_plane * kernel_width * kernel_height * kernel_depth, input_depth * input_height * input_width}); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "slow_conv_transpose3d_acc_grad_parameters_cuda", [&] { // Helpers Tensor input_n; Tensor grad_output_n; scalar_t scale = static_cast<scalar_t>(scale_); // For each elt in batch, do: for (int elt = 0; elt < batch_size; elt++) { // Matrix mulitply per output: grad_output_n = grad_output.select(0, elt); // Do Weight: if (grad_weight.defined()) { // Matrix mulitply per output: input_n = input.select(0, elt); // Extract columns: at::native::vol2col<scalar_t>( at::cuda::getCurrentCUDAStream(), grad_output_n.data_ptr<scalar_t>(), n_output_plane, output_depth, output_height, output_width, input_depth, input_height, input_width, kernel_depth, kernel_height, kernel_width, padding_depth, padding_height, padding_width, stride_depth, stride_height, stride_width, dilation_depth, dilation_height, dilation_width, columns.data_ptr<scalar_t>()); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t n = columns.size(0); // n_output_plane * kt * kh * kw int64_t m = input_n.size(0); // n_input_plane int64_t k = columns.size(1); // input_height * input_width // Do GEMM (note: this is a bit confusing because gemm assumes // column-major matrices) at::cuda::blas::gemm<scalar_t>( at::cuda::getCurrentCUDAStream(), 't', 'n', n, m, k, scale, columns.data_ptr<scalar_t>(), k, input_n.data_ptr<scalar_t>(), k, static_cast<scalar_t>(1), grad_weight.data_ptr<scalar_t>(), n); } // Do Bias: if (grad_bias.defined()) { // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m_ = n_output_plane; int64_t k_ = output_depth * output_height * output_width; // Do GEMV (note: this is a bit confusing because gemv assumes // column-major matrices) at::cuda::blas::gemv<scalar_t>( at::cuda::getCurrentCUDAStream(), 't', k_, m_, scale, 
grad_output_n.data_ptr<scalar_t>(), k_, ones.data_ptr<scalar_t>(), 1, static_cast<scalar_t>(1), grad_bias.data_ptr<scalar_t>(), 1); } } // Resize if (is_batch) { grad_output.resize_( {n_output_plane, output_depth, output_height, output_width}); input.resize_( {input.size(1), input_depth, input_height, input_width}); } }); } } // namespace Tensor& slow_conv_transpose3d_out_cuda( Tensor& output, const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, const Tensor& bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) { Tensor finput = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); Tensor fgrad = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); slow_conv_transpose3d_out_cuda_template( output, input, weight, kernel_size, bias, stride, padding, output_padding, dilation, finput, fgrad); return output; } Tensor slow_conv_transpose3d_cuda( const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, const Tensor& bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) { Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); Tensor finput = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); Tensor fgrad = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); slow_conv_transpose3d_out_cuda_template( output, input, weight, kernel_size, bias, stride, padding, output_padding, dilation, finput, fgrad); return output; } std::tuple<Tensor&, Tensor&, Tensor&> slow_conv_transpose3d_backward_out_cuda( Tensor& grad_input, Tensor& grad_weight, Tensor& grad_bias, const Tensor& grad_output, const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor& finput, const Tensor& fgrad) { if (grad_input.defined()) { slow_conv_transpose3d_backward_out_cuda_template( input, grad_output, grad_input, weight, finput, fgrad, kernel_size, stride, padding, output_padding, dilation); } if (grad_weight.defined()) { grad_weight.resize_(weight.sizes()); grad_weight.zero_(); } if (grad_bias.defined()) { grad_bias.resize_({weight.size(1)}); grad_bias.zero_(); } if (grad_weight.defined() || grad_bias.defined()) { slow_conv_transpose3d_acc_grad_parameters_cuda( input, grad_output, grad_weight, grad_bias, finput, fgrad, kernel_size, stride, padding, output_padding, dilation, 1); } return std::tuple<Tensor&, Tensor&, Tensor&>( grad_input, grad_weight, grad_bias); } std::tuple<Tensor, Tensor, Tensor> slow_conv_transpose3d_backward_cuda( const Tensor& grad_output, const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor& finput, const Tensor& fgrad, std::array<bool, 3> output_mask) { Tensor grad_input; Tensor grad_weight; Tensor grad_bias; if (output_mask[0]) { grad_input = at::empty({0}, grad_output.options()); } else { grad_input = Tensor(); } if (output_mask[1]) { grad_weight = at::empty({0}, grad_output.options()); } else { grad_weight = Tensor(); } if (output_mask[2]) { grad_bias = at::empty({0}, grad_output.options()); } else { grad_bias = Tensor(); } if (grad_input.defined()) { slow_conv_transpose3d_backward_out_cuda_template( input, grad_output, grad_input, weight, finput, fgrad, kernel_size, stride, padding, output_padding, dilation); } if (grad_weight.defined()) { grad_weight.resize_(weight.sizes()); grad_weight.zero_(); } if (grad_bias.defined()) { 
grad_bias.resize_({weight.size(1)}); grad_bias.zero_(); } if (grad_weight.defined() || grad_bias.defined()) { slow_conv_transpose3d_acc_grad_parameters_cuda( input, grad_output, grad_weight, grad_bias, finput, fgrad, kernel_size, stride, padding, output_padding, dilation, 1); } return std::tuple<Tensor, Tensor, Tensor>(grad_input, grad_weight, grad_bias); } } // namespace native } // namespace at
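The shape check, the forward path, and both backward paths above all recompute the transposed-convolution output extent from the same expression for depth, height, and width. A minimal host-side sketch of that formula (the helper name conv_transpose_out_size is hypothetical, not part of the ATen sources), useful for verifying sizes by hand:

#include <cstdint>

// Output extent of one spatial dimension of a transposed convolution,
// matching the expression used above:
//   (in - 1) * stride - 2 * padding + dilation * (kernel - 1) + 1 + output_padding
inline int64_t conv_transpose_out_size(int64_t in, int64_t kernel, int64_t stride,
                                       int64_t padding, int64_t dilation,
                                       int64_t output_padding) {
  return (in - 1) * stride - 2 * padding + dilation * (kernel - 1) + 1 + output_padding;
}

// Example: in = 8, kernel = 3, stride = 2, padding = 1, dilation = 1,
// output_padding = 1 gives (8 - 1) * 2 - 2 + 3 + 1 = 16.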
e2c95a74753ca7822273fde508bc4f5b2d41b70f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <iomanip>
#include <hiprand/hiprand_kernel.h>

#define RUNS_PER_THREAD 1000000

#include <time.h>
#include <sys/time.h>
#ifdef __MACH__
#include <mach/clock.h>
#include <mach/mach.h>
#endif

__host__ timespec GetTime();
__host__ double InSeconds(const timespec ts);

__global__ void ThrowDice(const long seed, int *sum_device, hiprandState_t* rng_states) {
  int index = threadIdx.x + blockDim.x * blockIdx.x;
  int *sum = &sum_device[index];
  hiprandState_t *rng_state = &rng_states[index];
  hiprand_init(seed,index,0,rng_state);
  int sum2 = 0;
  for (int i=0;i<RUNS_PER_THREAD;i++) {
    float u1 = hiprand_uniform(rng_state);
    float u2 = hiprand_uniform(rng_state);
    float r = sqrt(u1*u1 + u2*u2);
    sum2 += 1 - (int)r;
  }
  *sum = sum2;
}

__host__ double FindPi() {
  int threads = 65536 / 2;
  int threadsPerBlock = 256;
  int blocksPerGrid = (threads - 1) / threadsPerBlock + 1;
  int *sum_host = new int[threads];
  for (int i=0;i<threads;i++) sum_host[i] = 0;
  int *sum_device = 0;
  hiprandState_t *rng_states = 0;
  hipMalloc((void**)&sum_device,sizeof(int)*threads);
  hipMalloc((void**)&rng_states,sizeof(hiprandState_t)*threads);
  long int seed = (long int)InSeconds(GetTime());
  hipLaunchKernelGGL(( ThrowDice), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, seed,sum_device,rng_states);
  hipMemcpy((void*)sum_host,(void*)sum_device,threads*sizeof(int),hipMemcpyDeviceToHost);
  hipFree(sum_device);
  hipFree(rng_states);
  long int sum = 0;
  for (int i=0;i<threads;i++) {
    sum += sum_host[i];
  }
  delete [] sum_host; // release the host-side buffer
  double runs = ((double)threads) * RUNS_PER_THREAD;
  double result = 4.0 * (double)sum / runs;
  printf("%li / %e hit inside the circle.\n",sum,runs);
  printf("Result after %.2e runs: %.10f\n",runs,result);
  return result;
}

__host__ int main() {
  int iterations = 1;
  double final = 0;
  for (int i=0;i<iterations;i++) {
    final += FindPi();
  }
  final /= (double)iterations;
  if (iterations > 1) printf("Sum of results: %.10f\n",final);
  return 0;
}

__host__ timespec GetTime() {
  struct timespec ts;
#ifdef __MACH__
  // OS X does not have clock_gettime, use clock_get_time
  clock_serv_t cclock;
  mach_timespec_t mts;
  host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock);
  clock_get_time(cclock, &mts);
  mach_port_deallocate(mach_task_self(), cclock);
  ts.tv_sec = mts.tv_sec;
  ts.tv_nsec = mts.tv_nsec;
#else
  clock_gettime(CLOCK_REALTIME, &ts);
#endif
  return ts;
}

__host__ double InSeconds(const timespec ts) {
  double s(ts.tv_sec);
  return s + (1e-9*(double)ts.tv_nsec);
}
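ThrowDice counts hits without a branch: for a point (u1, u2) in the unit square, r < 1 truncates to 0 so 1 - (int)r adds 1, while 1 <= r < sqrt(2) truncates to 1 and adds 0. A minimal host-side sketch of the same test (the function name quarter_circle_hit is hypothetical, not part of this file):

#include <cmath>

// Branch-free hit test used by ThrowDice: returns 1 if (u1, u2) lies inside
// the quarter circle of radius 1, 0 otherwise. Valid because u1, u2 <= 1
// implies r <= sqrt(2) < 2, so (int)r is always 0 or 1.
inline int quarter_circle_hit(float u1, float u2) {
  float r = sqrtf(u1 * u1 + u2 * u2);
  return 1 - (int)r;
}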
e2c95a74753ca7822273fde508bc4f5b2d41b70f.cu
#include <stdio.h>
#include <iomanip>
#include <curand_kernel.h>

#define RUNS_PER_THREAD 1000000

#include <time.h>
#include <sys/time.h>
#ifdef __MACH__
#include <mach/clock.h>
#include <mach/mach.h>
#endif

__host__ timespec GetTime();
__host__ double InSeconds(const timespec ts);

__global__ void ThrowDice(const long seed, int *sum_device, curandState* rng_states) {
  int index = threadIdx.x + blockDim.x * blockIdx.x;
  int *sum = &sum_device[index];
  curandState *rng_state = &rng_states[index];
  curand_init(seed,index,0,rng_state);
  int sum2 = 0;
  for (int i=0;i<RUNS_PER_THREAD;i++) {
    float u1 = curand_uniform(rng_state);
    float u2 = curand_uniform(rng_state);
    float r = sqrt(u1*u1 + u2*u2);
    sum2 += 1 - (int)r;
  }
  *sum = sum2;
}

__host__ double FindPi() {
  int threads = 65536 / 2;
  int threadsPerBlock = 256;
  int blocksPerGrid = (threads - 1) / threadsPerBlock + 1;
  int *sum_host = new int[threads];
  for (int i=0;i<threads;i++) sum_host[i] = 0;
  int *sum_device = 0;
  curandState *rng_states = 0;
  cudaMalloc((void**)&sum_device,sizeof(int)*threads);
  cudaMalloc((void**)&rng_states,sizeof(curandState)*threads);
  long int seed = (long int)InSeconds(GetTime());
  ThrowDice<<<blocksPerGrid,threadsPerBlock>>>(seed,sum_device,rng_states);
  cudaMemcpy((void*)sum_host,(void*)sum_device,threads*sizeof(int),cudaMemcpyDeviceToHost);
  cudaFree(sum_device);
  cudaFree(rng_states);
  long int sum = 0;
  for (int i=0;i<threads;i++) {
    sum += sum_host[i];
  }
  delete [] sum_host; // release the host-side buffer
  double runs = ((double)threads) * RUNS_PER_THREAD;
  double result = 4.0 * (double)sum / runs;
  printf("%li / %e hit inside the circle.\n",sum,runs);
  printf("Result after %.2e runs: %.10f\n",runs,result);
  return result;
}

__host__ int main() {
  int iterations = 1;
  double final = 0;
  for (int i=0;i<iterations;i++) {
    final += FindPi();
  }
  final /= (double)iterations;
  if (iterations > 1) printf("Sum of results: %.10f\n",final);
  return 0;
}

__host__ timespec GetTime() {
  struct timespec ts;
#ifdef __MACH__
  // OS X does not have clock_gettime, use clock_get_time
  clock_serv_t cclock;
  mach_timespec_t mts;
  host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock);
  clock_get_time(cclock, &mts);
  mach_port_deallocate(mach_task_self(), cclock);
  ts.tv_sec = mts.tv_sec;
  ts.tv_nsec = mts.tv_nsec;
#else
  clock_gettime(CLOCK_REALTIME, &ts);
#endif
  return ts;
}

__host__ double InSeconds(const timespec ts) {
  double s(ts.tv_sec);
  return s + (1e-9*(double)ts.tv_nsec);
}
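Neither version of this program estimates the statistical accuracy of the value it prints. A small standalone sketch (not part of the original files) of the expected sampling error, assuming the launch parameters used in FindPi:

#include <cmath>
#include <cstdio>

// Back-of-the-envelope precision of the estimate printed above: each of the
// N = threads * RUNS_PER_THREAD points lands inside the quarter circle with
// probability p = pi / 4, and the program reports 4 * (hits / N), so the
// 1-sigma statistical error is 4 * sqrt(p * (1 - p) / N).
int main() {
  const double N = (65536.0 / 2.0) * 1000000.0;  // threads * RUNS_PER_THREAD
  const double p = std::acos(-1.0) / 4.0;        // pi / 4
  const double sigma = 4.0 * std::sqrt(p * (1.0 - p) / N);
  std::printf("expected 1-sigma error: %.1e\n", sigma);  // roughly 9e-6
  return 0;
}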
79e552684c484862085970dff0468fba2a035997.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //===---- reduction.cu - NVPTX OpenMP reduction implementation ---- CUDA //-*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file contains the implementation of reduction with KMPC interface. // //===----------------------------------------------------------------------===// #include <complex.h> #include <stdio.h> #include "omptarget-nvptx.h" // may eventually remove this EXTERN int32_t __gpu_block_reduce() { bool isSPMDExecutionMode = isSPMDMode(); int tid = GetLogicalThreadIdInBlock(isSPMDExecutionMode); int nt = GetNumberOfOmpThreads(tid, isSPMDExecutionMode); if (nt != blockDim.x) return 0; unsigned tnum = __ACTIVEMASK(); if (tnum != (~0x0)) // assume swapSize is 32 return 0; return 1; } EXTERN int32_t __kmpc_reduce_gpu(kmp_Ident *loc, int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, void *reduce_array_size, kmp_ReductFctPtr *reductFct, kmp_CriticalName *lck) { int threadId = GetLogicalThreadIdInBlock(checkSPMDMode(loc)); omptarget_nvptx_TaskDescr *currTaskDescr = getMyTopTaskDescriptor(threadId); int numthread; if (currTaskDescr->IsParallelConstruct()) { numthread = GetNumberOfOmpThreads(threadId, checkSPMDMode(loc)); } else { numthread = GetNumberOfOmpTeams(); } if (numthread == 1) return 1; if (!__gpu_block_reduce()) return 2; if (threadIdx.x == 0) return 1; return 0; } EXTERN int32_t __kmpc_reduce_combined(kmp_Ident *loc) { return threadIdx.x == 0 ? 2 : 0; } EXTERN int32_t __kmpc_reduce_simd(kmp_Ident *loc) { return (threadIdx.x % 32 == 0) ? 
1 : 0; } EXTERN void __kmpc_nvptx_end_reduce(int32_t global_tid) {} EXTERN void __kmpc_nvptx_end_reduce_nowait(int32_t global_tid) {} EXTERN int32_t __kmpc_shuffle_int32(int32_t val, int16_t delta, int16_t size) { return __SHFL_DOWN_SYNC(0xFFFFFFFF, val, delta, size); } EXTERN int64_t __kmpc_shuffle_int64(int64_t val, int16_t delta, int16_t size) { int lo, hi; asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(val)); hi = __SHFL_DOWN_SYNC(0xFFFFFFFF, hi, delta, size); lo = __SHFL_DOWN_SYNC(0xFFFFFFFF, lo, delta, size); asm volatile("mov.b64 %0, {%1,%2};" : "=l"(val) : "r"(lo), "r"(hi)); return val; } INLINE static void gpu_regular_warp_reduce(void *reduce_data, kmp_ShuffleReductFctPtr shflFct) { for (uint32_t mask = WARPSIZE / 2; mask > 0; mask /= 2) { shflFct(reduce_data, /*LaneId - not used= */ 0, /*Offset = */ mask, /*AlgoVersion=*/0); } } INLINE static void gpu_irregular_warp_reduce(void *reduce_data, kmp_ShuffleReductFctPtr shflFct, uint32_t size, uint32_t tid) { uint32_t curr_size; uint32_t mask; curr_size = size; mask = curr_size / 2; while (mask > 0) { shflFct(reduce_data, /*LaneId = */ tid, /*Offset=*/mask, /*AlgoVersion=*/1); curr_size = (curr_size + 1) / 2; mask = curr_size / 2; } } INLINE static uint32_t gpu_irregular_simd_reduce(void *reduce_data, kmp_ShuffleReductFctPtr shflFct) { uint32_t lanemask_lt; uint32_t lanemask_gt; uint32_t size, remote_id, physical_lane_id; physical_lane_id = GetThreadIdInBlock() % WARPSIZE; asm("mov.u32 %0, %%lanemask_lt;" : "=r"(lanemask_lt)); uint32_t Liveness = __ACTIVEMASK(); uint32_t logical_lane_id = __popc(Liveness & lanemask_lt) * 2; asm("mov.u32 %0, %%lanemask_gt;" : "=r"(lanemask_gt)); do { Liveness = __ACTIVEMASK(); remote_id = __ffs(Liveness & lanemask_gt); size = __popc(Liveness); logical_lane_id /= 2; shflFct(reduce_data, /*LaneId =*/logical_lane_id, /*Offset=*/remote_id - 1 - physical_lane_id, /*AlgoVersion=*/2); } while (logical_lane_id % 2 == 0 && size > 1); return (logical_lane_id == 0); } EXTERN int32_t __kmpc_nvptx_simd_reduce_nowait(int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) { uint32_t Liveness = __ACTIVEMASK(); if (Liveness == 0xffffffff) { gpu_regular_warp_reduce(reduce_data, shflFct); return GetThreadIdInBlock() % WARPSIZE == 0; // Result on lane 0 of the simd warp. } else { return gpu_irregular_simd_reduce( reduce_data, shflFct); // Result on the first active lane. } } INLINE static int32_t nvptx_parallel_reduce_nowait( int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct, bool isSPMDExecutionMode, bool isRuntimeUninitialized) { uint32_t BlockThreadId = GetLogicalThreadIdInBlock(isSPMDExecutionMode); uint32_t NumThreads = GetNumberOfOmpThreads(BlockThreadId, isSPMDExecutionMode); if (NumThreads == 1) return 1; /* * This reduce function handles reduction within a team. It handles * parallel regions in both L1 and L2 parallelism levels. It also * supports Generic, SPMD, and NoOMP modes. * * 1. Reduce within a warp. * 2. Warp master copies value to warp 0 via shared memory. * 3. Warp 0 reduces to a single value. * 4. The reduced value is available in the thread that returns 1. 
*/ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 uint32_t WarpsNeeded = (NumThreads + WARPSIZE - 1) / WARPSIZE; uint32_t WarpId = BlockThreadId / WARPSIZE; // Volta execution model: // For the Generic execution mode a parallel region either has 1 thread and // beyond that, always a multiple of 32. For the SPMD execution mode we may // have any number of threads. if ((NumThreads % WARPSIZE == 0) || (WarpId < WarpsNeeded - 1)) gpu_regular_warp_reduce(reduce_data, shflFct); else if (NumThreads > 1) // Only SPMD execution mode comes thru this case. gpu_irregular_warp_reduce(reduce_data, shflFct, /*LaneCount=*/NumThreads % WARPSIZE, /*LaneId=*/GetThreadIdInBlock() % WARPSIZE); // When we have more than [warpsize] number of threads // a block reduction is performed here. // // Only L1 parallel region can enter this if condition. if (NumThreads > WARPSIZE) { // Gather all the reduced values from each warp // to the first warp. cpyFct(reduce_data, WarpsNeeded); if (WarpId == 0) gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, BlockThreadId); } return BlockThreadId == 0; #else uint32_t Liveness = __ACTIVEMASK(); if (Liveness == 0xffffffff) // Full warp gpu_regular_warp_reduce(reduce_data, shflFct); else if (!(Liveness & (Liveness + 1))) // Partial warp but contiguous lanes gpu_irregular_warp_reduce(reduce_data, shflFct, /*LaneCount=*/__popc(Liveness), /*LaneId=*/GetThreadIdInBlock() % WARPSIZE); else if (!isRuntimeUninitialized) // Dispersed lanes. Only threads in L2 // parallel region may enter here; return // early. return gpu_irregular_simd_reduce(reduce_data, shflFct); // When we have more than [warpsize] number of threads // a block reduction is performed here. // // Only L1 parallel region can enter this if condition. if (NumThreads > WARPSIZE) { uint32_t WarpsNeeded = (NumThreads + WARPSIZE - 1) / WARPSIZE; // Gather all the reduced values from each warp // to the first warp. cpyFct(reduce_data, WarpsNeeded); uint32_t WarpId = BlockThreadId / WARPSIZE; if (WarpId == 0) gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, BlockThreadId); return BlockThreadId == 0; } else if (isRuntimeUninitialized /* Never an L2 parallel region without the OMP runtime */) { return BlockThreadId == 0; } // Get the OMP thread Id. This is different from BlockThreadId in the case of // an L2 parallel region. 
return global_tid == 0; #endif // __CUDA_ARCH__ >= 700 } EXTERN __attribute__((deprecated)) int32_t __kmpc_nvptx_parallel_reduce_nowait( int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) { return nvptx_parallel_reduce_nowait(global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct, isSPMDMode(), isRuntimeUninitialized()); } EXTERN int32_t __kmpc_nvptx_parallel_reduce_nowait_v2( kmp_Ident *loc, int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) { return nvptx_parallel_reduce_nowait( global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct, checkSPMDMode(loc), checkRuntimeUninitialized(loc)); } EXTERN int32_t __kmpc_nvptx_parallel_reduce_nowait_simple_spmd( int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) { return nvptx_parallel_reduce_nowait( global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct, /*isSPMDExecutionMode=*/true, /*isRuntimeUninitialized=*/true); } EXTERN int32_t __kmpc_nvptx_parallel_reduce_nowait_simple_generic( int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) { return nvptx_parallel_reduce_nowait( global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct, /*isSPMDExecutionMode=*/false, /*isRuntimeUninitialized=*/true); } INLINE static int32_t nvptx_teams_reduce_nowait(int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct, kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct, bool isSPMDExecutionMode) { uint32_t ThreadId = GetLogicalThreadIdInBlock(isSPMDExecutionMode); // In non-generic mode all workers participate in the teams reduction. // In generic mode only the team master participates in the teams // reduction because the workers are waiting for parallel work. uint32_t NumThreads = isSPMDExecutionMode ? GetNumberOfOmpThreads(ThreadId, /*isSPMDExecutionMode=*/true) : /*Master thread only*/ 1; uint32_t TeamId = GetBlockIdInKernel(); uint32_t NumTeams = GetNumberOfBlocksInKernel(); __shared__ volatile bool IsLastTeam; // Team masters of all teams write to the scratchpad. if (ThreadId == 0) { unsigned int *timestamp = GetTeamsReductionTimestamp(); char *scratchpad = GetTeamsReductionScratchpad(); scratchFct(reduce_data, scratchpad, TeamId, NumTeams); __threadfence(); // atomicInc increments 'timestamp' and has a range [0, NumTeams-1]. // It resets 'timestamp' back to 0 once the last team increments // this counter. unsigned val = atomicInc(timestamp, NumTeams - 1); IsLastTeam = val == NumTeams - 1; } // We have to wait on L1 barrier because in GENERIC mode the workers // are waiting on barrier 0 for work. // // If we guard this barrier as follows it leads to deadlock, probably // because of a compiler bug: if (!IsGenericMode()) __syncthreads(); uint16_t SyncWarps = (NumThreads + WARPSIZE - 1) / WARPSIZE; named_sync(L1_BARRIER, SyncWarps * WARPSIZE); // If this team is not the last, quit. if (/* Volatile read by all threads */ !IsLastTeam) return 0; // // Last team processing. // // Threads in excess of #teams do not participate in reduction of the // scratchpad values. 
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 uint32_t ActiveThreads = NumThreads; if (NumTeams < NumThreads) { ActiveThreads = (NumTeams < WARPSIZE) ? 1 : NumTeams & ~((uint16_t)WARPSIZE - 1); } if (ThreadId >= ActiveThreads) return 0; // Load from scratchpad and reduce. char *scratchpad = GetTeamsReductionScratchpad(); ldFct(reduce_data, scratchpad, ThreadId, NumTeams, /*Load only*/ 0); for (uint32_t i = ActiveThreads + ThreadId; i < NumTeams; i += ActiveThreads) ldFct(reduce_data, scratchpad, i, NumTeams, /*Load and reduce*/ 1); uint32_t WarpsNeeded = (ActiveThreads + WARPSIZE - 1) / WARPSIZE; uint32_t WarpId = ThreadId / WARPSIZE; // Reduce across warps to the warp master. if ((ActiveThreads % WARPSIZE == 0) || (WarpId < WarpsNeeded - 1)) // Full warp gpu_regular_warp_reduce(reduce_data, shflFct); else if (ActiveThreads > 1) // Partial warp but contiguous lanes // Only SPMD execution mode comes thru this case. gpu_irregular_warp_reduce(reduce_data, shflFct, /*LaneCount=*/ActiveThreads % WARPSIZE, /*LaneId=*/ThreadId % WARPSIZE); // When we have more than [warpsize] number of threads // a block reduction is performed here. if (ActiveThreads > WARPSIZE) { // Gather all the reduced values from each warp // to the first warp. cpyFct(reduce_data, WarpsNeeded); if (WarpId == 0) gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, ThreadId); } #else if (ThreadId >= NumTeams) return 0; // Load from scratchpad and reduce. char *scratchpad = GetTeamsReductionScratchpad(); ldFct(reduce_data, scratchpad, ThreadId, NumTeams, /*Load only*/ 0); for (uint32_t i = NumThreads + ThreadId; i < NumTeams; i += NumThreads) ldFct(reduce_data, scratchpad, i, NumTeams, /*Load and reduce*/ 1); // Reduce across warps to the warp master. uint32_t Liveness = __ACTIVEMASK(); if (Liveness == 0xffffffff) // Full warp gpu_regular_warp_reduce(reduce_data, shflFct); else // Partial warp but contiguous lanes gpu_irregular_warp_reduce(reduce_data, shflFct, /*LaneCount=*/__popc(Liveness), /*LaneId=*/ThreadId % WARPSIZE); // When we have more than [warpsize] number of threads // a block reduction is performed here. uint32_t ActiveThreads = NumTeams < NumThreads ? NumTeams : NumThreads; if (ActiveThreads > WARPSIZE) { uint32_t WarpsNeeded = (ActiveThreads + WARPSIZE - 1) / WARPSIZE; // Gather all the reduced values from each warp // to the first warp. 
cpyFct(reduce_data, WarpsNeeded); uint32_t WarpId = ThreadId / WARPSIZE; if (WarpId == 0) gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, ThreadId); } #endif // __CUDA_ARCH__ >= 700 return ThreadId == 0; } EXTERN int32_t __kmpc_nvptx_teams_reduce_nowait(int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct, kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct) { return nvptx_teams_reduce_nowait(global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct, scratchFct, ldFct, isSPMDMode()); } EXTERN int32_t __kmpc_nvptx_teams_reduce_nowait_simple_spmd( int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct, kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct) { return nvptx_teams_reduce_nowait(global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct, scratchFct, ldFct, /*isSPMDExecutionMode=*/true); } EXTERN int32_t __kmpc_nvptx_teams_reduce_nowait_simple_generic( int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct, kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct) { return nvptx_teams_reduce_nowait(global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct, scratchFct, ldFct, /*isSPMDExecutionMode=*/false); } EXTERN int32_t __kmpc_nvptx_teams_reduce_nowait_simple(kmp_Ident *loc, int32_t global_tid, kmp_CriticalName *crit) { if (checkSPMDMode(loc) && GetThreadIdInBlock() != 0) return 0; // The master thread of the team actually does the reduction. while (atomicCAS((uint32_t *)crit, 0, 1)) ; return 1; } EXTERN void __kmpc_nvptx_teams_end_reduce_nowait_simple(kmp_Ident *loc, int32_t global_tid, kmp_CriticalName *crit) { __threadfence_system(); (void)atomicExch((uint32_t *)crit, 0); } INLINE static bool isMaster(kmp_Ident *loc, uint32_t ThreadId) { return checkGenericMode(loc) || IsTeamMaster(ThreadId); } INLINE static uint32_t roundToWarpsize(uint32_t s) { if (s < WARPSIZE) return 1; return (s & ~(unsigned)(WARPSIZE - 1)); } __device__ static volatile uint32_t IterCnt = 0; __device__ static volatile uint32_t Cnt = 0; EXTERN int32_t __kmpc_nvptx_teams_reduce_nowait_v2( kmp_Ident *loc, int32_t global_tid, void *global_buffer, int32_t num_of_records, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct, kmp_ListGlobalFctPtr lgcpyFct, kmp_ListGlobalFctPtr lgredFct, kmp_ListGlobalFctPtr glcpyFct, kmp_ListGlobalFctPtr glredFct) { // Terminate all threads in non-SPMD mode except for the master thread. if (checkGenericMode(loc) && GetThreadIdInBlock() != GetMasterThreadID()) return 0; uint32_t ThreadId = GetLogicalThreadIdInBlock(checkSPMDMode(loc)); // In non-generic mode all workers participate in the teams reduction. // In generic mode only the team master participates in the teams // reduction because the workers are waiting for parallel work. uint32_t NumThreads = checkSPMDMode(loc) ? GetNumberOfOmpThreads(ThreadId, /*isSPMDExecutionMode=*/true) : /*Master thread only*/ 1; uint32_t TeamId = GetBlockIdInKernel(); uint32_t NumTeams = GetNumberOfBlocksInKernel(); __shared__ unsigned Bound; __shared__ unsigned ChunkTeamCount; // Block progress for teams greater than the current upper // limit. We always only allow a number of teams less or equal // to the number of slots in the buffer. 
bool IsMaster = isMaster(loc, ThreadId); while (IsMaster) { // Atomic read Bound = atomicAdd((uint32_t *)&IterCnt, 0); if (TeamId < Bound + num_of_records) break; } if (IsMaster) { int ModBockId = TeamId % num_of_records; if (TeamId < num_of_records) lgcpyFct(global_buffer, ModBockId, reduce_data); else lgredFct(global_buffer, ModBockId, reduce_data); __threadfence_system(); // Increment team counter. // This counter is incremented by all teams in the current // BUFFER_SIZE chunk. ChunkTeamCount = atomicInc((uint32_t *)&Cnt, num_of_records - 1); } // Synchronize if (checkSPMDMode(loc)) __kmpc_barrier(loc, global_tid); // reduce_data is global or shared so before being reduced within the // warp we need to bring it in local memory: // local_reduce_data = reduce_data[i] // // Example for 3 reduction variables a, b, c (of potentially different // types): // // buffer layout (struct of arrays): // a, a, ..., a, b, b, ... b, c, c, ... c // |__________| // num_of_records // // local_data_reduce layout (struct): // a, b, c // // Each thread will have a local struct containing the values to be // reduced: // 1. do reduction within each warp. // 2. do reduction across warps. // 3. write the final result to the main reduction variable // by returning 1 in the thread holding the reduction result. // Check if this is the very last team. unsigned NumRecs = min(NumTeams, num_of_records); if (ChunkTeamCount == NumTeams - Bound - 1) { // // Last team processing. // if (ThreadId >= NumRecs) return 0; NumThreads = roundToWarpsize(min(NumThreads, NumRecs)); if (ThreadId >= NumThreads) return 0; // Load from buffer and reduce. glcpyFct(global_buffer, ThreadId, reduce_data); for (uint32_t i = NumThreads + ThreadId; i < NumRecs; i += NumThreads) glredFct(global_buffer, i, reduce_data); // Reduce across warps to the warp master. if (NumThreads > 1) { gpu_regular_warp_reduce(reduce_data, shflFct); // When we have more than [warpsize] number of threads // a block reduction is performed here. uint32_t ActiveThreads = min(NumRecs, NumThreads); if (ActiveThreads > WARPSIZE) { uint32_t WarpsNeeded = (ActiveThreads + WARPSIZE - 1) / WARPSIZE; // Gather all the reduced values from each warp // to the first warp. cpyFct(reduce_data, WarpsNeeded); uint32_t WarpId = ThreadId / WARPSIZE; if (WarpId == 0) gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, ThreadId); } } if (IsMaster) { Cnt = 0; IterCnt = 0; return 1; } return 0; } if (IsMaster && ChunkTeamCount == num_of_records - 1) { // Allow SIZE number of teams to proceed writing their // intermediate results to the global buffer. atomicAdd((uint32_t *)&IterCnt, num_of_records); } return 0; }
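gpu_regular_warp_reduce above implements a log2(WARPSIZE) shuffle tree: the offset starts at WARPSIZE / 2 and halves on every step. A minimal concrete instantiation for a single int, written with the plain CUDA intrinsic rather than the __SHFL_DOWN_SYNC portability macro and shflFct callback used by the runtime (the function name warp_sum is illustrative only):

// Same tree as gpu_regular_warp_reduce, specialized to summing one int:
// offsets 16, 8, 4, 2, 1; after the loop, lane 0 of a full warp holds the
// sum of all 32 lanes.
__device__ int warp_sum(int value) {
  for (int offset = 32 / 2; offset > 0; offset /= 2)
    value += __shfl_down_sync(0xffffffffu, value, offset);
  return value;  // result valid in lane 0
}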
79e552684c484862085970dff0468fba2a035997.cu
//===---- reduction.cu - NVPTX OpenMP reduction implementation ---- CUDA //-*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file contains the implementation of reduction with KMPC interface. // //===----------------------------------------------------------------------===// #include <complex.h> #include <stdio.h> #include "omptarget-nvptx.h" // may eventually remove this EXTERN int32_t __gpu_block_reduce() { bool isSPMDExecutionMode = isSPMDMode(); int tid = GetLogicalThreadIdInBlock(isSPMDExecutionMode); int nt = GetNumberOfOmpThreads(tid, isSPMDExecutionMode); if (nt != blockDim.x) return 0; unsigned tnum = __ACTIVEMASK(); if (tnum != (~0x0)) // assume swapSize is 32 return 0; return 1; } EXTERN int32_t __kmpc_reduce_gpu(kmp_Ident *loc, int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, void *reduce_array_size, kmp_ReductFctPtr *reductFct, kmp_CriticalName *lck) { int threadId = GetLogicalThreadIdInBlock(checkSPMDMode(loc)); omptarget_nvptx_TaskDescr *currTaskDescr = getMyTopTaskDescriptor(threadId); int numthread; if (currTaskDescr->IsParallelConstruct()) { numthread = GetNumberOfOmpThreads(threadId, checkSPMDMode(loc)); } else { numthread = GetNumberOfOmpTeams(); } if (numthread == 1) return 1; if (!__gpu_block_reduce()) return 2; if (threadIdx.x == 0) return 1; return 0; } EXTERN int32_t __kmpc_reduce_combined(kmp_Ident *loc) { return threadIdx.x == 0 ? 2 : 0; } EXTERN int32_t __kmpc_reduce_simd(kmp_Ident *loc) { return (threadIdx.x % 32 == 0) ? 1 : 0; } EXTERN void __kmpc_nvptx_end_reduce(int32_t global_tid) {} EXTERN void __kmpc_nvptx_end_reduce_nowait(int32_t global_tid) {} EXTERN int32_t __kmpc_shuffle_int32(int32_t val, int16_t delta, int16_t size) { return __SHFL_DOWN_SYNC(0xFFFFFFFF, val, delta, size); } EXTERN int64_t __kmpc_shuffle_int64(int64_t val, int16_t delta, int16_t size) { int lo, hi; asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(val)); hi = __SHFL_DOWN_SYNC(0xFFFFFFFF, hi, delta, size); lo = __SHFL_DOWN_SYNC(0xFFFFFFFF, lo, delta, size); asm volatile("mov.b64 %0, {%1,%2};" : "=l"(val) : "r"(lo), "r"(hi)); return val; } INLINE static void gpu_regular_warp_reduce(void *reduce_data, kmp_ShuffleReductFctPtr shflFct) { for (uint32_t mask = WARPSIZE / 2; mask > 0; mask /= 2) { shflFct(reduce_data, /*LaneId - not used= */ 0, /*Offset = */ mask, /*AlgoVersion=*/0); } } INLINE static void gpu_irregular_warp_reduce(void *reduce_data, kmp_ShuffleReductFctPtr shflFct, uint32_t size, uint32_t tid) { uint32_t curr_size; uint32_t mask; curr_size = size; mask = curr_size / 2; while (mask > 0) { shflFct(reduce_data, /*LaneId = */ tid, /*Offset=*/mask, /*AlgoVersion=*/1); curr_size = (curr_size + 1) / 2; mask = curr_size / 2; } } INLINE static uint32_t gpu_irregular_simd_reduce(void *reduce_data, kmp_ShuffleReductFctPtr shflFct) { uint32_t lanemask_lt; uint32_t lanemask_gt; uint32_t size, remote_id, physical_lane_id; physical_lane_id = GetThreadIdInBlock() % WARPSIZE; asm("mov.u32 %0, %%lanemask_lt;" : "=r"(lanemask_lt)); uint32_t Liveness = __ACTIVEMASK(); uint32_t logical_lane_id = __popc(Liveness & lanemask_lt) * 2; asm("mov.u32 %0, %%lanemask_gt;" : "=r"(lanemask_gt)); do { Liveness = __ACTIVEMASK(); remote_id = __ffs(Liveness & lanemask_gt); size = __popc(Liveness); logical_lane_id 
/= 2; shflFct(reduce_data, /*LaneId =*/logical_lane_id, /*Offset=*/remote_id - 1 - physical_lane_id, /*AlgoVersion=*/2); } while (logical_lane_id % 2 == 0 && size > 1); return (logical_lane_id == 0); } EXTERN int32_t __kmpc_nvptx_simd_reduce_nowait(int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) { uint32_t Liveness = __ACTIVEMASK(); if (Liveness == 0xffffffff) { gpu_regular_warp_reduce(reduce_data, shflFct); return GetThreadIdInBlock() % WARPSIZE == 0; // Result on lane 0 of the simd warp. } else { return gpu_irregular_simd_reduce( reduce_data, shflFct); // Result on the first active lane. } } INLINE static int32_t nvptx_parallel_reduce_nowait( int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct, bool isSPMDExecutionMode, bool isRuntimeUninitialized) { uint32_t BlockThreadId = GetLogicalThreadIdInBlock(isSPMDExecutionMode); uint32_t NumThreads = GetNumberOfOmpThreads(BlockThreadId, isSPMDExecutionMode); if (NumThreads == 1) return 1; /* * This reduce function handles reduction within a team. It handles * parallel regions in both L1 and L2 parallelism levels. It also * supports Generic, SPMD, and NoOMP modes. * * 1. Reduce within a warp. * 2. Warp master copies value to warp 0 via shared memory. * 3. Warp 0 reduces to a single value. * 4. The reduced value is available in the thread that returns 1. */ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 uint32_t WarpsNeeded = (NumThreads + WARPSIZE - 1) / WARPSIZE; uint32_t WarpId = BlockThreadId / WARPSIZE; // Volta execution model: // For the Generic execution mode a parallel region either has 1 thread and // beyond that, always a multiple of 32. For the SPMD execution mode we may // have any number of threads. if ((NumThreads % WARPSIZE == 0) || (WarpId < WarpsNeeded - 1)) gpu_regular_warp_reduce(reduce_data, shflFct); else if (NumThreads > 1) // Only SPMD execution mode comes thru this case. gpu_irregular_warp_reduce(reduce_data, shflFct, /*LaneCount=*/NumThreads % WARPSIZE, /*LaneId=*/GetThreadIdInBlock() % WARPSIZE); // When we have more than [warpsize] number of threads // a block reduction is performed here. // // Only L1 parallel region can enter this if condition. if (NumThreads > WARPSIZE) { // Gather all the reduced values from each warp // to the first warp. cpyFct(reduce_data, WarpsNeeded); if (WarpId == 0) gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, BlockThreadId); } return BlockThreadId == 0; #else uint32_t Liveness = __ACTIVEMASK(); if (Liveness == 0xffffffff) // Full warp gpu_regular_warp_reduce(reduce_data, shflFct); else if (!(Liveness & (Liveness + 1))) // Partial warp but contiguous lanes gpu_irregular_warp_reduce(reduce_data, shflFct, /*LaneCount=*/__popc(Liveness), /*LaneId=*/GetThreadIdInBlock() % WARPSIZE); else if (!isRuntimeUninitialized) // Dispersed lanes. Only threads in L2 // parallel region may enter here; return // early. return gpu_irregular_simd_reduce(reduce_data, shflFct); // When we have more than [warpsize] number of threads // a block reduction is performed here. // // Only L1 parallel region can enter this if condition. if (NumThreads > WARPSIZE) { uint32_t WarpsNeeded = (NumThreads + WARPSIZE - 1) / WARPSIZE; // Gather all the reduced values from each warp // to the first warp. 
cpyFct(reduce_data, WarpsNeeded); uint32_t WarpId = BlockThreadId / WARPSIZE; if (WarpId == 0) gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, BlockThreadId); return BlockThreadId == 0; } else if (isRuntimeUninitialized /* Never an L2 parallel region without the OMP runtime */) { return BlockThreadId == 0; } // Get the OMP thread Id. This is different from BlockThreadId in the case of // an L2 parallel region. return global_tid == 0; #endif // __CUDA_ARCH__ >= 700 } EXTERN __attribute__((deprecated)) int32_t __kmpc_nvptx_parallel_reduce_nowait( int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) { return nvptx_parallel_reduce_nowait(global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct, isSPMDMode(), isRuntimeUninitialized()); } EXTERN int32_t __kmpc_nvptx_parallel_reduce_nowait_v2( kmp_Ident *loc, int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) { return nvptx_parallel_reduce_nowait( global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct, checkSPMDMode(loc), checkRuntimeUninitialized(loc)); } EXTERN int32_t __kmpc_nvptx_parallel_reduce_nowait_simple_spmd( int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) { return nvptx_parallel_reduce_nowait( global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct, /*isSPMDExecutionMode=*/true, /*isRuntimeUninitialized=*/true); } EXTERN int32_t __kmpc_nvptx_parallel_reduce_nowait_simple_generic( int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) { return nvptx_parallel_reduce_nowait( global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct, /*isSPMDExecutionMode=*/false, /*isRuntimeUninitialized=*/true); } INLINE static int32_t nvptx_teams_reduce_nowait(int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct, kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct, bool isSPMDExecutionMode) { uint32_t ThreadId = GetLogicalThreadIdInBlock(isSPMDExecutionMode); // In non-generic mode all workers participate in the teams reduction. // In generic mode only the team master participates in the teams // reduction because the workers are waiting for parallel work. uint32_t NumThreads = isSPMDExecutionMode ? GetNumberOfOmpThreads(ThreadId, /*isSPMDExecutionMode=*/true) : /*Master thread only*/ 1; uint32_t TeamId = GetBlockIdInKernel(); uint32_t NumTeams = GetNumberOfBlocksInKernel(); __shared__ volatile bool IsLastTeam; // Team masters of all teams write to the scratchpad. if (ThreadId == 0) { unsigned int *timestamp = GetTeamsReductionTimestamp(); char *scratchpad = GetTeamsReductionScratchpad(); scratchFct(reduce_data, scratchpad, TeamId, NumTeams); __threadfence(); // atomicInc increments 'timestamp' and has a range [0, NumTeams-1]. // It resets 'timestamp' back to 0 once the last team increments // this counter. unsigned val = atomicInc(timestamp, NumTeams - 1); IsLastTeam = val == NumTeams - 1; } // We have to wait on L1 barrier because in GENERIC mode the workers // are waiting on barrier 0 for work. 
// // If we guard this barrier as follows it leads to deadlock, probably // because of a compiler bug: if (!IsGenericMode()) __syncthreads(); uint16_t SyncWarps = (NumThreads + WARPSIZE - 1) / WARPSIZE; named_sync(L1_BARRIER, SyncWarps * WARPSIZE); // If this team is not the last, quit. if (/* Volatile read by all threads */ !IsLastTeam) return 0; // // Last team processing. // // Threads in excess of #teams do not participate in reduction of the // scratchpad values. #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 uint32_t ActiveThreads = NumThreads; if (NumTeams < NumThreads) { ActiveThreads = (NumTeams < WARPSIZE) ? 1 : NumTeams & ~((uint16_t)WARPSIZE - 1); } if (ThreadId >= ActiveThreads) return 0; // Load from scratchpad and reduce. char *scratchpad = GetTeamsReductionScratchpad(); ldFct(reduce_data, scratchpad, ThreadId, NumTeams, /*Load only*/ 0); for (uint32_t i = ActiveThreads + ThreadId; i < NumTeams; i += ActiveThreads) ldFct(reduce_data, scratchpad, i, NumTeams, /*Load and reduce*/ 1); uint32_t WarpsNeeded = (ActiveThreads + WARPSIZE - 1) / WARPSIZE; uint32_t WarpId = ThreadId / WARPSIZE; // Reduce across warps to the warp master. if ((ActiveThreads % WARPSIZE == 0) || (WarpId < WarpsNeeded - 1)) // Full warp gpu_regular_warp_reduce(reduce_data, shflFct); else if (ActiveThreads > 1) // Partial warp but contiguous lanes // Only SPMD execution mode comes thru this case. gpu_irregular_warp_reduce(reduce_data, shflFct, /*LaneCount=*/ActiveThreads % WARPSIZE, /*LaneId=*/ThreadId % WARPSIZE); // When we have more than [warpsize] number of threads // a block reduction is performed here. if (ActiveThreads > WARPSIZE) { // Gather all the reduced values from each warp // to the first warp. cpyFct(reduce_data, WarpsNeeded); if (WarpId == 0) gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, ThreadId); } #else if (ThreadId >= NumTeams) return 0; // Load from scratchpad and reduce. char *scratchpad = GetTeamsReductionScratchpad(); ldFct(reduce_data, scratchpad, ThreadId, NumTeams, /*Load only*/ 0); for (uint32_t i = NumThreads + ThreadId; i < NumTeams; i += NumThreads) ldFct(reduce_data, scratchpad, i, NumTeams, /*Load and reduce*/ 1); // Reduce across warps to the warp master. uint32_t Liveness = __ACTIVEMASK(); if (Liveness == 0xffffffff) // Full warp gpu_regular_warp_reduce(reduce_data, shflFct); else // Partial warp but contiguous lanes gpu_irregular_warp_reduce(reduce_data, shflFct, /*LaneCount=*/__popc(Liveness), /*LaneId=*/ThreadId % WARPSIZE); // When we have more than [warpsize] number of threads // a block reduction is performed here. uint32_t ActiveThreads = NumTeams < NumThreads ? NumTeams : NumThreads; if (ActiveThreads > WARPSIZE) { uint32_t WarpsNeeded = (ActiveThreads + WARPSIZE - 1) / WARPSIZE; // Gather all the reduced values from each warp // to the first warp. 
cpyFct(reduce_data, WarpsNeeded); uint32_t WarpId = ThreadId / WARPSIZE; if (WarpId == 0) gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, ThreadId); } #endif // __CUDA_ARCH__ >= 700 return ThreadId == 0; } EXTERN int32_t __kmpc_nvptx_teams_reduce_nowait(int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct, kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct) { return nvptx_teams_reduce_nowait(global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct, scratchFct, ldFct, isSPMDMode()); } EXTERN int32_t __kmpc_nvptx_teams_reduce_nowait_simple_spmd( int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct, kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct) { return nvptx_teams_reduce_nowait(global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct, scratchFct, ldFct, /*isSPMDExecutionMode=*/true); } EXTERN int32_t __kmpc_nvptx_teams_reduce_nowait_simple_generic( int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct, kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct) { return nvptx_teams_reduce_nowait(global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct, scratchFct, ldFct, /*isSPMDExecutionMode=*/false); } EXTERN int32_t __kmpc_nvptx_teams_reduce_nowait_simple(kmp_Ident *loc, int32_t global_tid, kmp_CriticalName *crit) { if (checkSPMDMode(loc) && GetThreadIdInBlock() != 0) return 0; // The master thread of the team actually does the reduction. while (atomicCAS((uint32_t *)crit, 0, 1)) ; return 1; } EXTERN void __kmpc_nvptx_teams_end_reduce_nowait_simple(kmp_Ident *loc, int32_t global_tid, kmp_CriticalName *crit) { __threadfence_system(); (void)atomicExch((uint32_t *)crit, 0); } INLINE static bool isMaster(kmp_Ident *loc, uint32_t ThreadId) { return checkGenericMode(loc) || IsTeamMaster(ThreadId); } INLINE static uint32_t roundToWarpsize(uint32_t s) { if (s < WARPSIZE) return 1; return (s & ~(unsigned)(WARPSIZE - 1)); } __device__ static volatile uint32_t IterCnt = 0; __device__ static volatile uint32_t Cnt = 0; EXTERN int32_t __kmpc_nvptx_teams_reduce_nowait_v2( kmp_Ident *loc, int32_t global_tid, void *global_buffer, int32_t num_of_records, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct, kmp_ListGlobalFctPtr lgcpyFct, kmp_ListGlobalFctPtr lgredFct, kmp_ListGlobalFctPtr glcpyFct, kmp_ListGlobalFctPtr glredFct) { // Terminate all threads in non-SPMD mode except for the master thread. if (checkGenericMode(loc) && GetThreadIdInBlock() != GetMasterThreadID()) return 0; uint32_t ThreadId = GetLogicalThreadIdInBlock(checkSPMDMode(loc)); // In non-generic mode all workers participate in the teams reduction. // In generic mode only the team master participates in the teams // reduction because the workers are waiting for parallel work. uint32_t NumThreads = checkSPMDMode(loc) ? GetNumberOfOmpThreads(ThreadId, /*isSPMDExecutionMode=*/true) : /*Master thread only*/ 1; uint32_t TeamId = GetBlockIdInKernel(); uint32_t NumTeams = GetNumberOfBlocksInKernel(); __shared__ unsigned Bound; __shared__ unsigned ChunkTeamCount; // Block progress for teams greater than the current upper // limit. We always only allow a number of teams less or equal // to the number of slots in the buffer. 
bool IsMaster = isMaster(loc, ThreadId); while (IsMaster) { // Atomic read Bound = atomicAdd((uint32_t *)&IterCnt, 0); if (TeamId < Bound + num_of_records) break; } if (IsMaster) { int ModBockId = TeamId % num_of_records; if (TeamId < num_of_records) lgcpyFct(global_buffer, ModBockId, reduce_data); else lgredFct(global_buffer, ModBockId, reduce_data); __threadfence_system(); // Increment team counter. // This counter is incremented by all teams in the current // BUFFER_SIZE chunk. ChunkTeamCount = atomicInc((uint32_t *)&Cnt, num_of_records - 1); } // Synchronize if (checkSPMDMode(loc)) __kmpc_barrier(loc, global_tid); // reduce_data is global or shared so before being reduced within the // warp we need to bring it in local memory: // local_reduce_data = reduce_data[i] // // Example for 3 reduction variables a, b, c (of potentially different // types): // // buffer layout (struct of arrays): // a, a, ..., a, b, b, ... b, c, c, ... c // |__________| // num_of_records // // local_data_reduce layout (struct): // a, b, c // // Each thread will have a local struct containing the values to be // reduced: // 1. do reduction within each warp. // 2. do reduction across warps. // 3. write the final result to the main reduction variable // by returning 1 in the thread holding the reduction result. // Check if this is the very last team. unsigned NumRecs = min(NumTeams, num_of_records); if (ChunkTeamCount == NumTeams - Bound - 1) { // // Last team processing. // if (ThreadId >= NumRecs) return 0; NumThreads = roundToWarpsize(min(NumThreads, NumRecs)); if (ThreadId >= NumThreads) return 0; // Load from buffer and reduce. glcpyFct(global_buffer, ThreadId, reduce_data); for (uint32_t i = NumThreads + ThreadId; i < NumRecs; i += NumThreads) glredFct(global_buffer, i, reduce_data); // Reduce across warps to the warp master. if (NumThreads > 1) { gpu_regular_warp_reduce(reduce_data, shflFct); // When we have more than [warpsize] number of threads // a block reduction is performed here. uint32_t ActiveThreads = min(NumRecs, NumThreads); if (ActiveThreads > WARPSIZE) { uint32_t WarpsNeeded = (ActiveThreads + WARPSIZE - 1) / WARPSIZE; // Gather all the reduced values from each warp // to the first warp. cpyFct(reduce_data, WarpsNeeded); uint32_t WarpId = ThreadId / WARPSIZE; if (WarpId == 0) gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, ThreadId); } } if (IsMaster) { Cnt = 0; IterCnt = 0; return 1; } return 0; } if (IsMaster && ChunkTeamCount == num_of_records - 1) { // Allow SIZE number of teams to proceed writing their // intermediate results to the global buffer. atomicAdd((uint32_t *)&IterCnt, num_of_records); } return 0; }
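// ---------------------------------------------------------------------------
// [Illustrative sketch -- not part of the original device runtime sources.]
// The team reductions above lean on helpers such as gpu_regular_warp_reduce,
// gpu_irregular_warp_reduce and the inter-warp copy callback, which are
// defined elsewhere in this runtime. Assuming those helpers follow the usual
// shuffle-based pattern, a minimal standalone CUDA version of the
// warp -> block -> grid sum they implement looks like this (all names below
// are invented for the example):
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>

__device__ float warp_reduce_sum(float val) {
  // Every lane participates; after the loop lane 0 holds the warp's sum.
  for (int offset = warpSize / 2; offset > 0; offset /= 2)
    val += __shfl_down_sync(0xffffffffu, val, offset);
  return val;
}

__global__ void block_then_grid_sum(const float* in, float* out, int n) {
  __shared__ float warp_sums[32];          // one partial per warp in the block
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  float v = (tid < n) ? in[tid] : 0.0f;

  v = warp_reduce_sum(v);                  // 1) reduce within each warp
  if ((threadIdx.x & 31) == 0)
    warp_sums[threadIdx.x >> 5] = v;       // 2) warp masters stash partials
  __syncthreads();

  if (threadIdx.x < 32) {                  // 3) first warp reduces the partials
    unsigned nwarps = (blockDim.x + 31) / 32;
    v = (threadIdx.x < nwarps) ? warp_sums[threadIdx.x] : 0.0f;
    v = warp_reduce_sum(v);
    if (threadIdx.x == 0)
      atomicAdd(out, v);                   // 4) one atomic per block (out pre-zeroed)
  }
}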
d7c931e29e27e9554a2b5f760f996015af74b493.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/detail/get_value.cuh> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/sequence.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/scalar/scalar.hpp> #include <cudf/utilities/default_stream.hpp> #include <cudf/utilities/error.hpp> #include <nvtext/detail/load_hash_file.hpp> #include <nvtext/subword_tokenize.hpp> #include <text/subword/detail/wordpiece_tokenizer.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <thrust/for_each.h> #include <thrust/functional.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/tabulate.h> #include <thrust/transform_scan.h> namespace nvtext { namespace detail { namespace { /** * @brief Convert tokens and row2tensor map to final tensor data. * * @param[in] token_ids Tokens from tokenizer * @param[in] offsets Offsets to each string's output row of tokens * @param[in] row2tensor String to tensor token counts * @param[in] row2row_within_tensor Token counts within sub-rows of the output * @param[in] max_sequence_length Maximum number of tokens in a row * @param[in] nrows_tensor_token_ids Total number of output tensor rows * @param[in] stride Number of tokens in sub-rows * @param[in] do_truncate True if tokens should not spill into sub-rows in the output * @param[out] final_tensor Output vector of token-ids * @param[out] attn_mask Identifies valid token id entries * @param[out] metadata Additional data per row */ __global__ void kernel_compute_tensor_metadata( // input uint32_t const* token_ids, cudf::size_type const* offsets, uint32_t const* row2tensor, uint32_t const* row2row_within_tensor, uint32_t max_sequence_length, uint32_t nrows_tensor_token_ids, uint32_t stride, bool do_truncate, // output uint32_t* final_tensor, uint32_t* attn_mask, uint32_t* metadata) { cudf::thread_index_type const output_idx = threadIdx.x + static_cast<cudf::thread_index_type>(blockIdx.x) * static_cast<cudf::thread_index_type>(blockDim.x); if (output_idx >= (static_cast<cudf::thread_index_type>(nrows_tensor_token_ids) * static_cast<cudf::thread_index_type>(max_sequence_length))) { return; } uint32_t const absolute_row_id = output_idx / max_sequence_length; uint32_t const tensor_id = row2tensor[absolute_row_id]; uint32_t const row_within_tensor = row2row_within_tensor[absolute_row_id]; uint32_t const offset_token_ids_tensor = offsets[tensor_id]; uint32_t const n_tokens_tensor = offsets[tensor_id + 1] - offset_token_ids_tensor; // check for last row within tensor bool const last_row_of_tensor = (absolute_row_id == nrows_tensor_token_ids - 1) || (row2tensor[absolute_row_id + 1] != tensor_id); // compute input offset to retrieve token ids uint32_t const token_idx = output_idx % max_sequence_length; uint32_t const row_offset_token_ids = 
offset_token_ids_tensor + token_idx + (row_within_tensor ? (max_sequence_length + (stride * (row_within_tensor - 1))) : 0); if (row_within_tensor == 0) { if (token_idx < n_tokens_tensor) { // copy token ids final_tensor[output_idx] = token_ids[row_offset_token_ids]; attn_mask[output_idx] = 1; } else { // pad with 0 final_tensor[output_idx] = 0; attn_mask[output_idx] = 0; } } else { uint32_t const n_replicates = max_sequence_length - stride; if ((row_offset_token_ids - n_replicates) < (offset_token_ids_tensor + n_tokens_tensor)) { // replicate elements from previous row or copy new tokens final_tensor[output_idx] = token_ids[row_offset_token_ids - n_replicates]; attn_mask[output_idx] = 1; } else { // pad with 0 final_tensor[output_idx] = 0; attn_mask[output_idx] = 0; } } // write metadata if (token_idx == 0) { auto const metadata_idx = absolute_row_id * 3; // three metadata values per output row metadata[metadata_idx] = tensor_id; metadata[metadata_idx + 1] = (row_within_tensor == 0) ? 0 : (max_sequence_length - stride) / 2; metadata[metadata_idx + 2] = [&] { if (!last_row_of_tensor) return max_sequence_length - (max_sequence_length - stride) / 2 - 1; if (n_tokens_tensor <= max_sequence_length) // we fit, all good return (n_tokens_tensor > 0) ? (n_tokens_tensor - 1) : 0; if (do_truncate) return (max_sequence_length - 1); auto const final_row_value = (max_sequence_length - stride) + (n_tokens_tensor - max_sequence_length) % stride; return (final_row_value > 0) ? (final_row_value - 1) : 0; }(); } } // this happens if there are no tokens in the input tokenizer_result build_empty_result(cudf::size_type size, uint32_t max_sequence_length, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto zero = cudf::numeric_scalar<uint32_t>(0, true, stream); auto ids = cudf::detail::sequence(size * max_sequence_length, zero, zero, stream, mr); auto mask = cudf::detail::sequence(size * max_sequence_length, zero, zero, stream, mr); auto metadata = cudf::make_numeric_column( cudf::data_type{cudf::type_id::UINT32}, size * 3, cudf::mask_state::UNALLOCATED, stream, mr); thrust::tabulate(rmm::exec_policy(stream), metadata->mutable_view().begin<uint32_t>(), metadata->mutable_view().end<uint32_t>(), [] __device__(auto idx) { return ((idx % 3) == 0) ? 
idx : 0; }); metadata->set_null_count(0); return tokenizer_result{ 0, max_sequence_length, std::move(ids), std::move(mask), std::move(metadata)}; } } // namespace tokenizer_result subword_tokenize(cudf::strings_column_view const& strings, hashed_vocabulary const& vocab_table, uint32_t max_sequence_length, uint32_t stride, bool do_lower_case, bool do_truncate, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_EXPECTS(stride <= max_sequence_length, "stride must be less than or equal to max_sequence_length"); auto const strings_count = strings.size(); if (strings_count == strings.null_count()) { // empty or all-null returns empty return tokenizer_result{0, max_sequence_length, cudf::make_empty_column(cudf::data_type{cudf::type_id::UINT32}), cudf::make_empty_column(cudf::data_type{cudf::type_id::UINT32}), cudf::make_empty_column(cudf::data_type{cudf::type_id::UINT32})}; } CUDF_EXPECTS( max_sequence_length <= (static_cast<std::size_t>(std::numeric_limits<cudf::size_type>::max()) / strings_count), "max_sequence_length times number of input rows exceeds the column size limit", std::overflow_error); auto const offsets = strings.offsets(); auto const d_offsets = offsets.data<cudf::size_type>() + strings.offset(); auto const offset = cudf::detail::get_value<cudf::size_type>(offsets, strings.offset(), stream); auto const d_chars = strings.chars().data<char>() + offset; // Create tokenizer wordpiece_tokenizer tokenizer( vocab_table, max_sequence_length, stride, do_truncate, do_lower_case); // Run tokenizer auto const tokens = tokenizer.tokenize(d_chars, d_offsets, strings_count, stream); // assign output components auto device_token_ids = tokens.first->data(); auto device_offsets = tokens.second->data(); // Format output from tokenizer // Each string can create 1 or more tensor entries. // Compute the string-per-tensor offsets values by scanning // over the number of tokens for each string. rmm::device_uvector<uint32_t> offsets_per_tensor(strings_count + 1, stream); auto d_offsets_per_tensor = offsets_per_tensor.data(); thrust::transform_exclusive_scan( rmm::exec_policy(stream), thrust::make_counting_iterator<cudf::size_type>(0), thrust::make_counting_iterator<cudf::size_type>(strings_count + 1), offsets_per_tensor.begin(), [device_offsets, do_truncate, max_sequence_length, stride, strings_count] __device__( cudf::size_type idx) { uint32_t const num_tokens = idx < strings_count ? 
device_offsets[idx + 1] - device_offsets[idx] : 0; if (do_truncate || num_tokens <= max_sequence_length) return uint32_t{1}; return 1 + ((num_tokens - max_sequence_length + stride - 1) / stride); }, uint32_t{0}, thrust::plus<uint32_t>()); // last element is the total number of output rows uint32_t const nrows_tensor_token_ids = offsets_per_tensor.element(strings_count, stream); // if there are no tokens at all, build a specific empty result if (nrows_tensor_token_ids == 0) { return build_empty_result(strings_count, max_sequence_length, stream, mr); } // compute global_row to tensor, and global_row to within_tensor_row correspondence rmm::device_uvector<uint32_t> row2tensor(nrows_tensor_token_ids, stream); auto d_row2tensor = row2tensor.data(); rmm::device_uvector<uint32_t> row2row_within_tensor(nrows_tensor_token_ids, stream); auto d_row2row_within_tensor = row2row_within_tensor.data(); thrust::for_each_n( rmm::exec_policy(stream), thrust::make_counting_iterator<uint32_t>(0), strings_count, [d_offsets_per_tensor, d_row2tensor, d_row2row_within_tensor] __device__(auto idx) { uint32_t offset = d_offsets_per_tensor[idx]; uint32_t nrows = d_offsets_per_tensor[idx + 1] - offset; for (uint32_t jdx = 0; jdx < nrows; ++jdx) { d_row2tensor[jdx + offset] = idx; d_row2row_within_tensor[jdx + offset] = jdx; } }); // create output data columns auto tensor_token_ids = cudf::make_numeric_column(cudf::data_type{cudf::type_id::UINT32}, nrows_tensor_token_ids * max_sequence_length, cudf::mask_state::UNALLOCATED, stream, mr); auto tensor_attention_mask = cudf::make_numeric_column(cudf::data_type{cudf::type_id::UINT32}, nrows_tensor_token_ids * max_sequence_length, cudf::mask_state::UNALLOCATED, stream, mr); auto tensor_metadata = cudf::make_numeric_column(cudf::data_type{cudf::type_id::UINT32}, nrows_tensor_token_ids * 3, cudf::mask_state::UNALLOCATED, stream, mr); // compute final-tensor, mask, and metadata constexpr int block_size = 256; cudf::detail::grid_1d const grid{ static_cast<cudf::size_type>(nrows_tensor_token_ids * max_sequence_length), block_size}; hipLaunchKernelGGL(( kernel_compute_tensor_metadata), dim3(grid.num_blocks), dim3(grid.num_threads_per_block), 0, stream.value(), device_token_ids, device_offsets, d_row2tensor, d_row2row_within_tensor, max_sequence_length, nrows_tensor_token_ids, stride, do_truncate, tensor_token_ids->mutable_view().data<uint32_t>(), tensor_attention_mask->mutable_view().data<uint32_t>(), tensor_metadata->mutable_view().data<uint32_t>()); return tokenizer_result{nrows_tensor_token_ids, max_sequence_length, std::move(tensor_token_ids), std::move(tensor_attention_mask), std::move(tensor_metadata)}; } } // namespace detail tokenizer_result subword_tokenize(cudf::strings_column_view const& strings, hashed_vocabulary const& vocabulary_table, uint32_t max_sequence_length, uint32_t stride, bool do_lower_case, bool do_truncate, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::subword_tokenize(strings, vocabulary_table, max_sequence_length, stride, do_lower_case, do_truncate, cudf::get_default_stream(), mr); } } // namespace nvtext
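// ---------------------------------------------------------------------------
// [Illustrative sketch -- not part of the original cudf sources.]
// The transform_exclusive_scan above counts how many output tensor rows each
// string produces: one row always, plus one extra (overlapping) row per
// additional `stride` tokens beyond max_sequence_length unless truncation is
// requested. The same per-string formula restated on the host, with invented
// names, as a sanity check:
// ---------------------------------------------------------------------------
#include <cassert>
#include <cstdint>

inline uint32_t rows_per_string(uint32_t num_tokens,
                                uint32_t max_sequence_length,
                                uint32_t stride,
                                bool do_truncate) {
  if (do_truncate || num_tokens <= max_sequence_length) return 1;
  // ceiling division over the spill-over tokens
  return 1 + (num_tokens - max_sequence_length + stride - 1) / stride;
}

inline void rows_per_string_check() {
  // 70 tokens into rows of 64 with stride 48: the first row takes 64 tokens,
  // the remaining 6 spill into one more row that overlaps the first by 16.
  assert(rows_per_string(70, 64, 48, /*do_truncate=*/false) == 2);
  assert(rows_per_string(70, 64, 48, /*do_truncate=*/true) == 1);
}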
d7c931e29e27e9554a2b5f760f996015af74b493.cu
/* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/detail/get_value.cuh> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/sequence.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/scalar/scalar.hpp> #include <cudf/utilities/default_stream.hpp> #include <cudf/utilities/error.hpp> #include <nvtext/detail/load_hash_file.hpp> #include <nvtext/subword_tokenize.hpp> #include <text/subword/detail/wordpiece_tokenizer.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <thrust/for_each.h> #include <thrust/functional.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/tabulate.h> #include <thrust/transform_scan.h> namespace nvtext { namespace detail { namespace { /** * @brief Convert tokens and row2tensor map to final tensor data. * * @param[in] token_ids Tokens from tokenizer * @param[in] offsets Offsets to each string's output row of tokens * @param[in] row2tensor String to tensor token counts * @param[in] row2row_within_tensor Token counts within sub-rows of the output * @param[in] max_sequence_length Maximum number of tokens in a row * @param[in] nrows_tensor_token_ids Total number of output tensor rows * @param[in] stride Number of tokens in sub-rows * @param[in] do_truncate True if tokens should not spill into sub-rows in the output * @param[out] final_tensor Output vector of token-ids * @param[out] attn_mask Identifies valid token id entries * @param[out] metadata Additional data per row */ __global__ void kernel_compute_tensor_metadata( // input uint32_t const* token_ids, cudf::size_type const* offsets, uint32_t const* row2tensor, uint32_t const* row2row_within_tensor, uint32_t max_sequence_length, uint32_t nrows_tensor_token_ids, uint32_t stride, bool do_truncate, // output uint32_t* final_tensor, uint32_t* attn_mask, uint32_t* metadata) { cudf::thread_index_type const output_idx = threadIdx.x + static_cast<cudf::thread_index_type>(blockIdx.x) * static_cast<cudf::thread_index_type>(blockDim.x); if (output_idx >= (static_cast<cudf::thread_index_type>(nrows_tensor_token_ids) * static_cast<cudf::thread_index_type>(max_sequence_length))) { return; } uint32_t const absolute_row_id = output_idx / max_sequence_length; uint32_t const tensor_id = row2tensor[absolute_row_id]; uint32_t const row_within_tensor = row2row_within_tensor[absolute_row_id]; uint32_t const offset_token_ids_tensor = offsets[tensor_id]; uint32_t const n_tokens_tensor = offsets[tensor_id + 1] - offset_token_ids_tensor; // check for last row within tensor bool const last_row_of_tensor = (absolute_row_id == nrows_tensor_token_ids - 1) || (row2tensor[absolute_row_id + 1] != tensor_id); // compute input offset to retrieve token ids uint32_t const token_idx = output_idx % max_sequence_length; uint32_t const row_offset_token_ids = offset_token_ids_tensor + token_idx + (row_within_tensor ? 
(max_sequence_length + (stride * (row_within_tensor - 1))) : 0); if (row_within_tensor == 0) { if (token_idx < n_tokens_tensor) { // copy token ids final_tensor[output_idx] = token_ids[row_offset_token_ids]; attn_mask[output_idx] = 1; } else { // pad with 0 final_tensor[output_idx] = 0; attn_mask[output_idx] = 0; } } else { uint32_t const n_replicates = max_sequence_length - stride; if ((row_offset_token_ids - n_replicates) < (offset_token_ids_tensor + n_tokens_tensor)) { // replicate elements from previous row or copy new tokens final_tensor[output_idx] = token_ids[row_offset_token_ids - n_replicates]; attn_mask[output_idx] = 1; } else { // pad with 0 final_tensor[output_idx] = 0; attn_mask[output_idx] = 0; } } // write metadata if (token_idx == 0) { auto const metadata_idx = absolute_row_id * 3; // three metadata values per output row metadata[metadata_idx] = tensor_id; metadata[metadata_idx + 1] = (row_within_tensor == 0) ? 0 : (max_sequence_length - stride) / 2; metadata[metadata_idx + 2] = [&] { if (!last_row_of_tensor) return max_sequence_length - (max_sequence_length - stride) / 2 - 1; if (n_tokens_tensor <= max_sequence_length) // we fit, all good return (n_tokens_tensor > 0) ? (n_tokens_tensor - 1) : 0; if (do_truncate) return (max_sequence_length - 1); auto const final_row_value = (max_sequence_length - stride) + (n_tokens_tensor - max_sequence_length) % stride; return (final_row_value > 0) ? (final_row_value - 1) : 0; }(); } } // this happens if there are no tokens in the input tokenizer_result build_empty_result(cudf::size_type size, uint32_t max_sequence_length, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto zero = cudf::numeric_scalar<uint32_t>(0, true, stream); auto ids = cudf::detail::sequence(size * max_sequence_length, zero, zero, stream, mr); auto mask = cudf::detail::sequence(size * max_sequence_length, zero, zero, stream, mr); auto metadata = cudf::make_numeric_column( cudf::data_type{cudf::type_id::UINT32}, size * 3, cudf::mask_state::UNALLOCATED, stream, mr); thrust::tabulate(rmm::exec_policy(stream), metadata->mutable_view().begin<uint32_t>(), metadata->mutable_view().end<uint32_t>(), [] __device__(auto idx) { return ((idx % 3) == 0) ? 
idx : 0; }); metadata->set_null_count(0); return tokenizer_result{ 0, max_sequence_length, std::move(ids), std::move(mask), std::move(metadata)}; } } // namespace tokenizer_result subword_tokenize(cudf::strings_column_view const& strings, hashed_vocabulary const& vocab_table, uint32_t max_sequence_length, uint32_t stride, bool do_lower_case, bool do_truncate, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_EXPECTS(stride <= max_sequence_length, "stride must be less than or equal to max_sequence_length"); auto const strings_count = strings.size(); if (strings_count == strings.null_count()) { // empty or all-null returns empty return tokenizer_result{0, max_sequence_length, cudf::make_empty_column(cudf::data_type{cudf::type_id::UINT32}), cudf::make_empty_column(cudf::data_type{cudf::type_id::UINT32}), cudf::make_empty_column(cudf::data_type{cudf::type_id::UINT32})}; } CUDF_EXPECTS( max_sequence_length <= (static_cast<std::size_t>(std::numeric_limits<cudf::size_type>::max()) / strings_count), "max_sequence_length times number of input rows exceeds the column size limit", std::overflow_error); auto const offsets = strings.offsets(); auto const d_offsets = offsets.data<cudf::size_type>() + strings.offset(); auto const offset = cudf::detail::get_value<cudf::size_type>(offsets, strings.offset(), stream); auto const d_chars = strings.chars().data<char>() + offset; // Create tokenizer wordpiece_tokenizer tokenizer( vocab_table, max_sequence_length, stride, do_truncate, do_lower_case); // Run tokenizer auto const tokens = tokenizer.tokenize(d_chars, d_offsets, strings_count, stream); // assign output components auto device_token_ids = tokens.first->data(); auto device_offsets = tokens.second->data(); // Format output from tokenizer // Each string can create 1 or more tensor entries. // Compute the string-per-tensor offsets values by scanning // over the number of tokens for each string. rmm::device_uvector<uint32_t> offsets_per_tensor(strings_count + 1, stream); auto d_offsets_per_tensor = offsets_per_tensor.data(); thrust::transform_exclusive_scan( rmm::exec_policy(stream), thrust::make_counting_iterator<cudf::size_type>(0), thrust::make_counting_iterator<cudf::size_type>(strings_count + 1), offsets_per_tensor.begin(), [device_offsets, do_truncate, max_sequence_length, stride, strings_count] __device__( cudf::size_type idx) { uint32_t const num_tokens = idx < strings_count ? 
device_offsets[idx + 1] - device_offsets[idx] : 0; if (do_truncate || num_tokens <= max_sequence_length) return uint32_t{1}; return 1 + ((num_tokens - max_sequence_length + stride - 1) / stride); }, uint32_t{0}, thrust::plus<uint32_t>()); // last element is the total number of output rows uint32_t const nrows_tensor_token_ids = offsets_per_tensor.element(strings_count, stream); // if there are no tokens at all, build a specific empty result if (nrows_tensor_token_ids == 0) { return build_empty_result(strings_count, max_sequence_length, stream, mr); } // compute global_row to tensor, and global_row to within_tensor_row correspondence rmm::device_uvector<uint32_t> row2tensor(nrows_tensor_token_ids, stream); auto d_row2tensor = row2tensor.data(); rmm::device_uvector<uint32_t> row2row_within_tensor(nrows_tensor_token_ids, stream); auto d_row2row_within_tensor = row2row_within_tensor.data(); thrust::for_each_n( rmm::exec_policy(stream), thrust::make_counting_iterator<uint32_t>(0), strings_count, [d_offsets_per_tensor, d_row2tensor, d_row2row_within_tensor] __device__(auto idx) { uint32_t offset = d_offsets_per_tensor[idx]; uint32_t nrows = d_offsets_per_tensor[idx + 1] - offset; for (uint32_t jdx = 0; jdx < nrows; ++jdx) { d_row2tensor[jdx + offset] = idx; d_row2row_within_tensor[jdx + offset] = jdx; } }); // create output data columns auto tensor_token_ids = cudf::make_numeric_column(cudf::data_type{cudf::type_id::UINT32}, nrows_tensor_token_ids * max_sequence_length, cudf::mask_state::UNALLOCATED, stream, mr); auto tensor_attention_mask = cudf::make_numeric_column(cudf::data_type{cudf::type_id::UINT32}, nrows_tensor_token_ids * max_sequence_length, cudf::mask_state::UNALLOCATED, stream, mr); auto tensor_metadata = cudf::make_numeric_column(cudf::data_type{cudf::type_id::UINT32}, nrows_tensor_token_ids * 3, cudf::mask_state::UNALLOCATED, stream, mr); // compute final-tensor, mask, and metadata constexpr int block_size = 256; cudf::detail::grid_1d const grid{ static_cast<cudf::size_type>(nrows_tensor_token_ids * max_sequence_length), block_size}; kernel_compute_tensor_metadata<<<grid.num_blocks, grid.num_threads_per_block, 0, stream.value()>>>( device_token_ids, device_offsets, d_row2tensor, d_row2row_within_tensor, max_sequence_length, nrows_tensor_token_ids, stride, do_truncate, tensor_token_ids->mutable_view().data<uint32_t>(), tensor_attention_mask->mutable_view().data<uint32_t>(), tensor_metadata->mutable_view().data<uint32_t>()); return tokenizer_result{nrows_tensor_token_ids, max_sequence_length, std::move(tensor_token_ids), std::move(tensor_attention_mask), std::move(tensor_metadata)}; } } // namespace detail tokenizer_result subword_tokenize(cudf::strings_column_view const& strings, hashed_vocabulary const& vocabulary_table, uint32_t max_sequence_length, uint32_t stride, bool do_lower_case, bool do_truncate, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::subword_tokenize(strings, vocabulary_table, max_sequence_length, stride, do_lower_case, do_truncate, cudf::get_default_stream(), mr); } } // namespace nvtext
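// ---------------------------------------------------------------------------
// [Illustrative sketch -- not part of the original cudf sources.]
// kernel_compute_tensor_metadata writes three uint32 values per output row:
// the index of the originating string, and the first and last token positions
// in that row whose results should be kept when consecutive rows overlap by
// (max_sequence_length - stride) tokens. That reading of the triplets is an
// interpretation of the kernel above; the small host-side decoder below, with
// invented names, assumes the metadata column has already been copied into a
// host vector:
// ---------------------------------------------------------------------------
#include <cstdint>
#include <cstdio>
#include <vector>

void print_row_spans(const std::vector<uint32_t>& metadata) {
  for (std::size_t row = 0; row * 3 + 2 < metadata.size(); ++row) {
    uint32_t string_id = metadata[3 * row + 0];  // originating string
    uint32_t first     = metadata[3 * row + 1];  // first "kept" token position
    uint32_t last      = metadata[3 * row + 2];  // last "kept" token position
    std::printf("row %zu: string %u, keep positions [%u, %u]\n",
                row, string_id, first, last);
  }
}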
0e402a40c94c4826c1fee0655baa8ee527b54e49.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cstdio>

__global__ void helloFromGPU(void)
{
    printf("Hello from GPU - block: %d - thread: %d. \n", blockIdx.x, threadIdx.x);
}

int main()
{
    std::cout << "Hello from CPU. " << std::endl;

    hipLaunchKernelGGL(helloFromGPU, dim3(2), dim3(5), 0, 0);
    //hipDeviceReset();
    hipDeviceSynchronize();

    return 0;
}
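// ---------------------------------------------------------------------------
// [Illustrative note -- not part of the original files.]
// The hipified file above and the CUDA original below launch the same kernel;
// hipify only rewrites the triple-chevron launch into the hipLaunchKernelGGL
// macro (kernel, grid, block, dynamic shared memory bytes, stream, args...):
//
//     helloFromGPU<<<2, 5>>>();                                  // CUDA
//     hipLaunchKernelGGL(helloFromGPU, dim3(2), dim3(5), 0, 0);  // HIP
//
// Both run 2 blocks of 5 threads with no dynamic shared memory on the default
// (null) stream.
// ---------------------------------------------------------------------------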
0e402a40c94c4826c1fee0655baa8ee527b54e49.cu
#include <iostream>
#include <cstdio>

__global__ void helloFromGPU(void)
{
    printf("Hello from GPU - block: %d - thread: %d. \n", blockIdx.x, threadIdx.x);
}

int main()
{
    std::cout << "Hello from CPU. " << std::endl;

    helloFromGPU<<<2, 5>>>();
    //cudaDeviceReset();
    cudaDeviceSynchronize();

    return 0;
}
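// ---------------------------------------------------------------------------
// [Illustrative sketch -- not part of the original file.]
// The hello-world above launches the kernel without checking for errors. A
// common variant that surfaces both launch-configuration and runtime failures
// could look like this (the kernel is renamed to keep the sketch standalone):
// ---------------------------------------------------------------------------
#include <cstdio>
#include <cuda_runtime.h>

__global__ void helloFromGPUChecked(void)
{
    printf("Hello from GPU - block: %d - thread: %d.\n", blockIdx.x, threadIdx.x);
}

int main()
{
    helloFromGPUChecked<<<2, 5>>>();

    // Errors from an invalid launch configuration show up here...
    cudaError_t launchErr = cudaGetLastError();
    if (launchErr != cudaSuccess)
        std::printf("launch failed: %s\n", cudaGetErrorString(launchErr));

    // ...while errors raised during kernel execution show up after the sync.
    cudaError_t syncErr = cudaDeviceSynchronize();
    if (syncErr != cudaSuccess)
        std::printf("kernel failed: %s\n", cudaGetErrorString(syncErr));

    return 0;
}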
b1389e84e39efc89e957b0acdae7c1d57b8f1b55.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/NumericUtils.h> #include <ATen/core/Tensor.h> #include <ATen/hip/HIPContext.h> #include <ATen/native/Resize.h> #include <ATen/hip/Atomic.cuh> #include <ATen/hip/HIPApplyUtils.cuh> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/bincount_native.h> #include <ATen/ops/empty.h> #include <ATen/ops/histc_native.h> #include <ATen/ops/zeros.h> #endif namespace at { namespace cuda { #define RATIO_OF_GMEM_ATOMIC_ADD_TO_SMEM_ATOMIC_ADD 8 #define FOR_KERNEL_LOOP(i, lim) \ for (IndexType i = blockIdx.x * blockDim.x + threadIdx.x; i < lim; \ i += gridDim.x * blockDim.x) /* Memory types used for the 3 histogram implementations. See `CUDA_tensor_histogram` below. */ enum class CUDAHistogramMemoryType { SHARED, GLOBAL }; namespace { template <typename input_t, typename IndexType> __device__ static IndexType getBin( input_t bVal, at::acc_type<input_t, /*is_cuda=*/true> minvalue, at::acc_type<input_t, /*is_cuda=*/true> maxvalue, int64_t nbins) { IndexType bin = (int)(((bVal - minvalue)) * nbins / (maxvalue - minvalue)); // (only applicable for histc) // while each bin is inclusive at the lower end and exclusive at the higher, // i.e. [start, end) the last bin is inclusive at both, i.e. [start, end], in // order to include maxvalue if exists therefore when bin == nbins, adjust bin // to the last bin if (bin == nbins) bin -= 1; return bin; } } /* Kernel for computing the histogram of the input. */ template < typename output_t, typename input_t, typename IndexType, int ADims, int PDims, int BDims, CUDAHistogramMemoryType MemoryType, typename Op> C10_LAUNCH_BOUNDS_1(cuda::getApplyBlockSize()) __global__ void kernelHistogram1D( detail::TensorInfo<output_t, IndexType> a, /* output */ detail::TensorInfo<output_t, IndexType> p, /* partial output */ detail::TensorInfo<input_t, IndexType> b, /* input */ int64_t nbins, at::acc_type<input_t, /*is_cuda=*/true> minvalue, at::acc_type<input_t, /*is_cuda=*/true> maxvalue, IndexType totalElements, Op getOp) { extern __shared__ unsigned char my_smem[]; output_t* smem = nullptr; if (MemoryType == CUDAHistogramMemoryType::SHARED) { ////////////////////////// Shared memory ////////////////////////// // atomically add to block specific shared memory // then atomically add to the global output tensor smem = reinterpret_cast<output_t*>(my_smem); for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) { smem[i] = 0; } __syncthreads(); FOR_KERNEL_LOOP(linearIndex, totalElements) { // Convert `linearIndex` into an offset of `b` const IndexType bOffset = detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b); const auto bVal = b.data[bOffset]; if (bVal >= minvalue && bVal <= maxvalue) { // Use value at `b` as an offset of `smem` const IndexType bin = getBin<input_t, IndexType>(bVal, minvalue, maxvalue, nbins); gpuAtomicAddNoReturn(&smem[bin], getOp(linearIndex)); } } __syncthreads(); // NOTE: atomically update output bin count. // Atomic update is imp since __syncthread() will only synchronize threads // in a given block, not across blocks. 
for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) { const IndexType aOffset = detail::IndexToOffset<output_t, IndexType, ADims>::get(i, a); gpuAtomicAddNoReturn(&a.data[aOffset], smem[i]); } } else { ////////////////////////// Global memory ////////////////////////// // atomically add to the output tensor // compute histogram for the block FOR_KERNEL_LOOP(linearIndex, totalElements) { // Convert `linearIndex` into an offset of `b` const IndexType bOffset = detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b); const auto bVal = b.data[bOffset]; if (bVal >= minvalue && bVal <= maxvalue) { // Use value at `b` as an offset of `a` const IndexType bin = getBin<input_t, IndexType>(bVal, minvalue, maxvalue, nbins); const IndexType aOffset = detail::IndexToOffset<output_t, IndexType, ADims>::get(bin, a); gpuAtomicAddNoReturn(&a.data[aOffset], getOp(linearIndex)); } } } } #define HANDLE_CASE(MEMORY_TYPE, WEIGHTS_OP, SHARED_MEM) \ hipLaunchKernelGGL(( kernelHistogram1D< \ output_t, \ input_t, \ IndexType, \ 1, \ 2, \ -1, \ MEMORY_TYPE>), dim3(grid), dim3(block), SHARED_MEM, getCurrentHIPStreamMasqueradingAsCUDA(), \ aInfo, \ pInfo, \ bInfo, \ nbins, \ minvalue, \ maxvalue, \ totalElements, \ WEIGHTS_OP); \ C10_HIP_KERNEL_LAUNCH_CHECK(); #define HANDLE_SWITCH_CASE(mType, getOp) \ switch (mType) { \ case CUDAHistogramMemoryType::SHARED: \ HANDLE_CASE(CUDAHistogramMemoryType::SHARED, getOp, sharedMem); \ break; \ default: \ HANDLE_CASE(CUDAHistogramMemoryType::GLOBAL, getOp, 0); \ } /* Calculate the frequency of the input values. `a` contains the final output or the histogram. Input `b` is assumed to be 1-D non-negative int array. `c` optionally contains the weight vector. See `help torch.bincount` for details on the math. 3 implementations based of input size and memory usage: case: enough shared mem SHARED: Each block atomically adds to it's own **shared** hist copy, then atomically updates the global tensor. case: no enough shared mem GLOBAL: all threads atomically update to a single **global** hist copy. 
*/ template <typename output_t, typename input_t, bool HasWeights> bool CUDA_tensor_histogram( at::Tensor a, /* output */ at::Tensor b, /* input */ at::Tensor c, /* weights(optional) */ int64_t nbins, at::acc_type<input_t, /*is_cuda=*/true> minvalue, at::acc_type<input_t, /*is_cuda=*/true> maxvalue, TensorArgType aType = TensorArgType::ReadWrite, TensorArgType bType = TensorArgType::ReadOnly, TensorArgType cType = TensorArgType::ReadOnly) { checkBackend("CUDA_tensor_histogram", {a, b}, Backend::CUDA); if (HasWeights) { checkBackend("CUDA_tensor_histogram", {c}, Backend::CUDA); } auto totalElements = b.numel(); if (totalElements == 0) { return false; } const dim3 block = getApplyBlock(); dim3 grid; int64_t curDevice = current_device(); if (curDevice == -1 || !getApplyGrid(totalElements, grid, curDevice)) { return false; } CUDAHistogramMemoryType memType = CUDAHistogramMemoryType::GLOBAL; auto maxSharedMem = getCurrentDeviceProperties()->sharedMemPerBlock; auto sharedMem = nbins * sizeof(output_t) + 8; // 8 guard bytes // determine memory type to use in the kernel if (sharedMem < maxSharedMem) { // Solve equations: // (1) #(smem atomicAdd per SM) = totalElements / min(grid.x, #SM) // (2) #(gmem atomicAdd) = grid.x * nbins // (3) RATIO_OF_GMEM_ATOMIC_ADD_TO_SMEM_ATOMIC_ADD = #(gmem atomicAdd) / #(smem atomicAdd per SM) unsigned optimalGrid = ceil_div<size_t>(RATIO_OF_GMEM_ATOMIC_ADD_TO_SMEM_ATOMIC_ADD * totalElements, nbins * getCurrentDeviceProperties()->multiProcessorCount); if (optimalGrid < (unsigned)getCurrentDeviceProperties()->multiProcessorCount) { optimalGrid = 1 + (unsigned)std::sqrt(RATIO_OF_GMEM_ATOMIC_ADD_TO_SMEM_ATOMIC_ADD * totalElements / nbins); } auto optimalSteps = ceil_div<size_t>(totalElements, optimalGrid * block.x); optimalGrid = ceil_div<size_t>(totalElements, optimalSteps * block.x); grid.x = ::min(grid.x, optimalGrid); memType = CUDAHistogramMemoryType::SHARED; } using IndexType = int64_t; auto aInfo = detail::getTensorInfo<output_t, IndexType>(a); auto bInfo = detail::getTensorInfo<input_t, IndexType>(b); detail::TensorInfo<output_t, IndexType> pInfo(nullptr, 0, {}, {}); if (HasWeights) { auto cInfo = detail::getTensorInfo<output_t, IndexType>(c); const auto getWeightsOp = [cInfo] __device__(IndexType cIndex) { const IndexType cOffset = detail::IndexToOffset<output_t, IndexType, 1>::get(cIndex, cInfo); return cInfo.data[cOffset]; }; HANDLE_SWITCH_CASE(memType, getWeightsOp) } else { static const auto getDummyOp = [] __device__(IndexType) { return 1L; }; HANDLE_SWITCH_CASE(memType, getDummyOp) } return true; } #undef HANDLE_CASE #undef HANDLE_SWITCH_CASE #undef FOR_KERNEL_LOOP #undef RATIO_OF_GMEM_ATOMIC_ADD_TO_SMEM_ATOMIC_ADD } // namespace cuda namespace { ///////////////// bincount ///////////////// template <typename input_t, typename weights_t> Tensor _bincount_cuda_template( const Tensor& self, const Tensor& weights, int64_t minlength) { if (minlength < 0) { AT_ERROR("minlength should be >= 0"); } if (self.dim() == 1 && self.numel() == 0) { return at::zeros( {minlength}, kLong, c10::nullopt /* layout */, kCUDA, c10::nullopt /* pin_memory */); } if (self.dim() != 1 || (!std::is_same<input_t, uint8_t>::value && *self.min().cpu().data_ptr<input_t>() < 0)) { AT_ERROR("bincount only supports 1-d non-negative integral inputs."); } bool has_weights = weights.defined(); if (has_weights && (weights.dim() != 1 || weights.size(0) != self.size(0))) { AT_ERROR("weights should be 1-d and have the same length as input"); } const int64_t nbins = 
::max(self.max().item<input_t>() + (int64_t)1, minlength); // we are using acc_type for the bounds, in particular int64_t for integers // in order to avoid overflows (e.g. using 256 bins for dtype uint8) using bounds_t = at::acc_type<input_t, /*is_cuda=*/true>; const bounds_t minvalue = 0; const bounds_t maxvalue = nbins; // alloc output counter on GPU Tensor output; if (has_weights) { output = at::zeros( {nbins}, optTypeMetaToScalarType(weights.options().dtype_opt()), weights.options().layout_opt(), weights.options().device_opt(), weights.options().pinned_memory_opt()); cuda::CUDA_tensor_histogram<weights_t, input_t, true>( output, self, weights, nbins, minvalue, maxvalue); } else { output = at::zeros( {nbins}, kLong, c10::nullopt /* layout */, DeviceType::CUDA, c10::nullopt /* pin_memory */); cuda::CUDA_tensor_histogram<int64_t, input_t, false>( output, self, weights, nbins, minvalue, maxvalue); } return output; } ///////////////// histc ///////////////// template <typename input_t> Tensor _histc_cuda_template( const Tensor& self, int64_t nbins, at::acc_type<input_t, /*is_cuda=*/true> min, at::acc_type<input_t, /*is_cuda=*/true> max) { if (nbins <= 0) { AT_ERROR("bins must be > 0"); } Tensor output = at::zeros( {nbins}, self.scalar_type(), c10::nullopt /* layout */, DeviceType::CUDA, c10::nullopt /* pin_memory */); input_t minvalue = min; input_t maxvalue = max; if (min == max && self.numel() > 0) { minvalue = *self.min().cpu().data_ptr<input_t>(); maxvalue = *self.max().cpu().data_ptr<input_t>(); } if (minvalue == maxvalue) { minvalue = minvalue - 1; maxvalue = maxvalue + 1; } #if !defined(USE_ROCM) TORCH_CHECK( !(at::_isinf(minvalue) || at::_isinf(maxvalue) || at::_isnan(minvalue) || at::_isnan(maxvalue)), "range of [", minvalue, ", ", maxvalue, "] is not finite"); #else TORCH_CHECK( !(std::isinf(minvalue) || std::isinf(maxvalue) || std::isnan(minvalue) || std::isnan(maxvalue)), "range of [", minvalue, ", ", maxvalue, "] is not finite"); #endif TORCH_CHECK(minvalue < maxvalue, "max must be larger than min"); cuda::CUDA_tensor_histogram<input_t, input_t, false>( output, self, Tensor(), nbins, minvalue, maxvalue); return output; } } // namespace namespace native { Tensor _bincount_cuda( const Tensor& self, const c10::optional<Tensor>& weights_opt, int64_t minlength) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weights_maybe_owned = at::borrow_from_optional_tensor(weights_opt); const Tensor& weights = *weights_maybe_owned; // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("_bincount_cuda"); return AT_DISPATCH_INTEGRAL_TYPES(self.scalar_type(), "bincount_cuda", [&] { const auto scalar = weights.scalar_type(); if (scalar == ScalarType::Undefined || scalar == ScalarType::Float) return _bincount_cuda_template<scalar_t, float>(self, weights, minlength); return _bincount_cuda_template<scalar_t, double>( self, weights.to(kDouble), minlength); }); } Tensor _histc_cuda( const Tensor& self, int64_t nbins, const Scalar& min, const Scalar& max) { if (self.scalar_type() == ScalarType::Half) { AT_ERROR("HalfTensor is not supported"); } // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("_histc_cuda"); return AT_DISPATCH_ALL_TYPES(self.scalar_type(), "histc", [&] { using bounds_t = at::acc_type<scalar_t, /*is_cuda=*/true>; return _histc_cuda_template<scalar_t>( self, nbins, min.to<bounds_t>(), 
max.to<bounds_t>()); }); } Tensor& _histc_out_cuda(const Tensor& self, int64_t bins, const Scalar& min, const Scalar& max, Tensor& result) { auto ret = _histc_cuda(self, bins, min, max); resize_output(result, ret.sizes()); result.copy_(ret); return result; } } // namespace native } // namespace at
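// ---------------------------------------------------------------------------
// [Illustrative sketch -- not part of the original ATen sources.]
// getBin above maps a value into one of nbins equal-width bins over
// [minvalue, maxvalue], folding the exact maximum into the last bin so that
// histc's closed upper bound is honoured. The same mapping restated on the
// host, with invented names, for reference:
// ---------------------------------------------------------------------------
#include <cassert>
#include <cstdint>

inline int64_t value_to_bin(double v, double minvalue, double maxvalue, int64_t nbins) {
  int64_t bin = static_cast<int64_t>((v - minvalue) * nbins / (maxvalue - minvalue));
  if (bin == nbins) bin -= 1;  // include maxvalue in the last bin
  return bin;
}

inline void value_to_bin_check() {
  // 4 bins over [0, 8]: bin width 2, and the upper bound 8 lands in bin 3.
  assert(value_to_bin(0.0, 0.0, 8.0, 4) == 0);
  assert(value_to_bin(3.9, 0.0, 8.0, 4) == 1);
  assert(value_to_bin(8.0, 0.0, 8.0, 4) == 3);
}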
b1389e84e39efc89e957b0acdae7c1d57b8f1b55.cu
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/NumericUtils.h> #include <ATen/core/Tensor.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/native/Resize.h> #include <ATen/cuda/Atomic.cuh> #include <ATen/cuda/CUDAApplyUtils.cuh> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/bincount_native.h> #include <ATen/ops/empty.h> #include <ATen/ops/histc_native.h> #include <ATen/ops/zeros.h> #endif namespace at { namespace cuda { #define RATIO_OF_GMEM_ATOMIC_ADD_TO_SMEM_ATOMIC_ADD 8 #define FOR_KERNEL_LOOP(i, lim) \ for (IndexType i = blockIdx.x * blockDim.x + threadIdx.x; i < lim; \ i += gridDim.x * blockDim.x) /* Memory types used for the 3 histogram implementations. See `CUDA_tensor_histogram` below. */ enum class CUDAHistogramMemoryType { SHARED, GLOBAL }; namespace { template <typename input_t, typename IndexType> __device__ static IndexType getBin( input_t bVal, at::acc_type<input_t, /*is_cuda=*/true> minvalue, at::acc_type<input_t, /*is_cuda=*/true> maxvalue, int64_t nbins) { IndexType bin = (int)(((bVal - minvalue)) * nbins / (maxvalue - minvalue)); // (only applicable for histc) // while each bin is inclusive at the lower end and exclusive at the higher, // i.e. [start, end) the last bin is inclusive at both, i.e. [start, end], in // order to include maxvalue if exists therefore when bin == nbins, adjust bin // to the last bin if (bin == nbins) bin -= 1; return bin; } } /* Kernel for computing the histogram of the input. */ template < typename output_t, typename input_t, typename IndexType, int ADims, int PDims, int BDims, CUDAHistogramMemoryType MemoryType, typename Op> C10_LAUNCH_BOUNDS_1(cuda::getApplyBlockSize()) __global__ void kernelHistogram1D( detail::TensorInfo<output_t, IndexType> a, /* output */ detail::TensorInfo<output_t, IndexType> p, /* partial output */ detail::TensorInfo<input_t, IndexType> b, /* input */ int64_t nbins, at::acc_type<input_t, /*is_cuda=*/true> minvalue, at::acc_type<input_t, /*is_cuda=*/true> maxvalue, IndexType totalElements, Op getOp) { extern __shared__ unsigned char my_smem[]; output_t* smem = nullptr; if (MemoryType == CUDAHistogramMemoryType::SHARED) { ////////////////////////// Shared memory ////////////////////////// // atomically add to block specific shared memory // then atomically add to the global output tensor smem = reinterpret_cast<output_t*>(my_smem); for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) { smem[i] = 0; } __syncthreads(); FOR_KERNEL_LOOP(linearIndex, totalElements) { // Convert `linearIndex` into an offset of `b` const IndexType bOffset = detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b); const auto bVal = b.data[bOffset]; if (bVal >= minvalue && bVal <= maxvalue) { // Use value at `b` as an offset of `smem` const IndexType bin = getBin<input_t, IndexType>(bVal, minvalue, maxvalue, nbins); gpuAtomicAddNoReturn(&smem[bin], getOp(linearIndex)); } } __syncthreads(); // NOTE: atomically update output bin count. // Atomic update is imp since __syncthread() will only synchronize threads // in a given block, not across blocks. 
for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) { const IndexType aOffset = detail::IndexToOffset<output_t, IndexType, ADims>::get(i, a); gpuAtomicAddNoReturn(&a.data[aOffset], smem[i]); } } else { ////////////////////////// Global memory ////////////////////////// // atomically add to the output tensor // compute histogram for the block FOR_KERNEL_LOOP(linearIndex, totalElements) { // Convert `linearIndex` into an offset of `b` const IndexType bOffset = detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b); const auto bVal = b.data[bOffset]; if (bVal >= minvalue && bVal <= maxvalue) { // Use value at `b` as an offset of `a` const IndexType bin = getBin<input_t, IndexType>(bVal, minvalue, maxvalue, nbins); const IndexType aOffset = detail::IndexToOffset<output_t, IndexType, ADims>::get(bin, a); gpuAtomicAddNoReturn(&a.data[aOffset], getOp(linearIndex)); } } } } #define HANDLE_CASE(MEMORY_TYPE, WEIGHTS_OP, SHARED_MEM) \ kernelHistogram1D< \ output_t, \ input_t, \ IndexType, \ 1, \ 2, \ -1, \ MEMORY_TYPE><<<grid, block, SHARED_MEM, getCurrentCUDAStream()>>>( \ aInfo, \ pInfo, \ bInfo, \ nbins, \ minvalue, \ maxvalue, \ totalElements, \ WEIGHTS_OP); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); #define HANDLE_SWITCH_CASE(mType, getOp) \ switch (mType) { \ case CUDAHistogramMemoryType::SHARED: \ HANDLE_CASE(CUDAHistogramMemoryType::SHARED, getOp, sharedMem); \ break; \ default: \ HANDLE_CASE(CUDAHistogramMemoryType::GLOBAL, getOp, 0); \ } /* Calculate the frequency of the input values. `a` contains the final output or the histogram. Input `b` is assumed to be 1-D non-negative int array. `c` optionally contains the weight vector. See `help torch.bincount` for details on the math. 3 implementations based of input size and memory usage: case: enough shared mem SHARED: Each block atomically adds to it's own **shared** hist copy, then atomically updates the global tensor. case: no enough shared mem GLOBAL: all threads atomically update to a single **global** hist copy. 
*/ template <typename output_t, typename input_t, bool HasWeights> bool CUDA_tensor_histogram( at::Tensor a, /* output */ at::Tensor b, /* input */ at::Tensor c, /* weights(optional) */ int64_t nbins, at::acc_type<input_t, /*is_cuda=*/true> minvalue, at::acc_type<input_t, /*is_cuda=*/true> maxvalue, TensorArgType aType = TensorArgType::ReadWrite, TensorArgType bType = TensorArgType::ReadOnly, TensorArgType cType = TensorArgType::ReadOnly) { checkBackend("CUDA_tensor_histogram", {a, b}, Backend::CUDA); if (HasWeights) { checkBackend("CUDA_tensor_histogram", {c}, Backend::CUDA); } auto totalElements = b.numel(); if (totalElements == 0) { return false; } const dim3 block = getApplyBlock(); dim3 grid; int64_t curDevice = current_device(); if (curDevice == -1 || !getApplyGrid(totalElements, grid, curDevice)) { return false; } CUDAHistogramMemoryType memType = CUDAHistogramMemoryType::GLOBAL; auto maxSharedMem = getCurrentDeviceProperties()->sharedMemPerBlock; auto sharedMem = nbins * sizeof(output_t) + 8; // 8 guard bytes // determine memory type to use in the kernel if (sharedMem < maxSharedMem) { // Solve equations: // (1) #(smem atomicAdd per SM) = totalElements / min(grid.x, #SM) // (2) #(gmem atomicAdd) = grid.x * nbins // (3) RATIO_OF_GMEM_ATOMIC_ADD_TO_SMEM_ATOMIC_ADD = #(gmem atomicAdd) / #(smem atomicAdd per SM) unsigned optimalGrid = ceil_div<size_t>(RATIO_OF_GMEM_ATOMIC_ADD_TO_SMEM_ATOMIC_ADD * totalElements, nbins * getCurrentDeviceProperties()->multiProcessorCount); if (optimalGrid < (unsigned)getCurrentDeviceProperties()->multiProcessorCount) { optimalGrid = 1 + (unsigned)std::sqrt(RATIO_OF_GMEM_ATOMIC_ADD_TO_SMEM_ATOMIC_ADD * totalElements / nbins); } auto optimalSteps = ceil_div<size_t>(totalElements, optimalGrid * block.x); optimalGrid = ceil_div<size_t>(totalElements, optimalSteps * block.x); grid.x = std::min(grid.x, optimalGrid); memType = CUDAHistogramMemoryType::SHARED; } using IndexType = int64_t; auto aInfo = detail::getTensorInfo<output_t, IndexType>(a); auto bInfo = detail::getTensorInfo<input_t, IndexType>(b); detail::TensorInfo<output_t, IndexType> pInfo(nullptr, 0, {}, {}); if (HasWeights) { auto cInfo = detail::getTensorInfo<output_t, IndexType>(c); const auto getWeightsOp = [cInfo] __device__(IndexType cIndex) { const IndexType cOffset = detail::IndexToOffset<output_t, IndexType, 1>::get(cIndex, cInfo); return cInfo.data[cOffset]; }; HANDLE_SWITCH_CASE(memType, getWeightsOp) } else { static const auto getDummyOp = [] __device__(IndexType) { return 1L; }; HANDLE_SWITCH_CASE(memType, getDummyOp) } return true; } #undef HANDLE_CASE #undef HANDLE_SWITCH_CASE #undef FOR_KERNEL_LOOP #undef RATIO_OF_GMEM_ATOMIC_ADD_TO_SMEM_ATOMIC_ADD } // namespace cuda namespace { ///////////////// bincount ///////////////// template <typename input_t, typename weights_t> Tensor _bincount_cuda_template( const Tensor& self, const Tensor& weights, int64_t minlength) { if (minlength < 0) { AT_ERROR("minlength should be >= 0"); } if (self.dim() == 1 && self.numel() == 0) { return at::zeros( {minlength}, kLong, c10::nullopt /* layout */, kCUDA, c10::nullopt /* pin_memory */); } if (self.dim() != 1 || (!std::is_same<input_t, uint8_t>::value && *self.min().cpu().data_ptr<input_t>() < 0)) { AT_ERROR("bincount only supports 1-d non-negative integral inputs."); } bool has_weights = weights.defined(); if (has_weights && (weights.dim() != 1 || weights.size(0) != self.size(0))) { AT_ERROR("weights should be 1-d and have the same length as input"); } const int64_t nbins = 
std::max(self.max().item<input_t>() + (int64_t)1, minlength); // we are using acc_type for the bounds, in particular int64_t for integers // in order to avoid overflows (e.g. using 256 bins for dtype uint8) using bounds_t = at::acc_type<input_t, /*is_cuda=*/true>; const bounds_t minvalue = 0; const bounds_t maxvalue = nbins; // alloc output counter on GPU Tensor output; if (has_weights) { output = at::zeros( {nbins}, optTypeMetaToScalarType(weights.options().dtype_opt()), weights.options().layout_opt(), weights.options().device_opt(), weights.options().pinned_memory_opt()); cuda::CUDA_tensor_histogram<weights_t, input_t, true>( output, self, weights, nbins, minvalue, maxvalue); } else { output = at::zeros( {nbins}, kLong, c10::nullopt /* layout */, DeviceType::CUDA, c10::nullopt /* pin_memory */); cuda::CUDA_tensor_histogram<int64_t, input_t, false>( output, self, weights, nbins, minvalue, maxvalue); } return output; } ///////////////// histc ///////////////// template <typename input_t> Tensor _histc_cuda_template( const Tensor& self, int64_t nbins, at::acc_type<input_t, /*is_cuda=*/true> min, at::acc_type<input_t, /*is_cuda=*/true> max) { if (nbins <= 0) { AT_ERROR("bins must be > 0"); } Tensor output = at::zeros( {nbins}, self.scalar_type(), c10::nullopt /* layout */, DeviceType::CUDA, c10::nullopt /* pin_memory */); input_t minvalue = min; input_t maxvalue = max; if (min == max && self.numel() > 0) { minvalue = *self.min().cpu().data_ptr<input_t>(); maxvalue = *self.max().cpu().data_ptr<input_t>(); } if (minvalue == maxvalue) { minvalue = minvalue - 1; maxvalue = maxvalue + 1; } #if !defined(USE_ROCM) TORCH_CHECK( !(at::_isinf(minvalue) || at::_isinf(maxvalue) || at::_isnan(minvalue) || at::_isnan(maxvalue)), "range of [", minvalue, ", ", maxvalue, "] is not finite"); #else TORCH_CHECK( !(std::isinf(minvalue) || std::isinf(maxvalue) || std::isnan(minvalue) || std::isnan(maxvalue)), "range of [", minvalue, ", ", maxvalue, "] is not finite"); #endif TORCH_CHECK(minvalue < maxvalue, "max must be larger than min"); cuda::CUDA_tensor_histogram<input_t, input_t, false>( output, self, Tensor(), nbins, minvalue, maxvalue); return output; } } // namespace namespace native { Tensor _bincount_cuda( const Tensor& self, const c10::optional<Tensor>& weights_opt, int64_t minlength) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weights_maybe_owned = at::borrow_from_optional_tensor(weights_opt); const Tensor& weights = *weights_maybe_owned; // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("_bincount_cuda"); return AT_DISPATCH_INTEGRAL_TYPES(self.scalar_type(), "bincount_cuda", [&] { const auto scalar = weights.scalar_type(); if (scalar == ScalarType::Undefined || scalar == ScalarType::Float) return _bincount_cuda_template<scalar_t, float>(self, weights, minlength); return _bincount_cuda_template<scalar_t, double>( self, weights.to(kDouble), minlength); }); } Tensor _histc_cuda( const Tensor& self, int64_t nbins, const Scalar& min, const Scalar& max) { if (self.scalar_type() == ScalarType::Half) { AT_ERROR("HalfTensor is not supported"); } // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("_histc_cuda"); return AT_DISPATCH_ALL_TYPES(self.scalar_type(), "histc", [&] { using bounds_t = at::acc_type<scalar_t, /*is_cuda=*/true>; return _histc_cuda_template<scalar_t>( self, nbins, min.to<bounds_t>(), 
max.to<bounds_t>()); }); } Tensor& _histc_out_cuda(const Tensor& self, int64_t bins, const Scalar& min, const Scalar& max, Tensor& result) { auto ret = _histc_cuda(self, bins, min, max); resize_output(result, ret.sizes()); result.copy_(ret); return result; } } // namespace native } // namespace at
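// ---------------------------------------------------------------------------
// [Illustrative sketch -- not part of the original ATen sources.]
// CUDA_tensor_histogram above takes the SHARED path only when a per-block
// copy of the histogram (plus 8 guard bytes) fits in shared memory; otherwise
// every thread updates the single global histogram with atomics. The sizing
// decision restated as a tiny helper, with invented names:
// ---------------------------------------------------------------------------
#include <cstddef>

enum class HistMemType { Shared, Global };

inline HistMemType pick_hist_memory(std::size_t nbins,
                                    std::size_t bytes_per_bin,
                                    std::size_t shared_mem_per_block) {
  std::size_t needed = nbins * bytes_per_bin + 8;  // 8 guard bytes, as above
  return needed < shared_mem_per_block ? HistMemType::Shared
                                       : HistMemType::Global;
}
// e.g. 256 bins of int64_t counters need 2056 bytes, comfortably below the
// roughly 48 KiB of shared memory per block available on most GPUs, so a
// bincount over uint8 inputs takes the SHARED path.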
07f06996dbda1a177ac98b9b98e93af6ccd6884c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //------------------------------------------------------------------------------ // Solver.cu //------------------------------------------------------------------------------ #include "Solver.h" //------------------------------------------------------------------------------ //============================================================================== // CUDA DEVICE code starts here //============================================================================== //------------------------------------------------------------------------------ __constant__ SolverConfiguration gConfiguration; // current solver's config. //------------------------------------------------------------------------------ //============================================================================== // UTLITY device kernels definition //============================================================================== //------------------------------------------------------------------------------ __device__ void computeCoordinatesOff ( int3& coordinate, // out: coordinate for [position] float3 position, const Grid& grid, float offset ) { // compute the coordinates of a point in space with respect to the given // grid coordinate.x = (int)((position.x + offset - grid.Origin.x)/grid.Spacing); coordinate.y = (int)((position.y + offset - grid.Origin.y)/grid.Spacing); coordinate.z = (int)((position.z + offset - grid.Origin.z)/grid.Spacing); // clamp coordinates if neccessary coordinate.x = max(0, min(coordinate.x, grid.Dimensions.x - 1)); coordinate.y = max(0, min(coordinate.y, grid.Dimensions.y - 1)); coordinate.z = max(0, min(coordinate.z, grid.Dimensions.z - 1)); } //------------------------------------------------------------------------------ __device__ void computeCoordinates ( int3& coordinate, // out: coordinate for [position] float3 position, const Grid& grid ) { // compute the coordinates of a point in space with respect to the given // grid coordinate.x = (int)((position.x - grid.Origin.x)/grid.Spacing); coordinate.y = (int)((position.y - grid.Origin.y)/grid.Spacing); coordinate.z = (int)((position.z - grid.Origin.z)/grid.Spacing); // clamp coordinates if neccessary coordinate.x = max(0, min(coordinate.x, grid.Dimensions.x - 1)); coordinate.y = max(0, min(coordinate.y, grid.Dimensions.y - 1)); coordinate.z = max(0, min(coordinate.z, grid.Dimensions.z - 1)); } //------------------------------------------------------------------------------ __device__ void computeHash ( unsigned int& hash, const int3& coordinate, const Grid& grid ) { // compute the hash for a grid given a coordinate within the grid hash = coordinate.x + grid.Dimensions.x* (coordinate.y + grid.Dimensions.y*coordinate.z); } //------------------------------------------------------------------------------ __device__ void computeHash ( unsigned int& hash, float3 position, const Grid& grid ) { // compute the hash for a grid given a position in world space, by first // conputing the coordinate in [grid], and then computing the hash. 
int3 coordinate; computeCoordinates(coordinate, position, grid); computeHash(hash, coordinate, grid); } //------------------------------------------------------------------------------ __device__ inline void computeNorm (float& norm, const float3& a) { norm = sqrt(a.x*a.x + a.y*a.y + a.z*a.z); } //------------------------------------------------------------------------------ __device__ inline void computeDistance ( float& dist, const float3& a, const float3& b ) { float3 d; d.x = a.x - b.x; d.y = a.y - b.y; d.z = a.z - b.z; computeNorm(dist, d); } //------------------------------------------------------------------------------ __device__ inline void evaluatePoly6Kernel ( float& res, // [out] result of evaluation float d, // distance between two particles float h // effective radius ) { // evaluate Muellers Poly6 Kernel float hhh = h*h*h; float coeff = 315.0f/(64.0f*M_PI*hhh*hhh*hhh); if (d < h) { float a = h*h - d*d; res = coeff*a*a*a; } else { res = 0.0f; } } //------------------------------------------------------------------------------ __device__ inline void evaluateSpikyKernelGradient ( float3& grad, const float3& xij, float h ) { float norm = 0.0f; computeNorm(norm, xij); if ((norm == 0.0f) || (norm > h)) { grad.x = 0.0f; grad.y = 0.0f; grad.z = 0.0f; return; } float hhh = h*h*h; float a = -45.0f/(M_PI*hhh*hhh)*(h - norm)*(h - norm); grad.x = a*xij.x/norm; grad.y = a*xij.y/norm; grad.z = a*xij.z/norm; } //------------------------------------------------------------------------------ __device__ inline void evaluateViscosityKernelLaplacian ( float& lapl, float dist, float h ) { if (dist < h) { float hhh = h*h*h; float coeff = 45.0f/(M_PI*hhh*hhh); lapl = coeff*(h - dist); return; } else { return; } } //------------------------------------------------------------------------------ __device__ inline void evaluateBoundaryWeight ( float& weight, float dist, float h ) { float q = 2.0f*dist/h; float coeff = 0.02f*gConfiguration.SpeedSound*gConfiguration.SpeedSound/ dist; if (q < 2.0f/3.0f) { weight = coeff*2.0f/3.0f; } else if (q < 1.0f) { weight = coeff*(2.0f*q - 3.0f/2.0f*q*q); } else if (q < 2.0f) { float a = 2.0f - q; weight = coeff*0.5f*a*a; } else { weight = 0.0f; } } //------------------------------------------------------------------------------ __device__ inline void computeDensityCell ( float& rhoi, // [out] density of particle i const float3& xi, // position of particle i const float* dPositions, unsigned int start, unsigned int end ) { // add up density contribution form particle in this cell ([start], [end]) // to the density of the particle i [rhoi]. 
(in fact only the kernel // weights are added up, mass is multiplied in the callee, to safe // operations) for (unsigned int j = start; j < end; j++) { float3 xj; xj.x = dPositions[3*j + 0]; xj.y = dPositions[3*j + 1]; xj.z = dPositions[3*j + 2]; float dist; computeDistance(dist, xi, xj); if (dist < gConfiguration.EffectiveRadius) { float weight = 0.0f; evaluatePoly6Kernel(weight, dist, gConfiguration.EffectiveRadius); rhoi += weight; } } } //------------------------------------------------------------------------------ __device__ inline void computeAccelerationCell ( float3& fi, float rhoi, float pi, const float3& xi, const float3& vi, const float* dDensities, const float* dPressures, const float* dPositions, const float* dVelocities, unsigned int start, unsigned int end ) { for (unsigned int j = start; j < end; j++) { float3 xj; xj.x = dPositions[3*j + 0]; xj.y = dPositions[3*j + 1]; xj.z = dPositions[3*j + 2]; float3 vj; vj.x = dVelocities[3*j + 0]; vj.y = dVelocities[3*j + 1]; vj.z = dVelocities[3*j + 2]; float rhoj = dDensities[j]; float pj = dPressures[j]; float dist; float3 xij; xij.x = xi.x - xj.x; xij.y = xi.y - xj.y; xij.z = xi.z - xj.z; computeNorm(dist, xij); if (dist != 0.0f && dist < gConfiguration.EffectiveRadius) { // evaluate the pressure force partice j exerts on particle i float coeffP = -rhoi*gConfiguration.FluidParticleMass* (pi/(rhoi*rhoi) + pj/(rhoj*rhoj)); float3 grad; evaluateSpikyKernelGradient( grad, xij, gConfiguration.EffectiveRadius ); fi.x += coeffP*grad.x; fi.y += coeffP*grad.y; fi.z += coeffP*grad.z; // evaluate the viscosity force partice j exerts on particle i float coeffV = gConfiguration.Viscosity* gConfiguration.FluidParticleMass/rhoj; float lapl = 0.0f; evaluateViscosityKernelLaplacian( lapl, dist, gConfiguration.EffectiveRadius ); float3 vji; vji.x = vj.x - vi.x; vji.y = vj.y - vi.y; vji.z = vj.z - vi.z; fi.x += coeffV*vji.x*lapl; fi.y += coeffV*vji.y*lapl; fi.z += coeffV*vji.z*lapl; // evaluate the surface tension force partice j exerts on particle i float weight; evaluatePoly6Kernel(weight, dist, gConfiguration.EffectiveRadius); float coeffT = -weight*gConfiguration.FluidParticleMass* gConfiguration.TensionCoefficient; fi.x += coeffT*xij.x; fi.y += coeffT*xij.y; fi.z += coeffT*xij.z; } } } //------------------------------------------------------------------------------ __device__ void computeBoundaryForceCell ( float3& bi, const float3& xi, const float* dPositions, unsigned int start, unsigned int end ) { for (unsigned int j = start; j < end; j++) { float3 xj; xj.x = dPositions[3*j + 0]; xj.y = dPositions[3*j + 1]; xj.z = dPositions[3*j + 2]; float3 xij; xij.x = xi.x - xj.x; xij.y = xi.y - xj.y; xij.z = xi.z - xj.z; float dist; computeNorm(dist, xij); if (dist < gConfiguration.EffectiveRadius) { float weight = 0.0f; evaluateBoundaryWeight(weight, dist, gConfiguration.EffectiveRadius); weight*= gConfiguration.BoundaryParticleMass/ (gConfiguration.FluidParticleMass + gConfiguration.BoundaryParticleMass); bi.x += weight*xij.x/dist; bi.y += weight*xij.y/dist; bi.z += weight*xij.z/dist; } } } //------------------------------------------------------------------------------ __device__ void computeVelXSPHCell( float3& velXSPH, const float3& xi, const float3& vi, const float* dPositions, const float* dVelocities, const float* dAccelerations, const float* dDensities, unsigned int start, unsigned int end, float dt ) { for (unsigned int j = start; j < end; j++) { float3 xj; xj.x = dPositions[3*j + 0]; xj.y = dPositions[3*j + 1]; xj.z = dPositions[3*j + 2]; 
float3 aj; aj.x = dAccelerations[3*j + 0]; aj.y = dAccelerations[3*j + 1]; aj.z = dAccelerations[3*j + 2]; float3 vj; vj.x = dVelocities[3*j + 0] + dt*aj.x; vj.y = dVelocities[3*j + 1] + dt*aj.y; vj.z = dVelocities[3*j + 2] + dt*aj.z; float rhoj = dDensities[j]; float dist; float3 xij; xij.x = xi.x - xj.x; xij.y = xi.y - xj.y; xij.z = xi.z - xj.z; computeNorm(dist, xij); if ( dist < gConfiguration.EffectiveRadius) { float3 vji; vji.x = vj.x - vi.x; vji.y = vj.y - vi.y; vji.z = vj.z - vi.z; float weight; evaluatePoly6Kernel(weight, dist, gConfiguration.EffectiveRadius); weight *= (gConfiguration.FluidParticleMass/rhoj); velXSPH.x += vji.x*weight; velXSPH.y += vji.y*weight; velXSPH.z += vji.z*weight; } } } //============================================================================== // GLOBAL device kernel definitions //============================================================================== //------------------------------------------------------------------------------ __global__ void computeHashs ( unsigned int* dHashs, // hash values of each particle unsigned int* dActiveIDs, // array of active particle ids const float* dPositions, // positions of each particle unsigned int numParticles // number of ids in the id array ) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= numParticles) { return; } dActiveIDs[idx] = idx; float3 pos; pos.x = dPositions[3*idx + 0]; pos.y = dPositions[3*idx + 1]; pos.z = dPositions[3*idx + 2]; computeHash(dHashs[idx], pos, gConfiguration.Grid); }; //------------------------------------------------------------------------------ __global__ void reorderComputeCellStartEndBoundaryD ( unsigned int* dCellStart, unsigned int* dCellEnd, float* dTempPositions, const float* dPositions, const unsigned int* dSortedIDs, const unsigned int* dHashs, unsigned int numParticles ) { extern __shared__ int sharedHash[]; unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= numParticles) { return; } // reorder unsigned int id = dSortedIDs[idx]; dTempPositions[3*idx + 0] = dPositions[3*id + 0]; dTempPositions[3*idx + 1] = dPositions[3*id + 1]; dTempPositions[3*idx + 2] = dPositions[3*id + 2]; // compute cell start end int hash = dHashs[idx]; sharedHash[threadIdx.x + 1] = hash; if (idx > 0 && threadIdx.x == 0) { sharedHash[0] = dHashs[idx - 1]; } __syncthreads(); if (idx == 0 || hash != sharedHash[threadIdx.x]) { dCellStart[hash] = idx; if (idx > 0) { dCellEnd[sharedHash[threadIdx.x]] = idx; } } if (idx == numParticles - 1) { dCellEnd[hash] = idx + 1; } } //------------------------------------------------------------------------------ __global__ void reorderAndComputeCellStartEndD ( unsigned int* dCellStart, unsigned int* dCellEnd, float* dTempPositions, float* dTempVelocities, unsigned int* dSortedIDs, const float* dPositions, const float* dVelocities, const unsigned int* dHashs, unsigned int numParticles ) { extern __shared__ int sharedHash[]; unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= numParticles) { return; } // reorder unsigned int id = dSortedIDs[idx]; dTempPositions[3*idx + 0] = dPositions[3*id + 0]; dTempPositions[3*idx + 1] = dPositions[3*id + 1]; dTempPositions[3*idx + 2] = dPositions[3*id + 2]; dTempVelocities[3*idx + 0] = dVelocities[3*id + 0]; dTempVelocities[3*idx + 1] = dVelocities[3*id + 1]; dTempVelocities[3*idx + 2] = dVelocities[3*id + 2]; // compute cell start end ids int hash = dHashs[idx]; sharedHash[threadIdx.x + 1] = hash; if (idx > 0 && threadIdx.x == 0) { sharedHash[0] = dHashs[idx - 1]; } 
__syncthreads(); if (idx == 0 || hash != sharedHash[threadIdx.x]) { dCellStart[hash] = idx; if (idx > 0) { dCellEnd[sharedHash[threadIdx.x]] = idx; } } if (idx == numParticles - 1) { dCellEnd[hash] = idx + 1; } } //------------------------------------------------------------------------------ __global__ void computeDensitiesPressuresD ( float* dDensities, // [out] computed densities float* dPressures, const float* dPositions, const unsigned int* dCellStart, const unsigned int* dCellEnd, unsigned int numParticles ) { unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x; if (idx >= numParticles) { return; } float3 xi; xi.x = dPositions[3*idx + 0]; xi.y = dPositions[3*idx + 1]; xi.z = dPositions[3*idx + 2]; float rhoi = 0.0f; int3 cs, ce; computeCoordinatesOff( cs, xi, gConfiguration.Grid, -gConfiguration.EffectiveRadius ); computeCoordinatesOff( ce, xi, gConfiguration.Grid, gConfiguration.EffectiveRadius ); int3 cc; for (cc.z = cs.z; cc.z <= ce.z; cc.z++) { for (cc.y = cs.y; cc.y <= ce.y; cc.y++) { for (cc.x = cs.x; cc.x <= ce.x; cc.x++) { unsigned int hash; computeHash(hash, cc, gConfiguration.Grid); unsigned int start = dCellStart[hash]; unsigned int end = dCellEnd[hash]; computeDensityCell( rhoi, xi, dPositions, start, end ); } } } rhoi *= gConfiguration.FluidParticleMass; dDensities[idx] = rhoi; dPressures[idx] = gConfiguration.BulkModulus* (rhoi - gConfiguration.RestDensity); } //------------------------------------------------------------------------------ __global__ void computeAccelerationsD ( float* dAccelerations, const float* dDensities, const float* dPressures, const float* dPositions, const float* dVelocities, const unsigned int* dCellStart, const unsigned int* dCellEnd, const float* dBoundaryPositions, const unsigned int* dBoundaryCellStart, const unsigned int* dBoundaryCellEnd, unsigned int numParticles ) { unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x; if (idx >= numParticles) { return; } float3 xi; xi.x = dPositions[3*idx + 0]; xi.y = dPositions[3*idx + 1]; xi.z = dPositions[3*idx + 2]; float3 vi; vi.x = dVelocities[3*idx + 0]; vi.y = dVelocities[3*idx + 1]; vi.z = dVelocities[3*idx + 2]; float rhoi = dDensities[idx]; float pi = dPressures[idx]; int3 cs, ce; computeCoordinatesOff( cs, xi, gConfiguration.Grid, -gConfiguration.EffectiveRadius ); computeCoordinatesOff( ce, xi, gConfiguration.Grid, gConfiguration.EffectiveRadius ); float3 fi; fi.x = 0.0; fi.y = 0.0; fi.z = 0.0; float3 bi; bi.x = 0.0f; bi.y = 0.0f; bi.z = 0.0f; int3 cc; for (cc.z = cs.z; cc.z <= ce.z; cc.z++) { for (cc.y = cs.y; cc.y <= ce.y; cc.y++) { for (cc.x = cs.x; cc.x <= ce.x; cc.x++) { unsigned int hash; computeHash(hash, cc, gConfiguration.Grid); unsigned int start = dCellStart[hash]; unsigned int end = dCellEnd[hash]; computeAccelerationCell( fi, rhoi, pi, xi, vi, dDensities, dPressures, dPositions, dVelocities, start, end ); } } } for (cc.z = cs.z; cc.z <= ce.z; cc.z++) { for (cc.y = cs.y; cc.y <= ce.y; cc.y++) { for (cc.x = cs.x; cc.x <= ce.x; cc.x++) { unsigned int hash; computeHash(hash, cc, gConfiguration.Grid); unsigned int start = dBoundaryCellStart[hash]; unsigned int end = dBoundaryCellEnd[hash]; computeBoundaryForceCell( bi, xi, dBoundaryPositions, start, end ); } } } dAccelerations[3*idx + 0] = fi.x/rhoi + bi.x; dAccelerations[3*idx + 1] = fi.y/rhoi - 9.81f + bi.y; dAccelerations[3*idx + 2] = fi.z/rhoi + bi.z; } //------------------------------------------------------------------------------ __global__ void integrateD ( float* dPositions, float* dVelocities, const float* 
dAccelerations,
    const float* dTempPositions,
    const float* dTempVelocities,
    float timeStep,
    unsigned int numParticles
)
{
    unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;

    if (idx >= numParticles)
    {
        return;
    }

    float3 xi;
    xi.x = dTempPositions[3*idx + 0];
    xi.y = dTempPositions[3*idx + 1];
    xi.z = dTempPositions[3*idx + 2];
    float3 vi;
    vi.x = dTempVelocities[3*idx + 0];
    vi.y = dTempVelocities[3*idx + 1];
    vi.z = dTempVelocities[3*idx + 2];

    // update position and velocity of the particle
    vi.x += timeStep*dAccelerations[3*idx + 0];
    vi.y += timeStep*dAccelerations[3*idx + 1];
    vi.z += timeStep*dAccelerations[3*idx + 2];
    xi.x += timeStep*vi.x;
    xi.y += timeStep*vi.y;
    xi.z += timeStep*vi.z;

    // store new position and velocity of the particle
    dPositions[3*idx + 0] = xi.x;
    dPositions[3*idx + 1] = xi.y;
    dPositions[3*idx + 2] = xi.z;
    dVelocities[3*idx + 0] = vi.x;
    dVelocities[3*idx + 1] = vi.y;
    dVelocities[3*idx + 2] = vi.z;
}
//------------------------------------------------------------------------------
__global__ void integrateXSPHD
(
    float* dPositions,
    float* dVelocities,
    const float* dTempPositions,
    const float* dTempVelocities,
    const float* dAccelerations,
    const float* dDensities,
    const unsigned int* dCellStart,
    const unsigned int* dCellEnd,
    float timeStep,
    unsigned int numParticles
)
{
    unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;

    if (idx >= numParticles)
    {
        return;
    }

    float3 xi;
    xi.x = dTempPositions[3*idx + 0];
    xi.y = dTempPositions[3*idx + 1];
    xi.z = dTempPositions[3*idx + 2];
    float3 vi;
    vi.x = dTempVelocities[3*idx + 0];
    vi.y = dTempVelocities[3*idx + 1];
    vi.z = dTempVelocities[3*idx + 2];
    vi.x += timeStep*dAccelerations[3*idx + 0];
    vi.y += timeStep*dAccelerations[3*idx + 1];
    vi.z += timeStep*dAccelerations[3*idx + 2];

    float3 velXSPH;
    velXSPH.x = 0.0f;
    velXSPH.y = 0.0f;
    velXSPH.z = 0.0f;

    // compute XSPH velocity
    int3 cs, ce, cc;
    computeCoordinatesOff(
        cs, xi, gConfiguration.Grid, -gConfiguration.EffectiveRadius
    );
    computeCoordinatesOff(
        ce, xi, gConfiguration.Grid, gConfiguration.EffectiveRadius
    );

    for (cc.z = cs.z; cc.z <= ce.z; cc.z++)
    {
        for (cc.y = cs.y; cc.y <= ce.y; cc.y++)
        {
            for (cc.x = cs.x; cc.x <= ce.x; cc.x++)
            {
                unsigned int hash;
                computeHash(hash, cc, gConfiguration.Grid);
                unsigned int start = dCellStart[hash];
                unsigned int end = dCellEnd[hash];
                computeVelXSPHCell(
                    velXSPH, xi, vi, dTempPositions, dTempVelocities,
                    dAccelerations, dDensities, start, end, timeStep
                );
            }
        }
    }

    // apply the XSPH correction component-wise
    vi.x += gConfiguration.XSPHCoeff*velXSPH.x;
    vi.y += gConfiguration.XSPHCoeff*velXSPH.y;
    vi.z += gConfiguration.XSPHCoeff*velXSPH.z;
    xi.x += timeStep*vi.x;
    xi.y += timeStep*vi.y;
    xi.z += timeStep*vi.z;

    // store new position and velocity of the particle
    dPositions[3*idx + 0] = xi.x;
    dPositions[3*idx + 1] = xi.y;
    dPositions[3*idx + 2] = xi.z;
    dVelocities[3*idx + 0] = vi.x;
    dVelocities[3*idx + 1] = vi.y;
    dVelocities[3*idx + 2] = vi.z;
}
//------------------------------------------------------------------------------
//==============================================================================
// HOST code starts here
//==============================================================================
//------------------------------------------------------------------------------
#define BLOCK_DIMENSIONS_X 256
#define EMPTY_CELL_ID 0xFFFFFFFF
//------------------------------------------------------------------------------
//==============================================================================
// UTILITY functions start here
//============================================================================== //------------------------------------------------------------------------------ void computeGridDimensions ( dim3& gridDimensions, const dim3& blockDimensions, unsigned int numParticles ) { // compute the dimensions of the cuda grid for tgiven block dimensions, // and the number of particles if (numParticles % blockDimensions.x == 0) { gridDimensions.x = numParticles/blockDimensions.x; } else { gridDimensions.x = numParticles/blockDimensions.x + 1; } gridDimensions.y = 1; gridDimensions.z = 1; } //------------------------------------------------------------------------------ //============================================================================== // SPHParticleData's definition //============================================================================== //------------------------------------------------------------------------------ Solver::SPHParticleData::SPHParticleData ( ParticleData* data, unsigned int numGridCells // # of grid cells in each direction ) : Data(data), BlockDimensions(BLOCK_DIMENSIONS_X, 1, 1), NumGridCells(numGridCells) { // allocate additional memory for storing density, pressure, acceleration, // velocities and hash values for the particles and initialize that data. // also allocate memory for the neighbor search as described in the nVidia // particles white paper CUDA::Alloc<float>(&dDensities, data->MaxParticles); CUDA::Alloc<float>(&dPressures, data->MaxParticles); CUDA::Alloc<float>(&dAccelerations, 3*data->MaxParticles); CUDA::Alloc<float>(&dVelocities, 3*data->MaxParticles); CUDA::Alloc<float>(&dTempPositions, 3*data->MaxParticles); CUDA::Alloc<float>(&dTempVelocities, 3*data->MaxParticles); CUDA::Fill<float>(dDensities, data->MaxParticles, 0.0f); CUDA::Fill<float>(dPressures, data->MaxParticles, 0.0f); CUDA::Fill<float>(dAccelerations, 3*data->MaxParticles, 0.0f); CUDA::Fill<float>(dVelocities, 3*data->MaxParticles, 0.0f); CUDA::Fill<float>(dTempPositions, 3*data->MaxParticles, 0.0f); CUDA::Fill<float>(dTempVelocities, 3*data->MaxParticles, 0.0f); CUDA::Alloc<unsigned int>(&dActiveIDs, data->MaxParticles); CUDA::Alloc<unsigned int>(&dHashs, data->MaxParticles); CUDA::Alloc<unsigned int>(&dCellStart, numGridCells); CUDA::Alloc<unsigned int>(&dCellEnd, numGridCells); CUDA::Fill<unsigned int>(dActiveIDs, data->MaxParticles, 0, 1); CUDA::Fill<unsigned int>(dHashs, data->MaxParticles, 0); CUDA::Fill<unsigned int>(dCellStart, numGridCells, 0); CUDA::Fill<unsigned int>(dCellEnd, numGridCells, 0); // compute the number of cuda blocks we need based on the current number // of particles and the threads per block we use. 
Also compute the amount // of shared memory we need to compute the values for [dCellStart] and // [dCellEnd] computeGridDimensions(GridDimensions, BlockDimensions, data->NumParticles); SharedMemSize = sizeof(int)*(BlockDimensions.x + 1); } //------------------------------------------------------------------------------ Solver::SPHParticleData::~SPHParticleData () { // free everything CUDA::Free<float>(&dDensities); CUDA::Free<float>(&dAccelerations); CUDA::Free<float>(&dPressures); CUDA::Free<float>(&dVelocities); CUDA::Free<float>(&dTempVelocities); CUDA::Free<float>(&dTempPositions); CUDA::Free<unsigned int>(&dHashs); CUDA::Free<unsigned int>(&dCellStart); CUDA::Free<unsigned int>(&dCellEnd); } //------------------------------------------------------------------------------ Solver::BoundaryParticleData::BoundaryParticleData ( ParticleData* data, unsigned int numGridCells ) : Data(data), BlockDimensions(BLOCK_DIMENSIONS_X, 1, 1), NumGridCells(numGridCells) { CUDA::Alloc<unsigned int>(&dHashs, data->MaxParticles); CUDA::Alloc<unsigned int>(&dCellStart, numGridCells); CUDA::Alloc<unsigned int>(&dCellEnd, numGridCells); CUDA::Fill<unsigned int>(dHashs, data->MaxParticles, 0); CUDA::Fill<unsigned int>(dCellStart, numGridCells, 0); CUDA::Fill<unsigned int>(dCellEnd, numGridCells, 0); computeGridDimensions(GridDimensions, BlockDimensions, data->NumParticles); SharedMemSize = sizeof(int)*(BlockDimensions.x + 1); } //------------------------------------------------------------------------------ Solver::BoundaryParticleData::~BoundaryParticleData () { CUDA::Free<unsigned int>(&dHashs); CUDA::Free<unsigned int>(&dCellStart); CUDA::Free<unsigned int>(&dCellEnd); } //------------------------------------------------------------------------------ //============================================================================== // Solvers's definition //============================================================================== //------------------------------------------------------------------------------ Solver::Solver ( ParticleData* fluidData, ParticleData* boundaryData, const SolverConfiguration* configuration ) : mConfiguration(*configuration), mFluidData(fluidData, Grid::ComputeNumGridCells(configuration->Grid)), mBoundaryData(boundaryData, Grid::ComputeNumGridCells(configuration->Grid)) { // store pointer to fluid particles and boundary particles also store // a copy of solver configuration //-------------------------------------------------------------------------- // compute neighborhood of boundary particles beforehand unsigned int* dBoundaryIDs; CUDA::Alloc<unsigned int>(&dBoundaryIDs, mBoundaryData.Data->MaxParticles); CUDA::Fill<unsigned int>(dBoundaryIDs, mBoundaryData.Data->MaxParticles, 0, 1); float* dBoundaryPositions; CUDA::Alloc<float>(&dBoundaryPositions, 3*mBoundaryData.Data->MaxParticles); this->Bind(); // bind first mBoundaryData.Data->Map(); hipLaunchKernelGGL(( computeHashs), dim3(mBoundaryData.GridDimensions), dim3(mBoundaryData.BlockDimensions), 0, 0, mBoundaryData.dHashs, dBoundaryIDs, mBoundaryData.Data->dPositions, mBoundaryData.Data->NumParticles ); thrust::sort_by_key( thrust::device_ptr<unsigned int>(mBoundaryData.dHashs), thrust::device_ptr<unsigned int>(mBoundaryData.dHashs + mBoundaryData.Data->NumParticles), thrust::device_ptr<unsigned int>(dBoundaryIDs) ); CUDA::Memset<unsigned int>( mBoundaryData.dCellStart, EMPTY_CELL_ID, mBoundaryData.NumGridCells ); CUDA::Memset<unsigned int>( mBoundaryData.dCellEnd, EMPTY_CELL_ID, mBoundaryData.NumGridCells ); 
hipLaunchKernelGGL(( reorderComputeCellStartEndBoundaryD), dim3(mBoundaryData.GridDimensions), dim3(mBoundaryData.BlockDimensions), mBoundaryData.SharedMemSize, 0, mBoundaryData.dCellStart, mBoundaryData.dCellEnd, dBoundaryPositions, mBoundaryData.Data->dPositions, dBoundaryIDs, mBoundaryData.dHashs, mBoundaryData.Data->NumParticles ); CUDA::Memcpy<float>( mBoundaryData.Data->dPositions, dBoundaryPositions, 3*mBoundaryData.Data->MaxParticles, hipMemcpyDeviceToDevice ); mBoundaryData.Data->Unmap(); CUDA::Free<unsigned int>(&dBoundaryIDs); CUDA::Free<float>(&dBoundaryPositions); //-------------------------------------------------------------------------- } //------------------------------------------------------------------------------ Solver::~Solver () { } //------------------------------------------------------------------------------ void Solver::Bind () const { // set the configuration of this solver on the device CUDA::SafeCall( hipMemcpyToSymbol( gConfiguration, &mConfiguration, sizeof(mConfiguration) ), __FILE__, __LINE__ ); } //------------------------------------------------------------------------------ void Solver::Advance (float timeStep) { CUDA::Timer t; t.Start(); mFluidData.Data->Map(); mBoundaryData.Data->Map(); this->computeNeighborhoods(); this->computeDensities(); this->computeAccelerations(); this->integrate(timeStep); mBoundaryData.Data->Unmap(); mFluidData.Data->Unmap(); t.Stop(); t.DumpElapsed(); } //------------------------------------------------------------------------------ void Solver::computeNeighborhoods () { // compute hashs of all particles hipLaunchKernelGGL(( computeHashs), dim3(mFluidData.GridDimensions), dim3(mFluidData.BlockDimensions), 0, 0, mFluidData.dHashs, mFluidData.dActiveIDs, mFluidData.Data->dPositions, mFluidData.Data->NumParticles ); // sort the active particle ids by their hash thrust::sort_by_key( thrust::device_ptr<unsigned int>(mFluidData.dHashs), thrust::device_ptr<unsigned int>(mFluidData.dHashs + mFluidData.Data->NumParticles), thrust::device_ptr<unsigned int>(mFluidData.dActiveIDs) ); // reset, then compute cell start end list CUDA::Memset<unsigned int>( mFluidData.dCellStart, EMPTY_CELL_ID, mFluidData.NumGridCells ); CUDA::Memset<unsigned int>( mFluidData.dCellEnd, EMPTY_CELL_ID, mFluidData.NumGridCells ); hipLaunchKernelGGL(( reorderAndComputeCellStartEndD), dim3(mFluidData.GridDimensions), dim3(mFluidData.BlockDimensions), mFluidData.SharedMemSize, 0, mFluidData.dCellStart, mFluidData.dCellEnd, mFluidData.dTempPositions, mFluidData.dTempVelocities, mFluidData.dActiveIDs, mFluidData.Data->dPositions, mFluidData.dVelocities, mFluidData.dHashs, mFluidData.Data->NumParticles ); } //------------------------------------------------------------------------------ void Solver::computeDensities () { hipLaunchKernelGGL(( computeDensitiesPressuresD), dim3(mFluidData.GridDimensions), dim3(mFluidData.BlockDimensions), 0, 0, mFluidData.dDensities, mFluidData.dPressures, mFluidData.dTempPositions, mFluidData.dCellStart, mFluidData.dCellEnd, mFluidData.Data->NumParticles ); } //------------------------------------------------------------------------------ void Solver::computeAccelerations () { hipLaunchKernelGGL(( computeAccelerationsD), dim3(mFluidData.GridDimensions), dim3(mFluidData.BlockDimensions), 0, 0, mFluidData.dAccelerations, mFluidData.dDensities, mFluidData.dPressures, mFluidData.dTempPositions, mFluidData.dTempVelocities, mFluidData.dCellStart, mFluidData.dCellEnd, mBoundaryData.Data->dPositions, mBoundaryData.dCellStart, 
mBoundaryData.dCellEnd, mFluidData.Data->NumParticles ); } //------------------------------------------------------------------------------ void Solver::integrate (float timeStep) { hipLaunchKernelGGL(( integrateD), dim3(mFluidData.GridDimensions), dim3(mFluidData.BlockDimensions), 0, 0, mFluidData.Data->dPositions, mFluidData.dVelocities, mFluidData.dAccelerations, mFluidData.dTempPositions, mFluidData.dTempVelocities, timeStep, mFluidData.Data->NumParticles ); //integrateXSPHD<<<mFluidData.GridDimensions, mFluidData.BlockDimensions>>>( // mFluidData.Data->dPositions, // mFluidData.dVelocities, // mFluidData.dTempPositions, // mFluidData.dTempVelocities, // mFluidData.dAccelerations, // mFluidData.dDensities, // mFluidData.dCellStart, // mFluidData.dCellEnd, // timeStep, // mFluidData.Data->NumParticles //); } //------------------------------------------------------------------------------
07f06996dbda1a177ac98b9b98e93af6ccd6884c.cu
//------------------------------------------------------------------------------ // Solver.cu //------------------------------------------------------------------------------ #include "Solver.h" //------------------------------------------------------------------------------ //============================================================================== // CUDA DEVICE code starts here //============================================================================== //------------------------------------------------------------------------------ __constant__ SolverConfiguration gConfiguration; // current solver's config. //------------------------------------------------------------------------------ //============================================================================== // UTLITY device kernels definition //============================================================================== //------------------------------------------------------------------------------ __device__ void computeCoordinatesOff ( int3& coordinate, // out: coordinate for [position] float3 position, const Grid& grid, float offset ) { // compute the coordinates of a point in space with respect to the given // grid coordinate.x = (int)((position.x + offset - grid.Origin.x)/grid.Spacing); coordinate.y = (int)((position.y + offset - grid.Origin.y)/grid.Spacing); coordinate.z = (int)((position.z + offset - grid.Origin.z)/grid.Spacing); // clamp coordinates if neccessary coordinate.x = max(0, min(coordinate.x, grid.Dimensions.x - 1)); coordinate.y = max(0, min(coordinate.y, grid.Dimensions.y - 1)); coordinate.z = max(0, min(coordinate.z, grid.Dimensions.z - 1)); } //------------------------------------------------------------------------------ __device__ void computeCoordinates ( int3& coordinate, // out: coordinate for [position] float3 position, const Grid& grid ) { // compute the coordinates of a point in space with respect to the given // grid coordinate.x = (int)((position.x - grid.Origin.x)/grid.Spacing); coordinate.y = (int)((position.y - grid.Origin.y)/grid.Spacing); coordinate.z = (int)((position.z - grid.Origin.z)/grid.Spacing); // clamp coordinates if neccessary coordinate.x = max(0, min(coordinate.x, grid.Dimensions.x - 1)); coordinate.y = max(0, min(coordinate.y, grid.Dimensions.y - 1)); coordinate.z = max(0, min(coordinate.z, grid.Dimensions.z - 1)); } //------------------------------------------------------------------------------ __device__ void computeHash ( unsigned int& hash, const int3& coordinate, const Grid& grid ) { // compute the hash for a grid given a coordinate within the grid hash = coordinate.x + grid.Dimensions.x* (coordinate.y + grid.Dimensions.y*coordinate.z); } //------------------------------------------------------------------------------ __device__ void computeHash ( unsigned int& hash, float3 position, const Grid& grid ) { // compute the hash for a grid given a position in world space, by first // conputing the coordinate in [grid], and then computing the hash. 
int3 coordinate; computeCoordinates(coordinate, position, grid); computeHash(hash, coordinate, grid); } //------------------------------------------------------------------------------ __device__ inline void computeNorm (float& norm, const float3& a) { norm = sqrt(a.x*a.x + a.y*a.y + a.z*a.z); } //------------------------------------------------------------------------------ __device__ inline void computeDistance ( float& dist, const float3& a, const float3& b ) { float3 d; d.x = a.x - b.x; d.y = a.y - b.y; d.z = a.z - b.z; computeNorm(dist, d); } //------------------------------------------------------------------------------ __device__ inline void evaluatePoly6Kernel ( float& res, // [out] result of evaluation float d, // distance between two particles float h // effective radius ) { // evaluate Muellers Poly6 Kernel float hhh = h*h*h; float coeff = 315.0f/(64.0f*M_PI*hhh*hhh*hhh); if (d < h) { float a = h*h - d*d; res = coeff*a*a*a; } else { res = 0.0f; } } //------------------------------------------------------------------------------ __device__ inline void evaluateSpikyKernelGradient ( float3& grad, const float3& xij, float h ) { float norm = 0.0f; computeNorm(norm, xij); if ((norm == 0.0f) || (norm > h)) { grad.x = 0.0f; grad.y = 0.0f; grad.z = 0.0f; return; } float hhh = h*h*h; float a = -45.0f/(M_PI*hhh*hhh)*(h - norm)*(h - norm); grad.x = a*xij.x/norm; grad.y = a*xij.y/norm; grad.z = a*xij.z/norm; } //------------------------------------------------------------------------------ __device__ inline void evaluateViscosityKernelLaplacian ( float& lapl, float dist, float h ) { if (dist < h) { float hhh = h*h*h; float coeff = 45.0f/(M_PI*hhh*hhh); lapl = coeff*(h - dist); return; } else { return; } } //------------------------------------------------------------------------------ __device__ inline void evaluateBoundaryWeight ( float& weight, float dist, float h ) { float q = 2.0f*dist/h; float coeff = 0.02f*gConfiguration.SpeedSound*gConfiguration.SpeedSound/ dist; if (q < 2.0f/3.0f) { weight = coeff*2.0f/3.0f; } else if (q < 1.0f) { weight = coeff*(2.0f*q - 3.0f/2.0f*q*q); } else if (q < 2.0f) { float a = 2.0f - q; weight = coeff*0.5f*a*a; } else { weight = 0.0f; } } //------------------------------------------------------------------------------ __device__ inline void computeDensityCell ( float& rhoi, // [out] density of particle i const float3& xi, // position of particle i const float* dPositions, unsigned int start, unsigned int end ) { // add up density contribution form particle in this cell ([start], [end]) // to the density of the particle i [rhoi]. 
(in fact only the kernel // weights are added up, mass is multiplied in the callee, to safe // operations) for (unsigned int j = start; j < end; j++) { float3 xj; xj.x = dPositions[3*j + 0]; xj.y = dPositions[3*j + 1]; xj.z = dPositions[3*j + 2]; float dist; computeDistance(dist, xi, xj); if (dist < gConfiguration.EffectiveRadius) { float weight = 0.0f; evaluatePoly6Kernel(weight, dist, gConfiguration.EffectiveRadius); rhoi += weight; } } } //------------------------------------------------------------------------------ __device__ inline void computeAccelerationCell ( float3& fi, float rhoi, float pi, const float3& xi, const float3& vi, const float* dDensities, const float* dPressures, const float* dPositions, const float* dVelocities, unsigned int start, unsigned int end ) { for (unsigned int j = start; j < end; j++) { float3 xj; xj.x = dPositions[3*j + 0]; xj.y = dPositions[3*j + 1]; xj.z = dPositions[3*j + 2]; float3 vj; vj.x = dVelocities[3*j + 0]; vj.y = dVelocities[3*j + 1]; vj.z = dVelocities[3*j + 2]; float rhoj = dDensities[j]; float pj = dPressures[j]; float dist; float3 xij; xij.x = xi.x - xj.x; xij.y = xi.y - xj.y; xij.z = xi.z - xj.z; computeNorm(dist, xij); if (dist != 0.0f && dist < gConfiguration.EffectiveRadius) { // evaluate the pressure force partice j exerts on particle i float coeffP = -rhoi*gConfiguration.FluidParticleMass* (pi/(rhoi*rhoi) + pj/(rhoj*rhoj)); float3 grad; evaluateSpikyKernelGradient( grad, xij, gConfiguration.EffectiveRadius ); fi.x += coeffP*grad.x; fi.y += coeffP*grad.y; fi.z += coeffP*grad.z; // evaluate the viscosity force partice j exerts on particle i float coeffV = gConfiguration.Viscosity* gConfiguration.FluidParticleMass/rhoj; float lapl = 0.0f; evaluateViscosityKernelLaplacian( lapl, dist, gConfiguration.EffectiveRadius ); float3 vji; vji.x = vj.x - vi.x; vji.y = vj.y - vi.y; vji.z = vj.z - vi.z; fi.x += coeffV*vji.x*lapl; fi.y += coeffV*vji.y*lapl; fi.z += coeffV*vji.z*lapl; // evaluate the surface tension force partice j exerts on particle i float weight; evaluatePoly6Kernel(weight, dist, gConfiguration.EffectiveRadius); float coeffT = -weight*gConfiguration.FluidParticleMass* gConfiguration.TensionCoefficient; fi.x += coeffT*xij.x; fi.y += coeffT*xij.y; fi.z += coeffT*xij.z; } } } //------------------------------------------------------------------------------ __device__ void computeBoundaryForceCell ( float3& bi, const float3& xi, const float* dPositions, unsigned int start, unsigned int end ) { for (unsigned int j = start; j < end; j++) { float3 xj; xj.x = dPositions[3*j + 0]; xj.y = dPositions[3*j + 1]; xj.z = dPositions[3*j + 2]; float3 xij; xij.x = xi.x - xj.x; xij.y = xi.y - xj.y; xij.z = xi.z - xj.z; float dist; computeNorm(dist, xij); if (dist < gConfiguration.EffectiveRadius) { float weight = 0.0f; evaluateBoundaryWeight(weight, dist, gConfiguration.EffectiveRadius); weight*= gConfiguration.BoundaryParticleMass/ (gConfiguration.FluidParticleMass + gConfiguration.BoundaryParticleMass); bi.x += weight*xij.x/dist; bi.y += weight*xij.y/dist; bi.z += weight*xij.z/dist; } } } //------------------------------------------------------------------------------ __device__ void computeVelXSPHCell( float3& velXSPH, const float3& xi, const float3& vi, const float* dPositions, const float* dVelocities, const float* dAccelerations, const float* dDensities, unsigned int start, unsigned int end, float dt ) { for (unsigned int j = start; j < end; j++) { float3 xj; xj.x = dPositions[3*j + 0]; xj.y = dPositions[3*j + 1]; xj.z = dPositions[3*j + 2]; 
float3 aj; aj.x = dAccelerations[3*j + 0]; aj.y = dAccelerations[3*j + 1]; aj.z = dAccelerations[3*j + 2]; float3 vj; vj.x = dVelocities[3*j + 0] + dt*aj.x; vj.y = dVelocities[3*j + 1] + dt*aj.y; vj.z = dVelocities[3*j + 2] + dt*aj.z; float rhoj = dDensities[j]; float dist; float3 xij; xij.x = xi.x - xj.x; xij.y = xi.y - xj.y; xij.z = xi.z - xj.z; computeNorm(dist, xij); if ( dist < gConfiguration.EffectiveRadius) { float3 vji; vji.x = vj.x - vi.x; vji.y = vj.y - vi.y; vji.z = vj.z - vi.z; float weight; evaluatePoly6Kernel(weight, dist, gConfiguration.EffectiveRadius); weight *= (gConfiguration.FluidParticleMass/rhoj); velXSPH.x += vji.x*weight; velXSPH.y += vji.y*weight; velXSPH.z += vji.z*weight; } } } //============================================================================== // GLOBAL device kernel definitions //============================================================================== //------------------------------------------------------------------------------ __global__ void computeHashs ( unsigned int* dHashs, // hash values of each particle unsigned int* dActiveIDs, // array of active particle ids const float* dPositions, // positions of each particle unsigned int numParticles // number of ids in the id array ) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= numParticles) { return; } dActiveIDs[idx] = idx; float3 pos; pos.x = dPositions[3*idx + 0]; pos.y = dPositions[3*idx + 1]; pos.z = dPositions[3*idx + 2]; computeHash(dHashs[idx], pos, gConfiguration.Grid); }; //------------------------------------------------------------------------------ __global__ void reorderComputeCellStartEndBoundaryD ( unsigned int* dCellStart, unsigned int* dCellEnd, float* dTempPositions, const float* dPositions, const unsigned int* dSortedIDs, const unsigned int* dHashs, unsigned int numParticles ) { extern __shared__ int sharedHash[]; unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= numParticles) { return; } // reorder unsigned int id = dSortedIDs[idx]; dTempPositions[3*idx + 0] = dPositions[3*id + 0]; dTempPositions[3*idx + 1] = dPositions[3*id + 1]; dTempPositions[3*idx + 2] = dPositions[3*id + 2]; // compute cell start end int hash = dHashs[idx]; sharedHash[threadIdx.x + 1] = hash; if (idx > 0 && threadIdx.x == 0) { sharedHash[0] = dHashs[idx - 1]; } __syncthreads(); if (idx == 0 || hash != sharedHash[threadIdx.x]) { dCellStart[hash] = idx; if (idx > 0) { dCellEnd[sharedHash[threadIdx.x]] = idx; } } if (idx == numParticles - 1) { dCellEnd[hash] = idx + 1; } } //------------------------------------------------------------------------------ __global__ void reorderAndComputeCellStartEndD ( unsigned int* dCellStart, unsigned int* dCellEnd, float* dTempPositions, float* dTempVelocities, unsigned int* dSortedIDs, const float* dPositions, const float* dVelocities, const unsigned int* dHashs, unsigned int numParticles ) { extern __shared__ int sharedHash[]; unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= numParticles) { return; } // reorder unsigned int id = dSortedIDs[idx]; dTempPositions[3*idx + 0] = dPositions[3*id + 0]; dTempPositions[3*idx + 1] = dPositions[3*id + 1]; dTempPositions[3*idx + 2] = dPositions[3*id + 2]; dTempVelocities[3*idx + 0] = dVelocities[3*id + 0]; dTempVelocities[3*idx + 1] = dVelocities[3*id + 1]; dTempVelocities[3*idx + 2] = dVelocities[3*id + 2]; // compute cell start end ids int hash = dHashs[idx]; sharedHash[threadIdx.x + 1] = hash; if (idx > 0 && threadIdx.x == 0) { sharedHash[0] = dHashs[idx - 1]; } 
__syncthreads(); if (idx == 0 || hash != sharedHash[threadIdx.x]) { dCellStart[hash] = idx; if (idx > 0) { dCellEnd[sharedHash[threadIdx.x]] = idx; } } if (idx == numParticles - 1) { dCellEnd[hash] = idx + 1; } } //------------------------------------------------------------------------------ __global__ void computeDensitiesPressuresD ( float* dDensities, // [out] computed densities float* dPressures, const float* dPositions, const unsigned int* dCellStart, const unsigned int* dCellEnd, unsigned int numParticles ) { unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x; if (idx >= numParticles) { return; } float3 xi; xi.x = dPositions[3*idx + 0]; xi.y = dPositions[3*idx + 1]; xi.z = dPositions[3*idx + 2]; float rhoi = 0.0f; int3 cs, ce; computeCoordinatesOff( cs, xi, gConfiguration.Grid, -gConfiguration.EffectiveRadius ); computeCoordinatesOff( ce, xi, gConfiguration.Grid, gConfiguration.EffectiveRadius ); int3 cc; for (cc.z = cs.z; cc.z <= ce.z; cc.z++) { for (cc.y = cs.y; cc.y <= ce.y; cc.y++) { for (cc.x = cs.x; cc.x <= ce.x; cc.x++) { unsigned int hash; computeHash(hash, cc, gConfiguration.Grid); unsigned int start = dCellStart[hash]; unsigned int end = dCellEnd[hash]; computeDensityCell( rhoi, xi, dPositions, start, end ); } } } rhoi *= gConfiguration.FluidParticleMass; dDensities[idx] = rhoi; dPressures[idx] = gConfiguration.BulkModulus* (rhoi - gConfiguration.RestDensity); } //------------------------------------------------------------------------------ __global__ void computeAccelerationsD ( float* dAccelerations, const float* dDensities, const float* dPressures, const float* dPositions, const float* dVelocities, const unsigned int* dCellStart, const unsigned int* dCellEnd, const float* dBoundaryPositions, const unsigned int* dBoundaryCellStart, const unsigned int* dBoundaryCellEnd, unsigned int numParticles ) { unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x; if (idx >= numParticles) { return; } float3 xi; xi.x = dPositions[3*idx + 0]; xi.y = dPositions[3*idx + 1]; xi.z = dPositions[3*idx + 2]; float3 vi; vi.x = dVelocities[3*idx + 0]; vi.y = dVelocities[3*idx + 1]; vi.z = dVelocities[3*idx + 2]; float rhoi = dDensities[idx]; float pi = dPressures[idx]; int3 cs, ce; computeCoordinatesOff( cs, xi, gConfiguration.Grid, -gConfiguration.EffectiveRadius ); computeCoordinatesOff( ce, xi, gConfiguration.Grid, gConfiguration.EffectiveRadius ); float3 fi; fi.x = 0.0; fi.y = 0.0; fi.z = 0.0; float3 bi; bi.x = 0.0f; bi.y = 0.0f; bi.z = 0.0f; int3 cc; for (cc.z = cs.z; cc.z <= ce.z; cc.z++) { for (cc.y = cs.y; cc.y <= ce.y; cc.y++) { for (cc.x = cs.x; cc.x <= ce.x; cc.x++) { unsigned int hash; computeHash(hash, cc, gConfiguration.Grid); unsigned int start = dCellStart[hash]; unsigned int end = dCellEnd[hash]; computeAccelerationCell( fi, rhoi, pi, xi, vi, dDensities, dPressures, dPositions, dVelocities, start, end ); } } } for (cc.z = cs.z; cc.z <= ce.z; cc.z++) { for (cc.y = cs.y; cc.y <= ce.y; cc.y++) { for (cc.x = cs.x; cc.x <= ce.x; cc.x++) { unsigned int hash; computeHash(hash, cc, gConfiguration.Grid); unsigned int start = dBoundaryCellStart[hash]; unsigned int end = dBoundaryCellEnd[hash]; computeBoundaryForceCell( bi, xi, dBoundaryPositions, start, end ); } } } dAccelerations[3*idx + 0] = fi.x/rhoi + bi.x; dAccelerations[3*idx + 1] = fi.y/rhoi - 9.81f + bi.y; dAccelerations[3*idx + 2] = fi.z/rhoi + bi.z; } //------------------------------------------------------------------------------ __global__ void integrateD ( float* dPositions, float* dVelocities, const float* 
dAccelerations,
    const float* dTempPositions,
    const float* dTempVelocities,
    float timeStep,
    unsigned int numParticles
)
{
    unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;

    if (idx >= numParticles)
    {
        return;
    }

    float3 xi;
    xi.x = dTempPositions[3*idx + 0];
    xi.y = dTempPositions[3*idx + 1];
    xi.z = dTempPositions[3*idx + 2];
    float3 vi;
    vi.x = dTempVelocities[3*idx + 0];
    vi.y = dTempVelocities[3*idx + 1];
    vi.z = dTempVelocities[3*idx + 2];

    // update position and velocity of the particle
    vi.x += timeStep*dAccelerations[3*idx + 0];
    vi.y += timeStep*dAccelerations[3*idx + 1];
    vi.z += timeStep*dAccelerations[3*idx + 2];
    xi.x += timeStep*vi.x;
    xi.y += timeStep*vi.y;
    xi.z += timeStep*vi.z;

    // store new position and velocity of the particle
    dPositions[3*idx + 0] = xi.x;
    dPositions[3*idx + 1] = xi.y;
    dPositions[3*idx + 2] = xi.z;
    dVelocities[3*idx + 0] = vi.x;
    dVelocities[3*idx + 1] = vi.y;
    dVelocities[3*idx + 2] = vi.z;
}
//------------------------------------------------------------------------------
__global__ void integrateXSPHD
(
    float* dPositions,
    float* dVelocities,
    const float* dTempPositions,
    const float* dTempVelocities,
    const float* dAccelerations,
    const float* dDensities,
    const unsigned int* dCellStart,
    const unsigned int* dCellEnd,
    float timeStep,
    unsigned int numParticles
)
{
    unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;

    if (idx >= numParticles)
    {
        return;
    }

    float3 xi;
    xi.x = dTempPositions[3*idx + 0];
    xi.y = dTempPositions[3*idx + 1];
    xi.z = dTempPositions[3*idx + 2];
    float3 vi;
    vi.x = dTempVelocities[3*idx + 0];
    vi.y = dTempVelocities[3*idx + 1];
    vi.z = dTempVelocities[3*idx + 2];
    vi.x += timeStep*dAccelerations[3*idx + 0];
    vi.y += timeStep*dAccelerations[3*idx + 1];
    vi.z += timeStep*dAccelerations[3*idx + 2];

    float3 velXSPH;
    velXSPH.x = 0.0f;
    velXSPH.y = 0.0f;
    velXSPH.z = 0.0f;

    // compute XSPH velocity
    int3 cs, ce, cc;
    computeCoordinatesOff(
        cs, xi, gConfiguration.Grid, -gConfiguration.EffectiveRadius
    );
    computeCoordinatesOff(
        ce, xi, gConfiguration.Grid, gConfiguration.EffectiveRadius
    );

    for (cc.z = cs.z; cc.z <= ce.z; cc.z++)
    {
        for (cc.y = cs.y; cc.y <= ce.y; cc.y++)
        {
            for (cc.x = cs.x; cc.x <= ce.x; cc.x++)
            {
                unsigned int hash;
                computeHash(hash, cc, gConfiguration.Grid);
                unsigned int start = dCellStart[hash];
                unsigned int end = dCellEnd[hash];
                computeVelXSPHCell(
                    velXSPH, xi, vi, dTempPositions, dTempVelocities,
                    dAccelerations, dDensities, start, end, timeStep
                );
            }
        }
    }

    // apply the XSPH correction component-wise
    vi.x += gConfiguration.XSPHCoeff*velXSPH.x;
    vi.y += gConfiguration.XSPHCoeff*velXSPH.y;
    vi.z += gConfiguration.XSPHCoeff*velXSPH.z;
    xi.x += timeStep*vi.x;
    xi.y += timeStep*vi.y;
    xi.z += timeStep*vi.z;

    // store new position and velocity of the particle
    dPositions[3*idx + 0] = xi.x;
    dPositions[3*idx + 1] = xi.y;
    dPositions[3*idx + 2] = xi.z;
    dVelocities[3*idx + 0] = vi.x;
    dVelocities[3*idx + 1] = vi.y;
    dVelocities[3*idx + 2] = vi.z;
}
//------------------------------------------------------------------------------
//==============================================================================
// HOST code starts here
//==============================================================================
//------------------------------------------------------------------------------
#define BLOCK_DIMENSIONS_X 256
#define EMPTY_CELL_ID 0xFFFFFFFF
//------------------------------------------------------------------------------
//==============================================================================
// UTILITY functions start here
//============================================================================== //------------------------------------------------------------------------------ void computeGridDimensions ( dim3& gridDimensions, const dim3& blockDimensions, unsigned int numParticles ) { // compute the dimensions of the cuda grid for tgiven block dimensions, // and the number of particles if (numParticles % blockDimensions.x == 0) { gridDimensions.x = numParticles/blockDimensions.x; } else { gridDimensions.x = numParticles/blockDimensions.x + 1; } gridDimensions.y = 1; gridDimensions.z = 1; } //------------------------------------------------------------------------------ //============================================================================== // SPHParticleData's definition //============================================================================== //------------------------------------------------------------------------------ Solver::SPHParticleData::SPHParticleData ( ParticleData* data, unsigned int numGridCells // # of grid cells in each direction ) : Data(data), BlockDimensions(BLOCK_DIMENSIONS_X, 1, 1), NumGridCells(numGridCells) { // allocate additional memory for storing density, pressure, acceleration, // velocities and hash values for the particles and initialize that data. // also allocate memory for the neighbor search as described in the nVidia // particles white paper CUDA::Alloc<float>(&dDensities, data->MaxParticles); CUDA::Alloc<float>(&dPressures, data->MaxParticles); CUDA::Alloc<float>(&dAccelerations, 3*data->MaxParticles); CUDA::Alloc<float>(&dVelocities, 3*data->MaxParticles); CUDA::Alloc<float>(&dTempPositions, 3*data->MaxParticles); CUDA::Alloc<float>(&dTempVelocities, 3*data->MaxParticles); CUDA::Fill<float>(dDensities, data->MaxParticles, 0.0f); CUDA::Fill<float>(dPressures, data->MaxParticles, 0.0f); CUDA::Fill<float>(dAccelerations, 3*data->MaxParticles, 0.0f); CUDA::Fill<float>(dVelocities, 3*data->MaxParticles, 0.0f); CUDA::Fill<float>(dTempPositions, 3*data->MaxParticles, 0.0f); CUDA::Fill<float>(dTempVelocities, 3*data->MaxParticles, 0.0f); CUDA::Alloc<unsigned int>(&dActiveIDs, data->MaxParticles); CUDA::Alloc<unsigned int>(&dHashs, data->MaxParticles); CUDA::Alloc<unsigned int>(&dCellStart, numGridCells); CUDA::Alloc<unsigned int>(&dCellEnd, numGridCells); CUDA::Fill<unsigned int>(dActiveIDs, data->MaxParticles, 0, 1); CUDA::Fill<unsigned int>(dHashs, data->MaxParticles, 0); CUDA::Fill<unsigned int>(dCellStart, numGridCells, 0); CUDA::Fill<unsigned int>(dCellEnd, numGridCells, 0); // compute the number of cuda blocks we need based on the current number // of particles and the threads per block we use. 
Also compute the amount // of shared memory we need to compute the values for [dCellStart] and // [dCellEnd] computeGridDimensions(GridDimensions, BlockDimensions, data->NumParticles); SharedMemSize = sizeof(int)*(BlockDimensions.x + 1); } //------------------------------------------------------------------------------ Solver::SPHParticleData::~SPHParticleData () { // free everything CUDA::Free<float>(&dDensities); CUDA::Free<float>(&dAccelerations); CUDA::Free<float>(&dPressures); CUDA::Free<float>(&dVelocities); CUDA::Free<float>(&dTempVelocities); CUDA::Free<float>(&dTempPositions); CUDA::Free<unsigned int>(&dHashs); CUDA::Free<unsigned int>(&dCellStart); CUDA::Free<unsigned int>(&dCellEnd); } //------------------------------------------------------------------------------ Solver::BoundaryParticleData::BoundaryParticleData ( ParticleData* data, unsigned int numGridCells ) : Data(data), BlockDimensions(BLOCK_DIMENSIONS_X, 1, 1), NumGridCells(numGridCells) { CUDA::Alloc<unsigned int>(&dHashs, data->MaxParticles); CUDA::Alloc<unsigned int>(&dCellStart, numGridCells); CUDA::Alloc<unsigned int>(&dCellEnd, numGridCells); CUDA::Fill<unsigned int>(dHashs, data->MaxParticles, 0); CUDA::Fill<unsigned int>(dCellStart, numGridCells, 0); CUDA::Fill<unsigned int>(dCellEnd, numGridCells, 0); computeGridDimensions(GridDimensions, BlockDimensions, data->NumParticles); SharedMemSize = sizeof(int)*(BlockDimensions.x + 1); } //------------------------------------------------------------------------------ Solver::BoundaryParticleData::~BoundaryParticleData () { CUDA::Free<unsigned int>(&dHashs); CUDA::Free<unsigned int>(&dCellStart); CUDA::Free<unsigned int>(&dCellEnd); } //------------------------------------------------------------------------------ //============================================================================== // Solvers's definition //============================================================================== //------------------------------------------------------------------------------ Solver::Solver ( ParticleData* fluidData, ParticleData* boundaryData, const SolverConfiguration* configuration ) : mConfiguration(*configuration), mFluidData(fluidData, Grid::ComputeNumGridCells(configuration->Grid)), mBoundaryData(boundaryData, Grid::ComputeNumGridCells(configuration->Grid)) { // store pointer to fluid particles and boundary particles also store // a copy of solver configuration //-------------------------------------------------------------------------- // compute neighborhood of boundary particles beforehand unsigned int* dBoundaryIDs; CUDA::Alloc<unsigned int>(&dBoundaryIDs, mBoundaryData.Data->MaxParticles); CUDA::Fill<unsigned int>(dBoundaryIDs, mBoundaryData.Data->MaxParticles, 0, 1); float* dBoundaryPositions; CUDA::Alloc<float>(&dBoundaryPositions, 3*mBoundaryData.Data->MaxParticles); this->Bind(); // bind first mBoundaryData.Data->Map(); computeHashs<<<mBoundaryData.GridDimensions, mBoundaryData.BlockDimensions>>>( mBoundaryData.dHashs, dBoundaryIDs, mBoundaryData.Data->dPositions, mBoundaryData.Data->NumParticles ); thrust::sort_by_key( thrust::device_ptr<unsigned int>(mBoundaryData.dHashs), thrust::device_ptr<unsigned int>(mBoundaryData.dHashs + mBoundaryData.Data->NumParticles), thrust::device_ptr<unsigned int>(dBoundaryIDs) ); CUDA::Memset<unsigned int>( mBoundaryData.dCellStart, EMPTY_CELL_ID, mBoundaryData.NumGridCells ); CUDA::Memset<unsigned int>( mBoundaryData.dCellEnd, EMPTY_CELL_ID, mBoundaryData.NumGridCells ); 
reorderComputeCellStartEndBoundaryD<<<mBoundaryData.GridDimensions, mBoundaryData.BlockDimensions, mBoundaryData.SharedMemSize>>>( mBoundaryData.dCellStart, mBoundaryData.dCellEnd, dBoundaryPositions, mBoundaryData.Data->dPositions, dBoundaryIDs, mBoundaryData.dHashs, mBoundaryData.Data->NumParticles ); CUDA::Memcpy<float>( mBoundaryData.Data->dPositions, dBoundaryPositions, 3*mBoundaryData.Data->MaxParticles, cudaMemcpyDeviceToDevice ); mBoundaryData.Data->Unmap(); CUDA::Free<unsigned int>(&dBoundaryIDs); CUDA::Free<float>(&dBoundaryPositions); //-------------------------------------------------------------------------- } //------------------------------------------------------------------------------ Solver::~Solver () { } //------------------------------------------------------------------------------ void Solver::Bind () const { // set the configuration of this solver on the device CUDA::SafeCall( cudaMemcpyToSymbol( gConfiguration, &mConfiguration, sizeof(mConfiguration) ), __FILE__, __LINE__ ); } //------------------------------------------------------------------------------ void Solver::Advance (float timeStep) { CUDA::Timer t; t.Start(); mFluidData.Data->Map(); mBoundaryData.Data->Map(); this->computeNeighborhoods(); this->computeDensities(); this->computeAccelerations(); this->integrate(timeStep); mBoundaryData.Data->Unmap(); mFluidData.Data->Unmap(); t.Stop(); t.DumpElapsed(); } //------------------------------------------------------------------------------ void Solver::computeNeighborhoods () { // compute hashs of all particles computeHashs<<<mFluidData.GridDimensions, mFluidData.BlockDimensions>>>( mFluidData.dHashs, mFluidData.dActiveIDs, mFluidData.Data->dPositions, mFluidData.Data->NumParticles ); // sort the active particle ids by their hash thrust::sort_by_key( thrust::device_ptr<unsigned int>(mFluidData.dHashs), thrust::device_ptr<unsigned int>(mFluidData.dHashs + mFluidData.Data->NumParticles), thrust::device_ptr<unsigned int>(mFluidData.dActiveIDs) ); // reset, then compute cell start end list CUDA::Memset<unsigned int>( mFluidData.dCellStart, EMPTY_CELL_ID, mFluidData.NumGridCells ); CUDA::Memset<unsigned int>( mFluidData.dCellEnd, EMPTY_CELL_ID, mFluidData.NumGridCells ); reorderAndComputeCellStartEndD<<<mFluidData.GridDimensions, mFluidData.BlockDimensions, mFluidData.SharedMemSize>>>( mFluidData.dCellStart, mFluidData.dCellEnd, mFluidData.dTempPositions, mFluidData.dTempVelocities, mFluidData.dActiveIDs, mFluidData.Data->dPositions, mFluidData.dVelocities, mFluidData.dHashs, mFluidData.Data->NumParticles ); } //------------------------------------------------------------------------------ void Solver::computeDensities () { computeDensitiesPressuresD<<<mFluidData.GridDimensions, mFluidData.BlockDimensions>>>( mFluidData.dDensities, mFluidData.dPressures, mFluidData.dTempPositions, mFluidData.dCellStart, mFluidData.dCellEnd, mFluidData.Data->NumParticles ); } //------------------------------------------------------------------------------ void Solver::computeAccelerations () { computeAccelerationsD<<<mFluidData.GridDimensions, mFluidData.BlockDimensions>>>( mFluidData.dAccelerations, mFluidData.dDensities, mFluidData.dPressures, mFluidData.dTempPositions, mFluidData.dTempVelocities, mFluidData.dCellStart, mFluidData.dCellEnd, mBoundaryData.Data->dPositions, mBoundaryData.dCellStart, mBoundaryData.dCellEnd, mFluidData.Data->NumParticles ); } //------------------------------------------------------------------------------ void Solver::integrate (float timeStep) { 
integrateD<<<mFluidData.GridDimensions, mFluidData.BlockDimensions>>>( mFluidData.Data->dPositions, mFluidData.dVelocities, mFluidData.dAccelerations, mFluidData.dTempPositions, mFluidData.dTempVelocities, timeStep, mFluidData.Data->NumParticles ); //integrateXSPHD<<<mFluidData.GridDimensions, mFluidData.BlockDimensions>>>( // mFluidData.Data->dPositions, // mFluidData.dVelocities, // mFluidData.dTempPositions, // mFluidData.dTempVelocities, // mFluidData.dAccelerations, // mFluidData.dDensities, // mFluidData.dCellStart, // mFluidData.dCellEnd, // timeStep, // mFluidData.Data->NumParticles //); } //------------------------------------------------------------------------------
2cd5b2b0eef24104de77f6d6943a3af1c8b8314e.hip
// !!! This is a file automatically generated by hipify!!!
#include <bits/stdc++.h>
#include <hip/hip_runtime.h>
using namespace std;

#define CEIL(a,b) ((a-1)/b+1)
#define N 1024

// element-wise sum of two r x c matrices; x indexes columns, y indexes rows
__global__ void Sum(float* d_a, float* d_b, float* d_c, int r, int c)
{
    int x = blockIdx.x*blockDim.x + threadIdx.x;
    int y = blockIdx.y*blockDim.y + threadIdx.y;
    int index = c*y + x;
    if (x < c && y < r)
        d_c[index] = d_a[index] + d_b[index];
}

int main()
{
    int r, c;
    cout << "enter row and column : ";
    cin >> r >> c;

    float h_a[r][c], h_b[r][c], h_c[r][c];
    int bytes = r*c*sizeof(float);

    for (int i = 0; i < r; i++)
    {
        for (int j = 0; j < c; j++)
        {
            h_a[i][j] = rand()%1000;
            h_b[i][j] = rand()%1000;
        }
    }

    float *d_a, *d_b, *d_c;
    hipMalloc((void**)&d_b, bytes);
    hipMalloc((void**)&d_a, bytes);
    hipMalloc((void**)&d_c, bytes);
    hipMemcpy(d_b, h_b, bytes, hipMemcpyHostToDevice);
    hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice);

    dim3 block(32, 32, 1);
    // grid.x must cover the columns and grid.y the rows
    dim3 grid(CEIL(c, 32), CEIL(r, 32), 1);
    hipLaunchKernelGGL(( Sum), dim3(grid), dim3(block), 0, 0, d_a, d_b, d_c, r, c);

    hipMemcpy(h_c, d_c, bytes, hipMemcpyDeviceToHost);

    bool verify = true;
    for (int i = 0; i < r; i++)
    {
        for (int j = 0; j < c; j++)
        {
            if (h_c[i][j] != h_a[i][j] + h_b[i][j])
                verify = false;
        }
    }
    if (verify)
        cout << "Result is Correct";
    else
        cout << "Incorrect Result";

    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);
}
2cd5b2b0eef24104de77f6d6943a3af1c8b8314e.cu
#include <bits/stdc++.h>
#include <cuda.h>
using namespace std;

#define CEIL(a,b) ((a-1)/b+1)
#define N 1024

// element-wise sum of two r x c matrices; x indexes columns, y indexes rows
__global__ void Sum(float* d_a, float* d_b, float* d_c, int r, int c)
{
    int x = blockIdx.x*blockDim.x + threadIdx.x;
    int y = blockIdx.y*blockDim.y + threadIdx.y;
    int index = c*y + x;
    if (x < c && y < r)
        d_c[index] = d_a[index] + d_b[index];
}

int main()
{
    int r, c;
    cout << "enter row and column : ";
    cin >> r >> c;

    float h_a[r][c], h_b[r][c], h_c[r][c];
    int bytes = r*c*sizeof(float);

    for (int i = 0; i < r; i++)
    {
        for (int j = 0; j < c; j++)
        {
            h_a[i][j] = rand()%1000;
            h_b[i][j] = rand()%1000;
        }
    }

    float *d_a, *d_b, *d_c;
    cudaMalloc((void**)&d_b, bytes);
    cudaMalloc((void**)&d_a, bytes);
    cudaMalloc((void**)&d_c, bytes);
    cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);

    dim3 block(32, 32, 1);
    // grid.x must cover the columns and grid.y the rows
    dim3 grid(CEIL(c, 32), CEIL(r, 32), 1);
    Sum<<<grid, block>>>(d_a, d_b, d_c, r, c);

    cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);

    bool verify = true;
    for (int i = 0; i < r; i++)
    {
        for (int j = 0; j < c; j++)
        {
            if (h_c[i][j] != h_a[i][j] + h_b[i][j])
                verify = false;
        }
    }
    if (verify)
        cout << "Result is Correct";
    else
        cout << "Incorrect Result";

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
}
c8b0e72ffe6ac4fd4702607a9b296caa289a3869.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "repeat.h" #define DECLS(N) \ __global__ void kicache_line_##N##_0 (unsigned int *ts, unsigned int* out, int p1, int p2, int its); \ __global__ void kicache_line_##N##_1 (unsigned int *ts, unsigned int* out, int p1, int p2, int its); \ __global__ void kicache_line_##N##_2 (unsigned int *ts, unsigned int* out, int p1, int p2, int its); \ __global__ void kicache_line_##N##_3 (unsigned int *ts, unsigned int* out, int p1, int p2, int its); \ __global__ void kicache_line_##N##_4 (unsigned int *ts, unsigned int* out, int p1, int p2, int its); \ __global__ void kicache_line_##N##_5 (unsigned int *ts, unsigned int* out, int p1, int p2, int its); \ __global__ void kicache_line_##N##_6 (unsigned int *ts, unsigned int* out, int p1, int p2, int its); \ __global__ void kicache_line_##N##_7 (unsigned int *ts, unsigned int* out, int p1, int p2, int its); DECLS(117) DECLS(118) DECLS(119) DECLS(120) DECLS(121) DECLS(122) DECLS(123) DECLS(124) DECLS(125) DECLS(126) DECLS(127) DECLS(128) DECLS(129) DECLS(130) DECLS(131) DECLS(132) DECLS(133) DECLS(134) DECLS(135) DECLS(136) DECLS(137) DECLS(138) DECLS(139) DECLS(140) DECLS(141) DECLS(142) DECLS(143) DECLS(144) DECLS(145) DECLS(146) DECLS(147) DECLS(148) DECLS(149) DECLS(150) DECLS(151) DECLS(152) DECLS(153) DECLS(154) DECLS(155) DECLS(156) DECLS(157) DECLS(158) DECLS(159) DECLS(160) DECLS(161) DECLS(162) DECLS(163) DECLS(164) DECLS(165) DECLS(166) DECLS(167) DECLS(168) __global__ void kibuffer (unsigned int *ts, unsigned int *out, int p1, int p2, int its); void measure_icache2(); int main() { measure_icache2(); } void measure_icache2() { unsigned int ts[4096]; // ts, output from kernel. Two elements used per thread. unsigned int *d_ts; unsigned int *d_out; // Unused memory for storing output dim3 Db = dim3(1); dim3 Dg = dim3(1,1,1); // Allocate device array. 
hipError_t errcode; if (hipSuccess != (errcode = hipMalloc((void**)&d_ts, sizeof(ts)))) { printf ("hipMalloc failed %s:%d\n", __FILE__, __LINE__); printf (" %s\n", hipGetErrorString(errcode)); return; } if (hipSuccess != hipMalloc((void**)&d_out, 4)) { printf ("hipMalloc failed %s:%d\n", __FILE__, __LINE__); return; } printf ("L2 icache parameters (7-10KB of independent ops):\n"); printf (" [Codesize: avg, min, max runtime]\n"); #define DO_LTEST(N,M) \ {int stime=0, max_stime=0, min_stime=0x80000000; \ for (int k=0;k<20;k++) { \ hipLaunchKernelGGL(( kicache_line_##N##_##M) , dim3(Dg), dim3(Db), 0, 0, d_ts, d_out, 4, 3, 100); hipDeviceSynchronize(); \ hipMemcpy(ts, d_ts, sizeof(ts), hipMemcpyDeviceToHost); \ stime += ts[1]-ts[0]; \ if (max_stime < ts[1]-ts[0]) max_stime = ts[1]-ts[0]; \ if (min_stime > ts[1]-ts[0]) min_stime = ts[1]-ts[0]; \ } \ printf (" %d: %.4f, %.4f, %.4f\n", N*64+M*8, (stime)/(2000.0*(N*8+M)), (min_stime)/(100.0*(N*8+M)), (max_stime)/(100.0*(N*8+M))); } #define DO_LTESTS(N) DO_LTEST(N,0) DO_LTEST(N,1) DO_LTEST(N,2) DO_LTEST(N,3) DO_LTEST(N,4) DO_LTEST(N,5) DO_LTEST(N,6) DO_LTEST(N,7) DO_LTESTS(118); DO_LTESTS(119); DO_LTESTS(120); DO_LTESTS(121); DO_LTESTS(122); DO_LTESTS(123); DO_LTESTS(124); DO_LTESTS(125); DO_LTESTS(126); DO_LTESTS(127); DO_LTESTS(128); DO_LTESTS(129); DO_LTESTS(130); DO_LTESTS(131); DO_LTESTS(132); DO_LTESTS(133); DO_LTESTS(134); DO_LTESTS(135); DO_LTESTS(136); DO_LTESTS(137); DO_LTESTS(138); DO_LTESTS(139); DO_LTESTS(140); DO_LTESTS(141); DO_LTESTS(142); DO_LTESTS(143); DO_LTESTS(144); DO_LTESTS(145); DO_LTESTS(146); DO_LTESTS(147); DO_LTESTS(148); DO_LTESTS(149); DO_LTESTS(150); DO_LTESTS(151); DO_LTESTS(152); DO_LTESTS(153); DO_LTESTS(154); DO_LTESTS(155); DO_LTESTS(156); DO_LTESTS(157); DO_LTESTS(158); DO_LTESTS(159); DO_LTESTS(160); DO_LTESTS(161); DO_LTESTS(162); DO_LTESTS(163); DO_LTESTS(164); //DO_LTESTS(165); //DO_LTESTS(166); //DO_LTESTS(167); //DO_LTESTS(168); if (1) { printf ("\nMeasuring instruction buffer size:\n"); unsigned times[36] = {0}; Db.x = 256; for (int k=0;k<10000;k++) { hipLaunchKernelGGL(( kibuffer) , dim3(Dg), dim3(Db), 0, 0, d_ts, d_out, k%200+4, 3, 300); hipDeviceSynchronize(); hipError_t errcode = hipGetLastError(); if (errcode != hipSuccess) { printf ("Failed: %s\n", hipGetErrorString(errcode)); break; } hipMemcpy(ts, d_ts, sizeof(ts), hipMemcpyDeviceToHost); for (int i=0;i<36;i++) times[i] += (unsigned)ts[i]-(unsigned)ts[0]; } for (int i=0;i<36;i++) printf (" %d: %.3f\n", i*16, times[i]/(10000.0)); printf ("\n"); } hipFree(d_ts); hipFree(d_out); }
c8b0e72ffe6ac4fd4702607a9b296caa289a3869.cu
#include <stdio.h> #include "repeat.h" #define DECLS(N) \ __global__ void kicache_line_##N##_0 (unsigned int *ts, unsigned int* out, int p1, int p2, int its); \ __global__ void kicache_line_##N##_1 (unsigned int *ts, unsigned int* out, int p1, int p2, int its); \ __global__ void kicache_line_##N##_2 (unsigned int *ts, unsigned int* out, int p1, int p2, int its); \ __global__ void kicache_line_##N##_3 (unsigned int *ts, unsigned int* out, int p1, int p2, int its); \ __global__ void kicache_line_##N##_4 (unsigned int *ts, unsigned int* out, int p1, int p2, int its); \ __global__ void kicache_line_##N##_5 (unsigned int *ts, unsigned int* out, int p1, int p2, int its); \ __global__ void kicache_line_##N##_6 (unsigned int *ts, unsigned int* out, int p1, int p2, int its); \ __global__ void kicache_line_##N##_7 (unsigned int *ts, unsigned int* out, int p1, int p2, int its); DECLS(117) DECLS(118) DECLS(119) DECLS(120) DECLS(121) DECLS(122) DECLS(123) DECLS(124) DECLS(125) DECLS(126) DECLS(127) DECLS(128) DECLS(129) DECLS(130) DECLS(131) DECLS(132) DECLS(133) DECLS(134) DECLS(135) DECLS(136) DECLS(137) DECLS(138) DECLS(139) DECLS(140) DECLS(141) DECLS(142) DECLS(143) DECLS(144) DECLS(145) DECLS(146) DECLS(147) DECLS(148) DECLS(149) DECLS(150) DECLS(151) DECLS(152) DECLS(153) DECLS(154) DECLS(155) DECLS(156) DECLS(157) DECLS(158) DECLS(159) DECLS(160) DECLS(161) DECLS(162) DECLS(163) DECLS(164) DECLS(165) DECLS(166) DECLS(167) DECLS(168) __global__ void kibuffer (unsigned int *ts, unsigned int *out, int p1, int p2, int its); void measure_icache2(); int main() { measure_icache2(); } void measure_icache2() { unsigned int ts[4096]; // ts, output from kernel. Two elements used per thread. unsigned int *d_ts; unsigned int *d_out; // Unused memory for storing output dim3 Db = dim3(1); dim3 Dg = dim3(1,1,1); // Allocate device array. 
cudaError_t errcode; if (cudaSuccess != (errcode = cudaMalloc((void**)&d_ts, sizeof(ts)))) { printf ("cudaMalloc failed %s:%d\n", __FILE__, __LINE__); printf (" %s\n", cudaGetErrorString(errcode)); return; } if (cudaSuccess != cudaMalloc((void**)&d_out, 4)) { printf ("cudaMalloc failed %s:%d\n", __FILE__, __LINE__); return; } printf ("L2 icache parameters (7-10KB of independent ops):\n"); printf (" [Codesize: avg, min, max runtime]\n"); #define DO_LTEST(N,M) \ {int stime=0, max_stime=0, min_stime=0x80000000; \ for (int k=0;k<20;k++) { \ kicache_line_##N##_##M <<<Dg, Db>>>(d_ts, d_out, 4, 3, 100); cudaThreadSynchronize(); \ cudaMemcpy(ts, d_ts, sizeof(ts), cudaMemcpyDeviceToHost); \ stime += ts[1]-ts[0]; \ if (max_stime < ts[1]-ts[0]) max_stime = ts[1]-ts[0]; \ if (min_stime > ts[1]-ts[0]) min_stime = ts[1]-ts[0]; \ } \ printf (" %d: %.4f, %.4f, %.4f\n", N*64+M*8, (stime)/(2000.0*(N*8+M)), (min_stime)/(100.0*(N*8+M)), (max_stime)/(100.0*(N*8+M))); } #define DO_LTESTS(N) DO_LTEST(N,0) DO_LTEST(N,1) DO_LTEST(N,2) DO_LTEST(N,3) DO_LTEST(N,4) DO_LTEST(N,5) DO_LTEST(N,6) DO_LTEST(N,7) DO_LTESTS(118); DO_LTESTS(119); DO_LTESTS(120); DO_LTESTS(121); DO_LTESTS(122); DO_LTESTS(123); DO_LTESTS(124); DO_LTESTS(125); DO_LTESTS(126); DO_LTESTS(127); DO_LTESTS(128); DO_LTESTS(129); DO_LTESTS(130); DO_LTESTS(131); DO_LTESTS(132); DO_LTESTS(133); DO_LTESTS(134); DO_LTESTS(135); DO_LTESTS(136); DO_LTESTS(137); DO_LTESTS(138); DO_LTESTS(139); DO_LTESTS(140); DO_LTESTS(141); DO_LTESTS(142); DO_LTESTS(143); DO_LTESTS(144); DO_LTESTS(145); DO_LTESTS(146); DO_LTESTS(147); DO_LTESTS(148); DO_LTESTS(149); DO_LTESTS(150); DO_LTESTS(151); DO_LTESTS(152); DO_LTESTS(153); DO_LTESTS(154); DO_LTESTS(155); DO_LTESTS(156); DO_LTESTS(157); DO_LTESTS(158); DO_LTESTS(159); DO_LTESTS(160); DO_LTESTS(161); DO_LTESTS(162); DO_LTESTS(163); DO_LTESTS(164); //DO_LTESTS(165); //DO_LTESTS(166); //DO_LTESTS(167); //DO_LTESTS(168); if (1) { printf ("\nMeasuring instruction buffer size:\n"); unsigned times[36] = {0}; Db.x = 256; for (int k=0;k<10000;k++) { kibuffer <<<Dg, Db>>>(d_ts, d_out, k%200+4, 3, 300); cudaThreadSynchronize(); cudaError_t errcode = cudaGetLastError(); if (errcode != cudaSuccess) { printf ("Failed: %s\n", cudaGetErrorString(errcode)); break; } cudaMemcpy(ts, d_ts, sizeof(ts), cudaMemcpyDeviceToHost); for (int i=0;i<36;i++) times[i] += (unsigned)ts[i]-(unsigned)ts[0]; } for (int i=0;i<36;i++) printf (" %d: %.3f\n", i*16, times[i]/(10000.0)); printf ("\n"); } cudaFree(d_ts); cudaFree(d_out); }
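Besides renaming the runtime API, this pair shows hipify replacing the deprecated cudaThreadSynchronize() with hipDeviceSynchronize(). The benchmark itself derives its timings from device-side timestamps that the kernels write into ts[]; purely as a point of comparison, here is a minimal host-side timing sketch with CUDA events (the busy kernel and its launch shape are invented, not taken from the benchmark):

#include <cuda_runtime.h>
#include <cstdio>

__global__ void busy(int its)
{
    volatile int sink = 0;          // volatile keeps the loop from being optimized away
    for (int i = 0; i < its; ++i)
        sink += i;
}

int main()
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    busy<<<1, 256>>>(100000);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);     // wait until the stop event has been reached

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    std::printf("kernel time: %.3f ms\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}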
a8692ce65179e87e6412883db0713b970a906ccd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright 2019-2020 CERN and copyright holders of ALICE O2. // See https://alice-o2.web.cern.ch/copyright for details of the copyright holders. // All rights not expressly granted are reserved. // // This software is distributed under the terms of the GNU General Public // License v3 (GPL Version 3), copied verbatim in the file "COPYING". // // In applying this license CERN does not waive the privileges and immunities // granted to it by virtue of its status as an Intergovernmental Organization // or submit itself to any jurisdiction. /// /// \file Kernels.{cu, hip.cxx} /// \author: [email protected] #include "../Shared/Kernels.h" #include <chrono> #include <cstdio> #include <numeric> // Memory partitioning schema // // |----------------------region 0-----------------|----------------------region 1-----------------| regions -> deafult: 2, to test lower and upper RAM // |--chunk 0--|--chunk 1--|--chunk 2--| *** |--chunk n--| chunks -> default size: 1GB (sing block pins) // |__________________________________________scratch______________________________________________| scratch -> default size: 95% free GPU RAM #define GPUCHECK(error) \ if (error != hipSuccess) { \ printf("%serror: '%s'(%d) at %s:%d%s\n", KRED, hipGetErrorString(error), error, __FILE__, \ __LINE__, KNRM); \ failed("API returned error code."); \ } double bytesToconfig(size_t s) { return (double)s / (1024.0); } double bytesToGB(size_t s) { return (double)s / GB; } bool checkTestChunks(std::vector<std::pair<float, float>>& chunks, size_t availMemSizeGB) { if (!chunks.size()) { return true; } bool check{false}; sort(chunks.begin(), chunks.end()); for (size_t iChunk{0}; iChunk < chunks.size(); ++iChunk) { // Check boundaries if (chunks[iChunk].first + chunks[iChunk].second > availMemSizeGB) { check = false; break; } if (iChunk > 0) { // Check intersections if (chunks[iChunk].first < chunks[iChunk - 1].first + chunks[iChunk - 1].second) { check = false; break; } } check = true; } return check; } // CUDA does not support <type4> operations: // https://forums.developer.nvidia.com/t/swizzling-float4-arithmetic-support/217 #ifndef __HIPCC__ inline __host__ __device__ void operator+=(int4& a, int4 b) { a.x += b.x; a.y += b.y; a.z += b.z; a.w += b.w; } #endif namespace o2 { namespace benchmark { namespace gpu { //////////// // Kernels // Read template <class chunk_t> __global__ void read_k( chunk_t* chunkPtr, size_t chunkSize) { chunk_t sink{0}; for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < chunkSize; i += blockDim.x * gridDim.x) { sink += chunkPtr[i]; } chunkPtr[threadIdx.x] = sink; } // Write template <class chunk_t> __global__ void write_k( chunk_t* chunkPtr, size_t chunkSize) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < chunkSize; i += blockDim.x * gridDim.x) { chunkPtr[i] = 0; } } template <> __global__ void write_k( int4* chunkPtr, size_t chunkSize) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < chunkSize; i += blockDim.x * gridDim.x) { chunkPtr[i] = {0, 1, 0, 0}; }; } // Copy template <class chunk_t> __global__ void copy_k( chunk_t* chunkPtr, size_t chunkSize) { size_t offset = chunkSize / 2; for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < offset; i += blockDim.x * gridDim.x) { chunkPtr[i] = chunkPtr[offset + i]; } } // Random read template <class chunk_t> __global__ void rand_read_k( chunk_t* chunkPtr, size_t chunkSize, int prime) { chunk_t sink{0}; for (size_t i = blockIdx.x * 
blockDim.x + threadIdx.x; i < chunkSize; i += blockDim.x * gridDim.x) { sink += chunkPtr[(i * prime) % chunkSize]; } chunkPtr[threadIdx.x] = sink; } // Random write template <class chunk_t> __global__ void rand_write_k( chunk_t* chunkPtr, size_t chunkSize, int prime) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < chunkSize; i += blockDim.x * gridDim.x) { chunkPtr[(i * prime) % chunkSize] = 0; } } template <> __global__ void rand_write_k( int4* chunkPtr, size_t chunkSize, int prime) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < chunkSize; i += blockDim.x * gridDim.x) { chunkPtr[(i * prime) % chunkSize] = {0, 1, 0, 0}; }; } // Random copy template <class chunk_t> __global__ void rand_copy_k( chunk_t* chunkPtr, size_t chunkSize, int prime) { size_t offset = chunkSize / 2; for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < offset; i += blockDim.x * gridDim.x) { chunkPtr[(i * prime) % offset] = chunkPtr[offset + (i * prime) % offset]; // might be % = 0... } } // Distributed read template <class chunk_t> __global__ void read_dist_k( chunk_t** block_ptr, size_t* block_size) { chunk_t sink{0}; chunk_t* ptr = block_ptr[blockIdx.x]; size_t n = block_size[blockIdx.x]; for (size_t i = threadIdx.x; i < n; i += blockDim.x) { sink += ptr[i]; } ptr[threadIdx.x] = sink; } // Distributed write template <class chunk_t> __global__ void write_dist_k( chunk_t** block_ptr, size_t* block_size) { chunk_t* ptr = block_ptr[blockIdx.x]; size_t n = block_size[blockIdx.x]; for (size_t i = threadIdx.x; i < n; i += blockDim.x) { ptr[i] = 0; } } template <> __global__ void write_dist_k( int4** block_ptr, size_t* block_size) { int4* ptr = block_ptr[blockIdx.x]; size_t n = block_size[blockIdx.x]; for (size_t i = threadIdx.x; i < n; i += blockDim.x) { ptr[i] = {0, 1, 0, 0}; } } // Distributed copy template <class chunk_t> __global__ void copy_dist_k( chunk_t** block_ptr, size_t* block_size) { chunk_t* ptr = block_ptr[blockIdx.x]; size_t n = block_size[blockIdx.x]; size_t offset = n / 2; for (size_t i = threadIdx.x; i < offset; i += blockDim.x) { ptr[i] = ptr[offset + i]; } } // Distributed Random read template <class chunk_t> __global__ void rand_read_dist_k( chunk_t** block_ptr, size_t* block_size, int prime) { chunk_t sink{0}; chunk_t* ptr = block_ptr[blockIdx.x]; size_t n = block_size[blockIdx.x]; for (size_t i = threadIdx.x; i < n; i += blockDim.x) { sink += ptr[(i * prime) % n]; } ptr[threadIdx.x] = sink; } // Distributed Random write template <class chunk_t> __global__ void rand_write_dist_k( chunk_t** block_ptr, size_t* block_size, int prime) { chunk_t* ptr = block_ptr[blockIdx.x]; size_t n = block_size[blockIdx.x]; for (size_t i = threadIdx.x; i < n; i += blockDim.x) { ptr[(i * prime) % n] = 0; } } template <> __global__ void rand_write_dist_k( int4** block_ptr, size_t* block_size, int prime) { int4* ptr = block_ptr[blockIdx.x]; size_t n = block_size[blockIdx.x]; for (size_t i = threadIdx.x; i < n; i += blockDim.x) { ptr[(i * prime) % n] = {0, 1, 0, 0}; } } // Distributed Random copy template <class chunk_t> __global__ void rand_copy_dist_k( chunk_t** block_ptr, size_t* block_size, int prime) { chunk_t* ptr = block_ptr[blockIdx.x]; size_t n = block_size[blockIdx.x]; size_t offset = n / 2; for (size_t i = threadIdx.x; i < offset; i += blockDim.x) { ptr[(i * prime) % offset] = ptr[offset + (i * prime) % offset]; } } } // namespace gpu void printDeviceProp(int deviceId) { const int w1 = 34; std::cout << std::left; std::cout << std::setw(w1) << 
"--------------------------------------------------------------------------------" << std::endl; std::cout << std::setw(w1) << "device#" << deviceId << std::endl; hipDeviceProp_t props; GPUCHECK(hipGetDeviceProperties(&props, deviceId)); std::cout << std::setw(w1) << "Name: " << props.name << std::endl; std::cout << std::setw(w1) << "pciBusID: " << props.pciBusID << std::endl; std::cout << std::setw(w1) << "pciDeviceID: " << props.pciDeviceID << std::endl; std::cout << std::setw(w1) << "pciDomainID: " << props.pciDomainID << std::endl; std::cout << std::setw(w1) << "multiProcessorCount: " << props.multiProcessorCount << std::endl; std::cout << std::setw(w1) << "maxThreadsPerMultiProcessor: " << props.maxThreadsPerMultiProcessor << std::endl; std::cout << std::setw(w1) << "isMultiGpuBoard: " << props.isMultiGpuBoard << std::endl; std::cout << std::setw(w1) << "clockRate: " << (float)props.clockRate / 1000.0 << " Mhz" << std::endl; std::cout << std::setw(w1) << "memoryClockRate: " << (float)props.memoryClockRate / 1000.0 << " Mhz" << std::endl; std::cout << std::setw(w1) << "memoryBusWidth: " << props.memoryBusWidth << std::endl; std::cout << std::setw(w1) << "clockInstructionRate: " << (float)props.clockRate / 1000.0 << " Mhz" << std::endl; std::cout << std::setw(w1) << "totalGlobalMem: " << std::fixed << std::setprecision(2) << bytesToGB(props.totalGlobalMem) << " GB" << std::endl; #if !defined(__HIPCC__) std::cout << std::setw(w1) << "maxSharedMemoryPerMultiProcessor: " << std::fixed << std::setprecision(2) << bytesToconfig(props.sharedMemPerMultiprocessor) << " config" << std::endl; #endif #if defined(__HIPCC__) std::cout << std::setw(w1) << "maxSharedMemoryPerMultiProcessor: " << std::fixed << std::setprecision(2) << bytesToconfig(props.maxSharedMemoryPerMultiProcessor) << " config" << std::endl; #endif std::cout << std::setw(w1) << "totalConstMem: " << props.totalConstMem << std::endl; std::cout << std::setw(w1) << "sharedMemPerBlock: " << (float)props.sharedMemPerBlock / 1024.0 << " config" << std::endl; std::cout << std::setw(w1) << "canMapHostMemory: " << props.canMapHostMemory << std::endl; std::cout << std::setw(w1) << "regsPerBlock: " << props.regsPerBlock << std::endl; std::cout << std::setw(w1) << "warpSize: " << props.warpSize << std::endl; std::cout << std::setw(w1) << "l2CacheSize: " << props.l2CacheSize << std::endl; std::cout << std::setw(w1) << "computeMode: " << props.computeMode << std::endl; std::cout << std::setw(w1) << "maxThreadsPerBlock: " << props.maxThreadsPerBlock << std::endl; std::cout << std::setw(w1) << "maxThreadsDim.x: " << props.maxThreadsDim[0] << std::endl; std::cout << std::setw(w1) << "maxThreadsDim.y: " << props.maxThreadsDim[1] << std::endl; std::cout << std::setw(w1) << "maxThreadsDim.z: " << props.maxThreadsDim[2] << std::endl; std::cout << std::setw(w1) << "maxGridSize.x: " << props.maxGridSize[0] << std::endl; std::cout << std::setw(w1) << "maxGridSize.y: " << props.maxGridSize[1] << std::endl; std::cout << std::setw(w1) << "maxGridSize.z: " << props.maxGridSize[2] << std::endl; std::cout << std::setw(w1) << "major: " << props.major << std::endl; std::cout << std::setw(w1) << "minor: " << props.minor << std::endl; std::cout << std::setw(w1) << "concurrentKernels: " << props.concurrentKernels << std::endl; std::cout << std::setw(w1) << "cooperativeLaunch: " << props.cooperativeLaunch << std::endl; std::cout << std::setw(w1) << "cooperativeMultiDeviceLaunch: " << props.cooperativeMultiDeviceLaunch << std::endl; #if defined(__HIPCC__) std::cout << 
std::setw(w1) << "arch.hasGlobalInt32Atomics: " << props.arch.hasGlobalInt32Atomics << std::endl; std::cout << std::setw(w1) << "arch.hasGlobalFloatAtomicExch: " << props.arch.hasGlobalFloatAtomicExch << std::endl; std::cout << std::setw(w1) << "arch.hasSharedInt32Atomics: " << props.arch.hasSharedInt32Atomics << std::endl; std::cout << std::setw(w1) << "arch.hasSharedFloatAtomicExch: " << props.arch.hasSharedFloatAtomicExch << std::endl; std::cout << std::setw(w1) << "arch.hasFloatAtomicAdd: " << props.arch.hasFloatAtomicAdd << std::endl; std::cout << std::setw(w1) << "arch.hasGlobalInt64Atomics: " << props.arch.hasGlobalInt64Atomics << std::endl; std::cout << std::setw(w1) << "arch.hasSharedInt64Atomics: " << props.arch.hasSharedInt64Atomics << std::endl; std::cout << std::setw(w1) << "arch.hasDoubles: " << props.arch.hasDoubles << std::endl; std::cout << std::setw(w1) << "arch.hasWarpVote: " << props.arch.hasWarpVote << std::endl; std::cout << std::setw(w1) << "arch.hasWarpBallot: " << props.arch.hasWarpBallot << std::endl; std::cout << std::setw(w1) << "arch.hasWarpShuffle: " << props.arch.hasWarpShuffle << std::endl; std::cout << std::setw(w1) << "arch.hasFunnelShift: " << props.arch.hasFunnelShift << std::endl; std::cout << std::setw(w1) << "arch.hasThreadFenceSystem: " << props.arch.hasThreadFenceSystem << std::endl; std::cout << std::setw(w1) << "arch.hasSyncThreadsExt: " << props.arch.hasSyncThreadsExt << std::endl; std::cout << std::setw(w1) << "arch.hasSurfaceFuncs: " << props.arch.hasSurfaceFuncs << std::endl; std::cout << std::setw(w1) << "arch.has3dGrid: " << props.arch.has3dGrid << std::endl; std::cout << std::setw(w1) << "arch.hasDynamicParallelism: " << props.arch.hasDynamicParallelism << std::endl; std::cout << std::setw(w1) << "gcnArchName: " << props.gcnArchName << std::endl; #endif std::cout << std::setw(w1) << "isIntegrated: " << props.integrated << std::endl; std::cout << std::setw(w1) << "maxTexture1D: " << props.maxTexture1D << std::endl; std::cout << std::setw(w1) << "maxTexture2D.width: " << props.maxTexture2D[0] << std::endl; std::cout << std::setw(w1) << "maxTexture2D.height: " << props.maxTexture2D[1] << std::endl; std::cout << std::setw(w1) << "maxTexture3D.width: " << props.maxTexture3D[0] << std::endl; std::cout << std::setw(w1) << "maxTexture3D.height: " << props.maxTexture3D[1] << std::endl; std::cout << std::setw(w1) << "maxTexture3D.depth: " << props.maxTexture3D[2] << std::endl; #if defined(__HIPCC__) std::cout << std::setw(w1) << "isLargeBar: " << props.isLargeBar << std::endl; std::cout << std::setw(w1) << "asicRevision: " << props.asicRevision << std::endl; #endif int deviceCnt; GPUCHECK(hipGetDeviceCount(&deviceCnt)); std::cout << std::setw(w1) << "peers: "; for (int i = 0; i < deviceCnt; i++) { int isPeer; GPUCHECK(hipDeviceCanAccessPeer(&isPeer, i, deviceId)); if (isPeer) { std::cout << "device#" << i << " "; } } std::cout << std::endl; std::cout << std::setw(w1) << "non-peers: "; for (int i = 0; i < deviceCnt; i++) { int isPeer; GPUCHECK(hipDeviceCanAccessPeer(&isPeer, i, deviceId)); if (!isPeer) { std::cout << "device#" << i << " "; } } std::cout << std::endl; size_t free, total; GPUCHECK(hipMemGetInfo(&free, &total)); std::cout << std::fixed << std::setprecision(2); std::cout << std::setw(w1) << "memInfo.total: " << bytesToGB(total) << " GB" << std::endl; std::cout << std::setw(w1) << "memInfo.free: " << bytesToGB(free) << " GB (" << std::setprecision(0) << (float)free / total * 100.0 << "%)" << std::endl; } template <class chunk_t> template 
<typename... T> float GPUbenchmark<chunk_t>::runSequential(void (*kernel)(chunk_t*, size_t, T...), std::pair<float, float>& chunk, int nLaunches, int nBlocks, int nThreads, T&... args) // run for each chunk { float milliseconds{0.f}; hipEvent_t start, stop; hipStream_t stream; GPUCHECK(hipStreamCreate(&stream)); GPUCHECK(hipSetDevice(mOptions.deviceId)); chunk_t* chunkPtr = getCustomPtr<chunk_t>(mState.scratchPtr, chunk.first); // Warm up (hipLaunchKernelGGL((*kernel)), dim3(nBlocks), dim3(nThreads), 0, stream, chunkPtr, getBufferCapacity<chunk_t>(chunk.second, mOptions.prime), args...); GPUCHECK(hipEventCreate(&start)); GPUCHECK(hipEventCreate(&stop)); GPUCHECK(hipEventRecord(start)); for (auto iLaunch{0}; iLaunch < nLaunches; ++iLaunch) { // Schedule all the requested kernel launches (hipLaunchKernelGGL((*kernel)), dim3(nBlocks), dim3(nThreads), 0, stream, chunkPtr, getBufferCapacity<chunk_t>(chunk.second, mOptions.prime), args...); // NOLINT: clang-tidy false-positive } GPUCHECK(hipEventRecord(stop)); // record checkpoint GPUCHECK(hipEventSynchronize(stop)); // synchronize executions GPUCHECK(hipEventElapsedTime(&milliseconds, start, stop)); GPUCHECK(hipEventDestroy(start)); GPUCHECK(hipEventDestroy(stop)); GPUCHECK(hipStreamDestroy(stream)); return milliseconds; } template <class chunk_t> template <typename... T> std::vector<float> GPUbenchmark<chunk_t>::runConcurrent(void (*kernel)(chunk_t*, size_t, T...), std::vector<std::pair<float, float>>& chunkRanges, int nLaunches, int dimStreams, int nBlocks, int nThreads, T&... args) { auto nChunks = chunkRanges.size(); std::vector<float> results(nChunks + 1); // last spot is for the host time std::vector<hipEvent_t> starts(nChunks), stops(nChunks); std::vector<hipStream_t> streams(dimStreams); GPUCHECK(hipSetDevice(mOptions.deviceId)); for (auto iStream{0}; iStream < dimStreams; ++iStream) { GPUCHECK(hipStreamCreate(&(streams.at(iStream)))); // round-robin on stream pool } for (size_t iChunk{0}; iChunk < nChunks; ++iChunk) { GPUCHECK(hipEventCreate(&(starts[iChunk]))); GPUCHECK(hipEventCreate(&(stops[iChunk]))); } // Warm up on every chunk for (size_t iChunk{0}; iChunk < nChunks; ++iChunk) { auto& chunk = chunkRanges[iChunk]; chunk_t* chunkPtr = getCustomPtr<chunk_t>(mState.scratchPtr, chunk.first); (hipLaunchKernelGGL((*kernel)), dim3(nBlocks), dim3(nThreads), 0, streams[iChunk % dimStreams], chunkPtr, getBufferCapacity<chunk_t>(chunk.second, mOptions.prime), args...); } auto start = std::chrono::high_resolution_clock::now(); for (size_t iChunk{0}; iChunk < nChunks; ++iChunk) { auto& chunk = chunkRanges[iChunk]; chunk_t* chunkPtr = getCustomPtr<chunk_t>(mState.scratchPtr, chunk.first); GPUCHECK(hipEventRecord(starts[iChunk], streams[iChunk % dimStreams])); for (auto iLaunch{0}; iLaunch < nLaunches; ++iLaunch) { (hipLaunchKernelGGL((*kernel)), dim3(nBlocks), dim3(nThreads), 0, streams[iChunk % dimStreams], chunkPtr, getBufferCapacity<chunk_t>(chunk.second, mOptions.prime), args...); } GPUCHECK(hipEventRecord(stops[iChunk], streams[iChunk % dimStreams])); } for (size_t iChunk{0}; iChunk < nChunks; ++iChunk) { GPUCHECK(hipEventSynchronize(stops[iChunk])); GPUCHECK(hipEventElapsedTime(&(results.at(iChunk)), starts[iChunk], stops[iChunk])); GPUCHECK(hipEventDestroy(starts[iChunk])); GPUCHECK(hipEventDestroy(stops[iChunk])); } GPUCHECK(hipDeviceSynchronize()); auto end = std::chrono::high_resolution_clock::now(); std::chrono::duration<double, std::milli> diff_t{end - start}; for (auto iStream{0}; iStream < dimStreams; ++iStream) { 
GPUCHECK(hipStreamDestroy(streams[iStream])); } results[nChunks] = diff_t.count(); // register host time on latest spot return results; } template <class chunk_t> template <typename... T> float GPUbenchmark<chunk_t>::runDistributed(void (*kernel)(chunk_t**, size_t*, T...), std::vector<std::pair<float, float>>& chunkRanges, int nLaunches, size_t nBlocks, int nThreads, T&... args) { std::vector<chunk_t*> chunkPtrs(chunkRanges.size()); // Pointers to the beginning of each chunk std::vector<chunk_t*> ptrPerBlocks(nBlocks); // Pointers for each block std::vector<size_t> perBlockCapacity(nBlocks); // Capacity of sub-buffer for block float totChunkGB{0.f}; size_t totComputedBlocks{0}; for (size_t iChunk{0}; iChunk < chunkRanges.size(); ++iChunk) { chunkPtrs[iChunk] = getCustomPtr<chunk_t>(mState.scratchPtr, chunkRanges[iChunk].first); totChunkGB += chunkRanges[iChunk].second; } int index{0}; for (size_t iChunk{0}; iChunk < chunkRanges.size(); ++iChunk) { float percFromMem = chunkRanges[iChunk].second / totChunkGB; int blocksPerChunk = percFromMem * nBlocks; totComputedBlocks += blocksPerChunk; for (int iBlock{0}; iBlock < blocksPerChunk; ++iBlock, ++index) { float memPerBlock = chunkRanges[iChunk].second / blocksPerChunk; ptrPerBlocks[index] = getCustomPtr<chunk_t>(chunkPtrs[iChunk], iBlock * memPerBlock); perBlockCapacity[index] = getBufferCapacity<chunk_t>(memPerBlock, mOptions.prime); } } if (totComputedBlocks != nBlocks) { std::cerr << " - \033[1;33mWarning: Sum of used blocks (" << totComputedBlocks << ") is different from requested one (" << nBlocks << ")!\e[0m" << std::endl; } if (mOptions.dumpChunks) { for (size_t iChunk{0}; iChunk < totComputedBlocks; ++iChunk) { std::cout << " - block " << iChunk << " address: " << ptrPerBlocks[iChunk] << ", size: " << perBlockCapacity[iChunk] << std::endl; } } // Setup chunk_t** block_ptr; size_t* block_size; GPUCHECK(hipMalloc(reinterpret_cast<void**>(&block_ptr), nBlocks * sizeof(chunk_t*))); GPUCHECK(hipMalloc(reinterpret_cast<void**>(&block_size), nBlocks * sizeof(size_t))); GPUCHECK(hipMemcpy(block_ptr, ptrPerBlocks.data(), nBlocks * sizeof(chunk_t*), hipMemcpyHostToDevice)); GPUCHECK(hipMemcpy(block_size, perBlockCapacity.data(), nBlocks * sizeof(size_t), hipMemcpyHostToDevice)); float milliseconds{0.f}; hipEvent_t start, stop; hipStream_t stream; GPUCHECK(hipStreamCreate(&stream)); GPUCHECK(hipSetDevice(mOptions.deviceId)); GPUCHECK(hipEventCreate(&start)); GPUCHECK(hipEventCreate(&stop)); // Warm up (hipLaunchKernelGGL((*kernel)), dim3(totComputedBlocks), dim3(nThreads), 0, stream, block_ptr, block_size, args...); GPUCHECK(hipEventRecord(start)); for (auto iLaunch{0}; iLaunch < nLaunches; ++iLaunch) { // Schedule all the requested kernel launches (hipLaunchKernelGGL((*kernel)), dim3(totComputedBlocks), dim3(nThreads), 0, stream, block_ptr, block_size, args...); // NOLINT: clang-tidy false-positive } GPUCHECK(hipEventRecord(stop)); // record checkpoint GPUCHECK(hipEventSynchronize(stop)); // synchronize executions GPUCHECK(hipEventElapsedTime(&milliseconds, start, stop)); GPUCHECK(hipEventDestroy(start)); GPUCHECK(hipEventDestroy(stop)); GPUCHECK(hipStreamDestroy(stream)); return milliseconds; } template <class chunk_t> void GPUbenchmark<chunk_t>::printDevices() { int deviceCnt; GPUCHECK(hipGetDeviceCount(&deviceCnt)); for (int i = 0; i < deviceCnt; i++) { GPUCHECK(hipSetDevice(i)); printDeviceProp(i); } } template <class chunk_t> void GPUbenchmark<chunk_t>::globalInit() { hipDeviceProp_t props; size_t free; // Fetch and store features 
GPUCHECK(hipGetDeviceProperties(&props, mOptions.deviceId)); GPUCHECK(hipMemGetInfo(&free, &mState.totalMemory)); GPUCHECK(hipSetDevice(mOptions.deviceId)); mState.chunkReservedGB = mOptions.chunkReservedGB; mState.iterations = mOptions.kernelLaunches; mState.streams = mOptions.streams; mState.testChunks = mOptions.testChunks; if (!checkTestChunks(mState.testChunks, mOptions.freeMemoryFractionToAllocate * free / GB)) { std::cerr << "Failed to configure memory chunks: check arbitrary chunks boundaries." << std::endl; exit(1); } mState.nMultiprocessors = props.multiProcessorCount; mState.nMaxThreadsPerBlock = props.maxThreadsPerMultiProcessor; mState.nMaxThreadsPerDimension = props.maxThreadsDim[0]; mState.scratchSize = static_cast<long int>(mOptions.freeMemoryFractionToAllocate * free); if (mState.testChunks.empty()) { for (auto j{0}; j < mState.getMaxChunks() * mState.chunkReservedGB; j += mState.chunkReservedGB) { mState.testChunks.emplace_back(j, mState.chunkReservedGB); } } if (!mOptions.raw) { std::cout << " Running on: \033[1;31m" << props.name << "\e[0m" << std::endl; } // Allocate scratch on GPU GPUCHECK(hipMalloc(reinterpret_cast<void**>(&mState.scratchPtr), mState.scratchSize)); GPUCHECK(hipMemset(mState.scratchPtr, 0, mState.scratchSize)) if (!mOptions.raw) { std::cout << " Buffer type: \e[1m" << getType<chunk_t>() << "\e[0m" << std::endl << " Allocated: " << std::setprecision(2) << bytesToGB(mState.scratchSize) << "/" << std::setprecision(2) << bytesToGB(mState.totalMemory) << "(GB) [" << std::setprecision(3) << (100.f) * (mState.scratchSize / (float)mState.totalMemory) << "%]\n" << " Available streams: " << mState.getStreamsPoolSize() << "\n\n"; } } template <class chunk_t> void GPUbenchmark<chunk_t>::initTest(Test test) { if (!mOptions.raw) { std::cout << " \033[1;33m" << getType<chunk_t>() << "\033[0m " << test << " benchmark with \e[1m" << mOptions.nTests << "\e[0m runs and \e[1m" << mOptions.kernelLaunches << "\e[0m kernel launches" << std::endl; } GPUCHECK(hipSetDevice(mOptions.deviceId)); } template <class chunk_t> void GPUbenchmark<chunk_t>::runTest(Test test, Mode mode, KernelConfig config) { mResultWriter.get()->addBenchmarkEntry(getTestName(mode, test, config), getType<chunk_t>(), mState.getMaxChunks()); auto dimGrid{mState.nMultiprocessors}; auto nBlocks{(config == KernelConfig::Single) ? 1 : (config == KernelConfig::Multi) ? dimGrid / mState.testChunks.size() : (config == KernelConfig::All) ? 
dimGrid : mOptions.numBlocks}; size_t nThreads; if (mOptions.numThreads < 0) { nThreads = ::min(mState.nMaxThreadsPerDimension, mState.nMaxThreadsPerBlock); } else { nThreads = mOptions.numThreads; } nThreads *= mOptions.threadPoolFraction; void (*kernel)(chunk_t*, size_t) = &gpu::read_k<chunk_t>; // Initialising to a default value void (*kernel_distributed)(chunk_t**, size_t*) = &gpu::read_dist_k<chunk_t>; // Initialising to a default value void (*kernel_rand)(chunk_t*, size_t, int) = &gpu::rand_read_k<chunk_t>; // Initialising to a default value void (*kernel_rand_distributed)(chunk_t**, size_t*, int) = &gpu::rand_read_dist_k<chunk_t>; // Initialising to a default value bool is_random{false}; if (mode != Mode::Distributed) { switch (test) { case Test::Read: { kernel = &gpu::read_k<chunk_t>; break; } case Test::Write: { kernel = &gpu::write_k<chunk_t>; break; } case Test::Copy: { kernel = &gpu::copy_k<chunk_t>; break; } case Test::RandomRead: { kernel_rand = &gpu::rand_read_k<chunk_t>; is_random = true; break; } case Test::RandomWrite: { kernel_rand = &gpu::rand_write_k<chunk_t>; is_random = true; break; } case Test::RandomCopy: { kernel_rand = &gpu::rand_copy_k<chunk_t>; is_random = true; break; } } } else { switch (test) { case Test::Read: { kernel_distributed = &gpu::read_dist_k<chunk_t>; break; } case Test::Write: { kernel_distributed = &gpu::write_dist_k<chunk_t>; break; } case Test::Copy: { kernel_distributed = &gpu::copy_dist_k<chunk_t>; break; } case Test::RandomRead: { kernel_rand_distributed = &gpu::rand_read_dist_k<chunk_t>; is_random = true; break; } case Test::RandomWrite: { kernel_rand_distributed = &gpu::rand_write_dist_k<chunk_t>; is_random = true; break; } case Test::RandomCopy: { kernel_rand_distributed = &gpu::rand_copy_dist_k<chunk_t>; is_random = true; break; } } } for (auto measurement{0}; measurement < mOptions.nTests; ++measurement) { if (!mOptions.raw) { std::cout << " " << mode << " " << test << " " << config << " block(s) (" << measurement + 1 << "/" << mOptions.nTests << "): \n" << " - blocks per kernel: " << nBlocks << "/" << dimGrid << "\n" << " - threads per block: " << (int)nThreads << "\n"; } if (mode == Mode::Sequential) { if (!mOptions.raw) { std::cout << " - per chunk throughput:\n"; } for (size_t iChunk{0}; iChunk < mState.testChunks.size(); ++iChunk) { // loop over single chunks separately auto& chunk = mState.testChunks[iChunk]; float result{0.f}; if (!is_random) { result = runSequential(kernel, chunk, mState.getNKernelLaunches(), nBlocks, nThreads); } else { result = runSequential(kernel_rand, chunk, mState.getNKernelLaunches(), nBlocks, nThreads, mOptions.prime); } float chunkSize = (float)getBufferCapacity<chunk_t>(chunk.second, mOptions.prime) * sizeof(chunk_t) / (float)GB; auto throughput = computeThroughput(test, result, chunkSize, mState.getNKernelLaunches()); if (!mOptions.raw) { std::cout << " " << ((mState.testChunks.size() - iChunk != 1) ? 
" " : " ") << iChunk + 1 << "/" << mState.testChunks.size() << ": [" << chunk.first << "-" << chunk.first + chunk.second << ") \e[1m" << throughput << " GB/s \e[0m(" << result * 1e-3 << " s)\n"; } else { std::cout << "" << measurement << "\t" << iChunk << "\t" << throughput << "\t" << chunkSize << "\t" << result << std::endl; } mResultWriter.get()->storeBenchmarkEntry(test, iChunk, result, chunk.second, mState.getNKernelLaunches()); } } else if (mode == Mode::Concurrent) { if (!mOptions.raw) { std::cout << " - per chunk throughput:\n"; } std::vector<float> results; if (!is_random) { results = runConcurrent(kernel, mState.testChunks, mState.getNKernelLaunches(), mState.getStreamsPoolSize(), nBlocks, nThreads); } else { results = runConcurrent(kernel_rand, mState.testChunks, mState.getNKernelLaunches(), mState.getStreamsPoolSize(), nBlocks, nThreads, mOptions.prime); } float sum{0}; for (size_t iChunk{0}; iChunk < mState.testChunks.size(); ++iChunk) { auto& chunk = mState.testChunks[iChunk]; float chunkSize = (float)getBufferCapacity<chunk_t>(chunk.second, mOptions.prime) * sizeof(chunk_t) / (float)GB; auto throughput = computeThroughput(test, results[iChunk], chunkSize, mState.getNKernelLaunches()); sum += throughput; if (!mOptions.raw) { std::cout << " " << ((mState.testChunks.size() - iChunk != 1) ? " " : " ") << iChunk + 1 << "/" << mState.testChunks.size() << ": [" << chunk.first << "-" << chunk.first + chunk.second << ") \e[1m" << throughput << " GB/s \e[0m(" << results[iChunk] * 1e-3 << " s)\n"; } else { std::cout << "" << measurement << "\t" << iChunk << "\t" << throughput << "\t" << chunkSize << "\t" << results[iChunk] << std::endl; } mResultWriter.get()->storeBenchmarkEntry(test, iChunk, results[iChunk], chunk.second, mState.getNKernelLaunches()); } if (mState.testChunks.size() > 1) { if (!mOptions.raw) { std::cout << " - total throughput: \e[1m" << sum << " GB/s \e[0m" << std::endl; } } // Add throughput computed via system time measurement float tot{0}; for (auto& chunk : mState.testChunks) { tot += chunk.second; } if (!mOptions.raw) { std::cout << " - total throughput with host time: \e[1m" << computeThroughput(test, results[mState.testChunks.size()], tot, mState.getNKernelLaunches()) << " GB/s \e[0m (" << std::setw(2) << results[mState.testChunks.size()] / 1000 << " s)" << std::endl; } } else if (mode == Mode::Distributed) { float result{0.f}; if (!is_random) { result = runDistributed(kernel_distributed, mState.testChunks, mState.getNKernelLaunches(), nBlocks, nThreads); } else { result = runDistributed(kernel_rand_distributed, mState.testChunks, mState.getNKernelLaunches(), nBlocks, nThreads, mOptions.prime); } float tot{0}; for (auto& chunk : mState.testChunks) { float chunkSize = (float)getBufferCapacity<chunk_t>(chunk.second, mOptions.prime) * sizeof(chunk_t) / (float)GB; tot += chunkSize; } auto throughput = computeThroughput(test, result, tot, mState.getNKernelLaunches()); if (!mOptions.raw) { std::cout << " throughput: \e[1m" << throughput << " GB/s \e[0m(" << result * 1e-3 << " s)\n"; } else { std::cout << "" << measurement << "\t" << 0 << "\t" << throughput << "\t" << tot << "\t" << result << std::endl; } mResultWriter.get()->storeBenchmarkEntry(test, 0, result, tot, mState.getNKernelLaunches()); } mResultWriter.get()->snapshotBenchmark(); } } template <class chunk_t> void GPUbenchmark<chunk_t>::finalizeTest(Test test) { if (!mOptions.raw) { std::cout << " \033[1;32m done\033[0m" << std::endl; } } template <class chunk_t> void GPUbenchmark<chunk_t>::globalFinalize() { 
GPUCHECK(hipSetDevice(mOptions.deviceId)); GPUCHECK(hipFree(mState.scratchPtr)); } template <class chunk_t> void GPUbenchmark<chunk_t>::run() { globalInit(); for (auto& test : mOptions.tests) { initTest(test); for (auto& mode : mOptions.modes) { for (auto& config : mOptions.pools) { runTest(test, mode, config); } } finalizeTest(test); } globalFinalize(); } template class GPUbenchmark<char>; template class GPUbenchmark<size_t>; template class GPUbenchmark<int>; template class GPUbenchmark<int4>; } // namespace benchmark } // namespace o2
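runConcurrent in the benchmark above distributes chunks over a small stream pool in round-robin fashion (streams[iChunk % dimStreams]) and brackets each chunk with its own event pair. The stream-pool pattern in isolation, as a minimal sketch with invented sizes and a trivial kernel — none of the benchmark's types or helpers are used:

#include <cuda_runtime.h>
#include <vector>

__global__ void touch(char* p, size_t n)
{
    for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
        p[i] = 0;
}

int main()
{
    const int nChunks = 8, nStreams = 3;          // invented sizes
    const size_t chunkBytes = 1 << 20;

    char* buf = nullptr;
    cudaMalloc((void**)&buf, nChunks * chunkBytes);

    std::vector<cudaStream_t> streams(nStreams);
    for (auto& s : streams)
        cudaStreamCreate(&s);

    // round-robin: chunk i runs on stream i % nStreams, so up to nStreams
    // chunks can be in flight at the same time
    for (int i = 0; i < nChunks; ++i)
        touch<<<32, 256, 0, streams[i % nStreams]>>>(buf + i * chunkBytes, chunkBytes);

    cudaDeviceSynchronize();

    for (auto& s : streams)
        cudaStreamDestroy(s);
    cudaFree(buf);
    return 0;
}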
a8692ce65179e87e6412883db0713b970a906ccd.cu
// Copyright 2019-2020 CERN and copyright holders of ALICE O2. // See https://alice-o2.web.cern.ch/copyright for details of the copyright holders. // All rights not expressly granted are reserved. // // This software is distributed under the terms of the GNU General Public // License v3 (GPL Version 3), copied verbatim in the file "COPYING". // // In applying this license CERN does not waive the privileges and immunities // granted to it by virtue of its status as an Intergovernmental Organization // or submit itself to any jurisdiction. /// /// \file Kernels.{cu, hip.cxx} /// \author: [email protected] #include "../Shared/Kernels.h" #include <chrono> #include <cstdio> #include <numeric> // Memory partitioning schema // // |----------------------region 0-----------------|----------------------region 1-----------------| regions -> deafult: 2, to test lower and upper RAM // |--chunk 0--|--chunk 1--|--chunk 2--| *** |--chunk n--| chunks -> default size: 1GB (sing block pins) // |__________________________________________scratch______________________________________________| scratch -> default size: 95% free GPU RAM #define GPUCHECK(error) \ if (error != cudaSuccess) { \ printf("%serror: '%s'(%d) at %s:%d%s\n", KRED, cudaGetErrorString(error), error, __FILE__, \ __LINE__, KNRM); \ failed("API returned error code."); \ } double bytesToconfig(size_t s) { return (double)s / (1024.0); } double bytesToGB(size_t s) { return (double)s / GB; } bool checkTestChunks(std::vector<std::pair<float, float>>& chunks, size_t availMemSizeGB) { if (!chunks.size()) { return true; } bool check{false}; sort(chunks.begin(), chunks.end()); for (size_t iChunk{0}; iChunk < chunks.size(); ++iChunk) { // Check boundaries if (chunks[iChunk].first + chunks[iChunk].second > availMemSizeGB) { check = false; break; } if (iChunk > 0) { // Check intersections if (chunks[iChunk].first < chunks[iChunk - 1].first + chunks[iChunk - 1].second) { check = false; break; } } check = true; } return check; } // CUDA does not support <type4> operations: // https://forums.developer.nvidia.com/t/swizzling-float4-arithmetic-support/217 #ifndef __HIPCC__ inline __host__ __device__ void operator+=(int4& a, int4 b) { a.x += b.x; a.y += b.y; a.z += b.z; a.w += b.w; } #endif namespace o2 { namespace benchmark { namespace gpu { //////////// // Kernels // Read template <class chunk_t> __global__ void read_k( chunk_t* chunkPtr, size_t chunkSize) { chunk_t sink{0}; for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < chunkSize; i += blockDim.x * gridDim.x) { sink += chunkPtr[i]; } chunkPtr[threadIdx.x] = sink; } // Write template <class chunk_t> __global__ void write_k( chunk_t* chunkPtr, size_t chunkSize) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < chunkSize; i += blockDim.x * gridDim.x) { chunkPtr[i] = 0; } } template <> __global__ void write_k( int4* chunkPtr, size_t chunkSize) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < chunkSize; i += blockDim.x * gridDim.x) { chunkPtr[i] = {0, 1, 0, 0}; }; } // Copy template <class chunk_t> __global__ void copy_k( chunk_t* chunkPtr, size_t chunkSize) { size_t offset = chunkSize / 2; for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < offset; i += blockDim.x * gridDim.x) { chunkPtr[i] = chunkPtr[offset + i]; } } // Random read template <class chunk_t> __global__ void rand_read_k( chunk_t* chunkPtr, size_t chunkSize, int prime) { chunk_t sink{0}; for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < chunkSize; i += blockDim.x * gridDim.x) { sink += chunkPtr[(i * 
prime) % chunkSize]; } chunkPtr[threadIdx.x] = sink; } // Random write template <class chunk_t> __global__ void rand_write_k( chunk_t* chunkPtr, size_t chunkSize, int prime) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < chunkSize; i += blockDim.x * gridDim.x) { chunkPtr[(i * prime) % chunkSize] = 0; } } template <> __global__ void rand_write_k( int4* chunkPtr, size_t chunkSize, int prime) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < chunkSize; i += blockDim.x * gridDim.x) { chunkPtr[(i * prime) % chunkSize] = {0, 1, 0, 0}; }; } // Random copy template <class chunk_t> __global__ void rand_copy_k( chunk_t* chunkPtr, size_t chunkSize, int prime) { size_t offset = chunkSize / 2; for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < offset; i += blockDim.x * gridDim.x) { chunkPtr[(i * prime) % offset] = chunkPtr[offset + (i * prime) % offset]; // might be % = 0... } } // Distributed read template <class chunk_t> __global__ void read_dist_k( chunk_t** block_ptr, size_t* block_size) { chunk_t sink{0}; chunk_t* ptr = block_ptr[blockIdx.x]; size_t n = block_size[blockIdx.x]; for (size_t i = threadIdx.x; i < n; i += blockDim.x) { sink += ptr[i]; } ptr[threadIdx.x] = sink; } // Distributed write template <class chunk_t> __global__ void write_dist_k( chunk_t** block_ptr, size_t* block_size) { chunk_t* ptr = block_ptr[blockIdx.x]; size_t n = block_size[blockIdx.x]; for (size_t i = threadIdx.x; i < n; i += blockDim.x) { ptr[i] = 0; } } template <> __global__ void write_dist_k( int4** block_ptr, size_t* block_size) { int4* ptr = block_ptr[blockIdx.x]; size_t n = block_size[blockIdx.x]; for (size_t i = threadIdx.x; i < n; i += blockDim.x) { ptr[i] = {0, 1, 0, 0}; } } // Distributed copy template <class chunk_t> __global__ void copy_dist_k( chunk_t** block_ptr, size_t* block_size) { chunk_t* ptr = block_ptr[blockIdx.x]; size_t n = block_size[blockIdx.x]; size_t offset = n / 2; for (size_t i = threadIdx.x; i < offset; i += blockDim.x) { ptr[i] = ptr[offset + i]; } } // Distributed Random read template <class chunk_t> __global__ void rand_read_dist_k( chunk_t** block_ptr, size_t* block_size, int prime) { chunk_t sink{0}; chunk_t* ptr = block_ptr[blockIdx.x]; size_t n = block_size[blockIdx.x]; for (size_t i = threadIdx.x; i < n; i += blockDim.x) { sink += ptr[(i * prime) % n]; } ptr[threadIdx.x] = sink; } // Distributed Random write template <class chunk_t> __global__ void rand_write_dist_k( chunk_t** block_ptr, size_t* block_size, int prime) { chunk_t* ptr = block_ptr[blockIdx.x]; size_t n = block_size[blockIdx.x]; for (size_t i = threadIdx.x; i < n; i += blockDim.x) { ptr[(i * prime) % n] = 0; } } template <> __global__ void rand_write_dist_k( int4** block_ptr, size_t* block_size, int prime) { int4* ptr = block_ptr[blockIdx.x]; size_t n = block_size[blockIdx.x]; for (size_t i = threadIdx.x; i < n; i += blockDim.x) { ptr[(i * prime) % n] = {0, 1, 0, 0}; } } // Distributed Random copy template <class chunk_t> __global__ void rand_copy_dist_k( chunk_t** block_ptr, size_t* block_size, int prime) { chunk_t* ptr = block_ptr[blockIdx.x]; size_t n = block_size[blockIdx.x]; size_t offset = n / 2; for (size_t i = threadIdx.x; i < offset; i += blockDim.x) { ptr[(i * prime) % offset] = ptr[offset + (i * prime) % offset]; } } } // namespace gpu void printDeviceProp(int deviceId) { const int w1 = 34; std::cout << std::left; std::cout << std::setw(w1) << "--------------------------------------------------------------------------------" << std::endl; std::cout << std::setw(w1) << 
"device#" << deviceId << std::endl; cudaDeviceProp props; GPUCHECK(cudaGetDeviceProperties(&props, deviceId)); std::cout << std::setw(w1) << "Name: " << props.name << std::endl; std::cout << std::setw(w1) << "pciBusID: " << props.pciBusID << std::endl; std::cout << std::setw(w1) << "pciDeviceID: " << props.pciDeviceID << std::endl; std::cout << std::setw(w1) << "pciDomainID: " << props.pciDomainID << std::endl; std::cout << std::setw(w1) << "multiProcessorCount: " << props.multiProcessorCount << std::endl; std::cout << std::setw(w1) << "maxThreadsPerMultiProcessor: " << props.maxThreadsPerMultiProcessor << std::endl; std::cout << std::setw(w1) << "isMultiGpuBoard: " << props.isMultiGpuBoard << std::endl; std::cout << std::setw(w1) << "clockRate: " << (float)props.clockRate / 1000.0 << " Mhz" << std::endl; std::cout << std::setw(w1) << "memoryClockRate: " << (float)props.memoryClockRate / 1000.0 << " Mhz" << std::endl; std::cout << std::setw(w1) << "memoryBusWidth: " << props.memoryBusWidth << std::endl; std::cout << std::setw(w1) << "clockInstructionRate: " << (float)props.clockRate / 1000.0 << " Mhz" << std::endl; std::cout << std::setw(w1) << "totalGlobalMem: " << std::fixed << std::setprecision(2) << bytesToGB(props.totalGlobalMem) << " GB" << std::endl; #if !defined(__CUDACC__) std::cout << std::setw(w1) << "maxSharedMemoryPerMultiProcessor: " << std::fixed << std::setprecision(2) << bytesToconfig(props.sharedMemPerMultiprocessor) << " config" << std::endl; #endif #if defined(__HIPCC__) std::cout << std::setw(w1) << "maxSharedMemoryPerMultiProcessor: " << std::fixed << std::setprecision(2) << bytesToconfig(props.maxSharedMemoryPerMultiProcessor) << " config" << std::endl; #endif std::cout << std::setw(w1) << "totalConstMem: " << props.totalConstMem << std::endl; std::cout << std::setw(w1) << "sharedMemPerBlock: " << (float)props.sharedMemPerBlock / 1024.0 << " config" << std::endl; std::cout << std::setw(w1) << "canMapHostMemory: " << props.canMapHostMemory << std::endl; std::cout << std::setw(w1) << "regsPerBlock: " << props.regsPerBlock << std::endl; std::cout << std::setw(w1) << "warpSize: " << props.warpSize << std::endl; std::cout << std::setw(w1) << "l2CacheSize: " << props.l2CacheSize << std::endl; std::cout << std::setw(w1) << "computeMode: " << props.computeMode << std::endl; std::cout << std::setw(w1) << "maxThreadsPerBlock: " << props.maxThreadsPerBlock << std::endl; std::cout << std::setw(w1) << "maxThreadsDim.x: " << props.maxThreadsDim[0] << std::endl; std::cout << std::setw(w1) << "maxThreadsDim.y: " << props.maxThreadsDim[1] << std::endl; std::cout << std::setw(w1) << "maxThreadsDim.z: " << props.maxThreadsDim[2] << std::endl; std::cout << std::setw(w1) << "maxGridSize.x: " << props.maxGridSize[0] << std::endl; std::cout << std::setw(w1) << "maxGridSize.y: " << props.maxGridSize[1] << std::endl; std::cout << std::setw(w1) << "maxGridSize.z: " << props.maxGridSize[2] << std::endl; std::cout << std::setw(w1) << "major: " << props.major << std::endl; std::cout << std::setw(w1) << "minor: " << props.minor << std::endl; std::cout << std::setw(w1) << "concurrentKernels: " << props.concurrentKernels << std::endl; std::cout << std::setw(w1) << "cooperativeLaunch: " << props.cooperativeLaunch << std::endl; std::cout << std::setw(w1) << "cooperativeMultiDeviceLaunch: " << props.cooperativeMultiDeviceLaunch << std::endl; #if defined(__HIPCC__) std::cout << std::setw(w1) << "arch.hasGlobalInt32Atomics: " << props.arch.hasGlobalInt32Atomics << std::endl; std::cout << std::setw(w1) 
<< "arch.hasGlobalFloatAtomicExch: " << props.arch.hasGlobalFloatAtomicExch << std::endl; std::cout << std::setw(w1) << "arch.hasSharedInt32Atomics: " << props.arch.hasSharedInt32Atomics << std::endl; std::cout << std::setw(w1) << "arch.hasSharedFloatAtomicExch: " << props.arch.hasSharedFloatAtomicExch << std::endl; std::cout << std::setw(w1) << "arch.hasFloatAtomicAdd: " << props.arch.hasFloatAtomicAdd << std::endl; std::cout << std::setw(w1) << "arch.hasGlobalInt64Atomics: " << props.arch.hasGlobalInt64Atomics << std::endl; std::cout << std::setw(w1) << "arch.hasSharedInt64Atomics: " << props.arch.hasSharedInt64Atomics << std::endl; std::cout << std::setw(w1) << "arch.hasDoubles: " << props.arch.hasDoubles << std::endl; std::cout << std::setw(w1) << "arch.hasWarpVote: " << props.arch.hasWarpVote << std::endl; std::cout << std::setw(w1) << "arch.hasWarpBallot: " << props.arch.hasWarpBallot << std::endl; std::cout << std::setw(w1) << "arch.hasWarpShuffle: " << props.arch.hasWarpShuffle << std::endl; std::cout << std::setw(w1) << "arch.hasFunnelShift: " << props.arch.hasFunnelShift << std::endl; std::cout << std::setw(w1) << "arch.hasThreadFenceSystem: " << props.arch.hasThreadFenceSystem << std::endl; std::cout << std::setw(w1) << "arch.hasSyncThreadsExt: " << props.arch.hasSyncThreadsExt << std::endl; std::cout << std::setw(w1) << "arch.hasSurfaceFuncs: " << props.arch.hasSurfaceFuncs << std::endl; std::cout << std::setw(w1) << "arch.has3dGrid: " << props.arch.has3dGrid << std::endl; std::cout << std::setw(w1) << "arch.hasDynamicParallelism: " << props.arch.hasDynamicParallelism << std::endl; std::cout << std::setw(w1) << "gcnArchName: " << props.gcnArchName << std::endl; #endif std::cout << std::setw(w1) << "isIntegrated: " << props.integrated << std::endl; std::cout << std::setw(w1) << "maxTexture1D: " << props.maxTexture1D << std::endl; std::cout << std::setw(w1) << "maxTexture2D.width: " << props.maxTexture2D[0] << std::endl; std::cout << std::setw(w1) << "maxTexture2D.height: " << props.maxTexture2D[1] << std::endl; std::cout << std::setw(w1) << "maxTexture3D.width: " << props.maxTexture3D[0] << std::endl; std::cout << std::setw(w1) << "maxTexture3D.height: " << props.maxTexture3D[1] << std::endl; std::cout << std::setw(w1) << "maxTexture3D.depth: " << props.maxTexture3D[2] << std::endl; #if defined(__HIPCC__) std::cout << std::setw(w1) << "isLargeBar: " << props.isLargeBar << std::endl; std::cout << std::setw(w1) << "asicRevision: " << props.asicRevision << std::endl; #endif int deviceCnt; GPUCHECK(cudaGetDeviceCount(&deviceCnt)); std::cout << std::setw(w1) << "peers: "; for (int i = 0; i < deviceCnt; i++) { int isPeer; GPUCHECK(cudaDeviceCanAccessPeer(&isPeer, i, deviceId)); if (isPeer) { std::cout << "device#" << i << " "; } } std::cout << std::endl; std::cout << std::setw(w1) << "non-peers: "; for (int i = 0; i < deviceCnt; i++) { int isPeer; GPUCHECK(cudaDeviceCanAccessPeer(&isPeer, i, deviceId)); if (!isPeer) { std::cout << "device#" << i << " "; } } std::cout << std::endl; size_t free, total; GPUCHECK(cudaMemGetInfo(&free, &total)); std::cout << std::fixed << std::setprecision(2); std::cout << std::setw(w1) << "memInfo.total: " << bytesToGB(total) << " GB" << std::endl; std::cout << std::setw(w1) << "memInfo.free: " << bytesToGB(free) << " GB (" << std::setprecision(0) << (float)free / total * 100.0 << "%)" << std::endl; } template <class chunk_t> template <typename... 
T> float GPUbenchmark<chunk_t>::runSequential(void (*kernel)(chunk_t*, size_t, T...), std::pair<float, float>& chunk, int nLaunches, int nBlocks, int nThreads, T&... args) // run for each chunk { float milliseconds{0.f}; cudaEvent_t start, stop; cudaStream_t stream; GPUCHECK(cudaStreamCreate(&stream)); GPUCHECK(cudaSetDevice(mOptions.deviceId)); chunk_t* chunkPtr = getCustomPtr<chunk_t>(mState.scratchPtr, chunk.first); // Warm up (*kernel)<<<nBlocks, nThreads, 0, stream>>>(chunkPtr, getBufferCapacity<chunk_t>(chunk.second, mOptions.prime), args...); GPUCHECK(cudaEventCreate(&start)); GPUCHECK(cudaEventCreate(&stop)); GPUCHECK(cudaEventRecord(start)); for (auto iLaunch{0}; iLaunch < nLaunches; ++iLaunch) { // Schedule all the requested kernel launches (*kernel)<<<nBlocks, nThreads, 0, stream>>>(chunkPtr, getBufferCapacity<chunk_t>(chunk.second, mOptions.prime), args...); // NOLINT: clang-tidy false-positive } GPUCHECK(cudaEventRecord(stop)); // record checkpoint GPUCHECK(cudaEventSynchronize(stop)); // synchronize executions GPUCHECK(cudaEventElapsedTime(&milliseconds, start, stop)); GPUCHECK(cudaEventDestroy(start)); GPUCHECK(cudaEventDestroy(stop)); GPUCHECK(cudaStreamDestroy(stream)); return milliseconds; } template <class chunk_t> template <typename... T> std::vector<float> GPUbenchmark<chunk_t>::runConcurrent(void (*kernel)(chunk_t*, size_t, T...), std::vector<std::pair<float, float>>& chunkRanges, int nLaunches, int dimStreams, int nBlocks, int nThreads, T&... args) { auto nChunks = chunkRanges.size(); std::vector<float> results(nChunks + 1); // last spot is for the host time std::vector<cudaEvent_t> starts(nChunks), stops(nChunks); std::vector<cudaStream_t> streams(dimStreams); GPUCHECK(cudaSetDevice(mOptions.deviceId)); for (auto iStream{0}; iStream < dimStreams; ++iStream) { GPUCHECK(cudaStreamCreate(&(streams.at(iStream)))); // round-robin on stream pool } for (size_t iChunk{0}; iChunk < nChunks; ++iChunk) { GPUCHECK(cudaEventCreate(&(starts[iChunk]))); GPUCHECK(cudaEventCreate(&(stops[iChunk]))); } // Warm up on every chunk for (size_t iChunk{0}; iChunk < nChunks; ++iChunk) { auto& chunk = chunkRanges[iChunk]; chunk_t* chunkPtr = getCustomPtr<chunk_t>(mState.scratchPtr, chunk.first); (*kernel)<<<nBlocks, nThreads, 0, streams[iChunk % dimStreams]>>>(chunkPtr, getBufferCapacity<chunk_t>(chunk.second, mOptions.prime), args...); } auto start = std::chrono::high_resolution_clock::now(); for (size_t iChunk{0}; iChunk < nChunks; ++iChunk) { auto& chunk = chunkRanges[iChunk]; chunk_t* chunkPtr = getCustomPtr<chunk_t>(mState.scratchPtr, chunk.first); GPUCHECK(cudaEventRecord(starts[iChunk], streams[iChunk % dimStreams])); for (auto iLaunch{0}; iLaunch < nLaunches; ++iLaunch) { (*kernel)<<<nBlocks, nThreads, 0, streams[iChunk % dimStreams]>>>(chunkPtr, getBufferCapacity<chunk_t>(chunk.second, mOptions.prime), args...); } GPUCHECK(cudaEventRecord(stops[iChunk], streams[iChunk % dimStreams])); } for (size_t iChunk{0}; iChunk < nChunks; ++iChunk) { GPUCHECK(cudaEventSynchronize(stops[iChunk])); GPUCHECK(cudaEventElapsedTime(&(results.at(iChunk)), starts[iChunk], stops[iChunk])); GPUCHECK(cudaEventDestroy(starts[iChunk])); GPUCHECK(cudaEventDestroy(stops[iChunk])); } GPUCHECK(cudaDeviceSynchronize()); auto end = std::chrono::high_resolution_clock::now(); std::chrono::duration<double, std::milli> diff_t{end - start}; for (auto iStream{0}; iStream < dimStreams; ++iStream) { GPUCHECK(cudaStreamDestroy(streams[iStream])); } results[nChunks] = diff_t.count(); // register host time on latest spot 
return results; } template <class chunk_t> template <typename... T> float GPUbenchmark<chunk_t>::runDistributed(void (*kernel)(chunk_t**, size_t*, T...), std::vector<std::pair<float, float>>& chunkRanges, int nLaunches, size_t nBlocks, int nThreads, T&... args) { std::vector<chunk_t*> chunkPtrs(chunkRanges.size()); // Pointers to the beginning of each chunk std::vector<chunk_t*> ptrPerBlocks(nBlocks); // Pointers for each block std::vector<size_t> perBlockCapacity(nBlocks); // Capacity of sub-buffer for block float totChunkGB{0.f}; size_t totComputedBlocks{0}; for (size_t iChunk{0}; iChunk < chunkRanges.size(); ++iChunk) { chunkPtrs[iChunk] = getCustomPtr<chunk_t>(mState.scratchPtr, chunkRanges[iChunk].first); totChunkGB += chunkRanges[iChunk].second; } int index{0}; for (size_t iChunk{0}; iChunk < chunkRanges.size(); ++iChunk) { float percFromMem = chunkRanges[iChunk].second / totChunkGB; int blocksPerChunk = percFromMem * nBlocks; totComputedBlocks += blocksPerChunk; for (int iBlock{0}; iBlock < blocksPerChunk; ++iBlock, ++index) { float memPerBlock = chunkRanges[iChunk].second / blocksPerChunk; ptrPerBlocks[index] = getCustomPtr<chunk_t>(chunkPtrs[iChunk], iBlock * memPerBlock); perBlockCapacity[index] = getBufferCapacity<chunk_t>(memPerBlock, mOptions.prime); } } if (totComputedBlocks != nBlocks) { std::cerr << " │ - \033[1;33mWarning: Sum of used blocks (" << totComputedBlocks << ") is different from requested one (" << nBlocks << ")!\e[0m" << std::endl; } if (mOptions.dumpChunks) { for (size_t iChunk{0}; iChunk < totComputedBlocks; ++iChunk) { std::cout << " │ - block " << iChunk << " address: " << ptrPerBlocks[iChunk] << ", size: " << perBlockCapacity[iChunk] << std::endl; } } // Setup chunk_t** block_ptr; size_t* block_size; GPUCHECK(cudaMalloc(reinterpret_cast<void**>(&block_ptr), nBlocks * sizeof(chunk_t*))); GPUCHECK(cudaMalloc(reinterpret_cast<void**>(&block_size), nBlocks * sizeof(size_t))); GPUCHECK(cudaMemcpy(block_ptr, ptrPerBlocks.data(), nBlocks * sizeof(chunk_t*), cudaMemcpyHostToDevice)); GPUCHECK(cudaMemcpy(block_size, perBlockCapacity.data(), nBlocks * sizeof(size_t), cudaMemcpyHostToDevice)); float milliseconds{0.f}; cudaEvent_t start, stop; cudaStream_t stream; GPUCHECK(cudaStreamCreate(&stream)); GPUCHECK(cudaSetDevice(mOptions.deviceId)); GPUCHECK(cudaEventCreate(&start)); GPUCHECK(cudaEventCreate(&stop)); // Warm up (*kernel)<<<totComputedBlocks, nThreads, 0, stream>>>(block_ptr, block_size, args...); GPUCHECK(cudaEventRecord(start)); for (auto iLaunch{0}; iLaunch < nLaunches; ++iLaunch) { // Schedule all the requested kernel launches (*kernel)<<<totComputedBlocks, nThreads, 0, stream>>>(block_ptr, block_size, args...); // NOLINT: clang-tidy false-positive } GPUCHECK(cudaEventRecord(stop)); // record checkpoint GPUCHECK(cudaEventSynchronize(stop)); // synchronize executions GPUCHECK(cudaEventElapsedTime(&milliseconds, start, stop)); GPUCHECK(cudaEventDestroy(start)); GPUCHECK(cudaEventDestroy(stop)); GPUCHECK(cudaStreamDestroy(stream)); return milliseconds; } template <class chunk_t> void GPUbenchmark<chunk_t>::printDevices() { int deviceCnt; GPUCHECK(cudaGetDeviceCount(&deviceCnt)); for (int i = 0; i < deviceCnt; i++) { GPUCHECK(cudaSetDevice(i)); printDeviceProp(i); } } template <class chunk_t> void GPUbenchmark<chunk_t>::globalInit() { cudaDeviceProp props; size_t free; // Fetch and store features GPUCHECK(cudaGetDeviceProperties(&props, mOptions.deviceId)); GPUCHECK(cudaMemGetInfo(&free, &mState.totalMemory)); GPUCHECK(cudaSetDevice(mOptions.deviceId)); 
mState.chunkReservedGB = mOptions.chunkReservedGB; mState.iterations = mOptions.kernelLaunches; mState.streams = mOptions.streams; mState.testChunks = mOptions.testChunks; if (!checkTestChunks(mState.testChunks, mOptions.freeMemoryFractionToAllocate * free / GB)) { std::cerr << "Failed to configure memory chunks: check arbitrary chunks boundaries." << std::endl; exit(1); } mState.nMultiprocessors = props.multiProcessorCount; mState.nMaxThreadsPerBlock = props.maxThreadsPerMultiProcessor; mState.nMaxThreadsPerDimension = props.maxThreadsDim[0]; mState.scratchSize = static_cast<long int>(mOptions.freeMemoryFractionToAllocate * free); if (mState.testChunks.empty()) { for (auto j{0}; j < mState.getMaxChunks() * mState.chunkReservedGB; j += mState.chunkReservedGB) { mState.testChunks.emplace_back(j, mState.chunkReservedGB); } } if (!mOptions.raw) { std::cout << " ◈ Running on: \033[1;31m" << props.name << "\e[0m" << std::endl; } // Allocate scratch on GPU GPUCHECK(cudaMalloc(reinterpret_cast<void**>(&mState.scratchPtr), mState.scratchSize)); GPUCHECK(cudaMemset(mState.scratchPtr, 0, mState.scratchSize)) if (!mOptions.raw) { std::cout << " ├ Buffer type: \e[1m" << getType<chunk_t>() << "\e[0m" << std::endl << " ├ Allocated: " << std::setprecision(2) << bytesToGB(mState.scratchSize) << "/" << std::setprecision(2) << bytesToGB(mState.totalMemory) << "(GB) [" << std::setprecision(3) << (100.f) * (mState.scratchSize / (float)mState.totalMemory) << "%]\n" << " └ Available streams: " << mState.getStreamsPoolSize() << "\n\n"; } } template <class chunk_t> void GPUbenchmark<chunk_t>::initTest(Test test) { if (!mOptions.raw) { std::cout << " ◈ \033[1;33m" << getType<chunk_t>() << "\033[0m " << test << " benchmark with \e[1m" << mOptions.nTests << "\e[0m runs and \e[1m" << mOptions.kernelLaunches << "\e[0m kernel launches" << std::endl; } GPUCHECK(cudaSetDevice(mOptions.deviceId)); } template <class chunk_t> void GPUbenchmark<chunk_t>::runTest(Test test, Mode mode, KernelConfig config) { mResultWriter.get()->addBenchmarkEntry(getTestName(mode, test, config), getType<chunk_t>(), mState.getMaxChunks()); auto dimGrid{mState.nMultiprocessors}; auto nBlocks{(config == KernelConfig::Single) ? 1 : (config == KernelConfig::Multi) ? dimGrid / mState.testChunks.size() : (config == KernelConfig::All) ? 
dimGrid : mOptions.numBlocks}; size_t nThreads; if (mOptions.numThreads < 0) { nThreads = std::min(mState.nMaxThreadsPerDimension, mState.nMaxThreadsPerBlock); } else { nThreads = mOptions.numThreads; } nThreads *= mOptions.threadPoolFraction; void (*kernel)(chunk_t*, size_t) = &gpu::read_k<chunk_t>; // Initialising to a default value void (*kernel_distributed)(chunk_t**, size_t*) = &gpu::read_dist_k<chunk_t>; // Initialising to a default value void (*kernel_rand)(chunk_t*, size_t, int) = &gpu::rand_read_k<chunk_t>; // Initialising to a default value void (*kernel_rand_distributed)(chunk_t**, size_t*, int) = &gpu::rand_read_dist_k<chunk_t>; // Initialising to a default value bool is_random{false}; if (mode != Mode::Distributed) { switch (test) { case Test::Read: { kernel = &gpu::read_k<chunk_t>; break; } case Test::Write: { kernel = &gpu::write_k<chunk_t>; break; } case Test::Copy: { kernel = &gpu::copy_k<chunk_t>; break; } case Test::RandomRead: { kernel_rand = &gpu::rand_read_k<chunk_t>; is_random = true; break; } case Test::RandomWrite: { kernel_rand = &gpu::rand_write_k<chunk_t>; is_random = true; break; } case Test::RandomCopy: { kernel_rand = &gpu::rand_copy_k<chunk_t>; is_random = true; break; } } } else { switch (test) { case Test::Read: { kernel_distributed = &gpu::read_dist_k<chunk_t>; break; } case Test::Write: { kernel_distributed = &gpu::write_dist_k<chunk_t>; break; } case Test::Copy: { kernel_distributed = &gpu::copy_dist_k<chunk_t>; break; } case Test::RandomRead: { kernel_rand_distributed = &gpu::rand_read_dist_k<chunk_t>; is_random = true; break; } case Test::RandomWrite: { kernel_rand_distributed = &gpu::rand_write_dist_k<chunk_t>; is_random = true; break; } case Test::RandomCopy: { kernel_rand_distributed = &gpu::rand_copy_dist_k<chunk_t>; is_random = true; break; } } } for (auto measurement{0}; measurement < mOptions.nTests; ++measurement) { if (!mOptions.raw) { std::cout << " ├ " << mode << " " << test << " " << config << " block(s) (" << measurement + 1 << "/" << mOptions.nTests << "): \n" << " │ - blocks per kernel: " << nBlocks << "/" << dimGrid << "\n" << " │ - threads per block: " << (int)nThreads << "\n"; } if (mode == Mode::Sequential) { if (!mOptions.raw) { std::cout << " │ - per chunk throughput:\n"; } for (size_t iChunk{0}; iChunk < mState.testChunks.size(); ++iChunk) { // loop over single chunks separately auto& chunk = mState.testChunks[iChunk]; float result{0.f}; if (!is_random) { result = runSequential(kernel, chunk, mState.getNKernelLaunches(), nBlocks, nThreads); } else { result = runSequential(kernel_rand, chunk, mState.getNKernelLaunches(), nBlocks, nThreads, mOptions.prime); } float chunkSize = (float)getBufferCapacity<chunk_t>(chunk.second, mOptions.prime) * sizeof(chunk_t) / (float)GB; auto throughput = computeThroughput(test, result, chunkSize, mState.getNKernelLaunches()); if (!mOptions.raw) { std::cout << " │ " << ((mState.testChunks.size() - iChunk != 1) ? 
"├ " : "└ ") << iChunk + 1 << "/" << mState.testChunks.size() << ": [" << chunk.first << "-" << chunk.first + chunk.second << ") \e[1m" << throughput << " GB/s \e[0m(" << result * 1e-3 << " s)\n"; } else { std::cout << "" << measurement << "\t" << iChunk << "\t" << throughput << "\t" << chunkSize << "\t" << result << std::endl; } mResultWriter.get()->storeBenchmarkEntry(test, iChunk, result, chunk.second, mState.getNKernelLaunches()); } } else if (mode == Mode::Concurrent) { if (!mOptions.raw) { std::cout << " │ - per chunk throughput:\n"; } std::vector<float> results; if (!is_random) { results = runConcurrent(kernel, mState.testChunks, mState.getNKernelLaunches(), mState.getStreamsPoolSize(), nBlocks, nThreads); } else { results = runConcurrent(kernel_rand, mState.testChunks, mState.getNKernelLaunches(), mState.getStreamsPoolSize(), nBlocks, nThreads, mOptions.prime); } float sum{0}; for (size_t iChunk{0}; iChunk < mState.testChunks.size(); ++iChunk) { auto& chunk = mState.testChunks[iChunk]; float chunkSize = (float)getBufferCapacity<chunk_t>(chunk.second, mOptions.prime) * sizeof(chunk_t) / (float)GB; auto throughput = computeThroughput(test, results[iChunk], chunkSize, mState.getNKernelLaunches()); sum += throughput; if (!mOptions.raw) { std::cout << " │ " << ((mState.testChunks.size() - iChunk != 1) ? "├ " : "└ ") << iChunk + 1 << "/" << mState.testChunks.size() << ": [" << chunk.first << "-" << chunk.first + chunk.second << ") \e[1m" << throughput << " GB/s \e[0m(" << results[iChunk] * 1e-3 << " s)\n"; } else { std::cout << "" << measurement << "\t" << iChunk << "\t" << throughput << "\t" << chunkSize << "\t" << results[iChunk] << std::endl; } mResultWriter.get()->storeBenchmarkEntry(test, iChunk, results[iChunk], chunk.second, mState.getNKernelLaunches()); } if (mState.testChunks.size() > 1) { if (!mOptions.raw) { std::cout << " │ - total throughput: \e[1m" << sum << " GB/s \e[0m" << std::endl; } } // Add throughput computed via system time measurement float tot{0}; for (auto& chunk : mState.testChunks) { tot += chunk.second; } if (!mOptions.raw) { std::cout << " │ - total throughput with host time: \e[1m" << computeThroughput(test, results[mState.testChunks.size()], tot, mState.getNKernelLaunches()) << " GB/s \e[0m (" << std::setw(2) << results[mState.testChunks.size()] / 1000 << " s)" << std::endl; } } else if (mode == Mode::Distributed) { float result{0.f}; if (!is_random) { result = runDistributed(kernel_distributed, mState.testChunks, mState.getNKernelLaunches(), nBlocks, nThreads); } else { result = runDistributed(kernel_rand_distributed, mState.testChunks, mState.getNKernelLaunches(), nBlocks, nThreads, mOptions.prime); } float tot{0}; for (auto& chunk : mState.testChunks) { float chunkSize = (float)getBufferCapacity<chunk_t>(chunk.second, mOptions.prime) * sizeof(chunk_t) / (float)GB; tot += chunkSize; } auto throughput = computeThroughput(test, result, tot, mState.getNKernelLaunches()); if (!mOptions.raw) { std::cout << " │ └ throughput: \e[1m" << throughput << " GB/s \e[0m(" << result * 1e-3 << " s)\n"; } else { std::cout << "" << measurement << "\t" << 0 << "\t" << throughput << "\t" << tot << "\t" << result << std::endl; } mResultWriter.get()->storeBenchmarkEntry(test, 0, result, tot, mState.getNKernelLaunches()); } mResultWriter.get()->snapshotBenchmark(); } } template <class chunk_t> void GPUbenchmark<chunk_t>::finalizeTest(Test test) { if (!mOptions.raw) { std::cout << " └\033[1;32m done\033[0m" << std::endl; } } template <class chunk_t> void 
GPUbenchmark<chunk_t>::globalFinalize() { GPUCHECK(cudaSetDevice(mOptions.deviceId)); GPUCHECK(cudaFree(mState.scratchPtr)); } template <class chunk_t> void GPUbenchmark<chunk_t>::run() { globalInit(); for (auto& test : mOptions.tests) { initTest(test); for (auto& mode : mOptions.modes) { for (auto& config : mOptions.pools) { runTest(test, mode, config); } } finalizeTest(test); } globalFinalize(); } template class GPUbenchmark<char>; template class GPUbenchmark<size_t>; template class GPUbenchmark<int>; template class GPUbenchmark<int4>; } // namespace benchmark } // namespace o2
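The runSequential and runConcurrent methods above time kernel launches with CUDA events: a warm-up launch, start/stop events recorded on a stream around the timed launches, then the elapsed milliseconds read back. A minimal standalone sketch of that pattern follows, assuming a hypothetical grid-stride fill_k kernel rather than the benchmark's read/write kernels.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void fill_k(int* buf, size_t n, int value)
{
  // grid-stride loop: each thread touches every gridDim.x*blockDim.x-th element
  for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += gridDim.x * blockDim.x) {
    buf[i] = value;
  }
}

int main()
{
  const size_t n = 1 << 24;
  int* buf;
  cudaMalloc(&buf, n * sizeof(int));

  cudaStream_t stream;
  cudaStreamCreate(&stream);
  fill_k<<<256, 256, 0, stream>>>(buf, n, 1); // warm-up launch, not timed

  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start, stream);
  for (int launch = 0; launch < 10; ++launch) {
    fill_k<<<256, 256, 0, stream>>>(buf, n, launch);
  }
  cudaEventRecord(stop, stream);
  cudaEventSynchronize(stop); // elapsed time is only valid once stop has completed

  float ms = 0.f;
  cudaEventElapsedTime(&ms, start, stop);
  printf("10 launches took %.3f ms\n", ms);

  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaStreamDestroy(stream);
  cudaFree(buf);
  return 0;
}

cudaEventElapsedTime only becomes valid after synchronizing on the stop event, which is why the benchmark code above calls cudaEventSynchronize before reading the time.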
81f9cf9b2108849a29b77f163ed10d95f85e2fae.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <hip/hip_runtime.h> #include "matmul.cuh" using namespace std; int main(int argc, char** argv){ float *A, *B, *C; unsigned int n = atoi(argv[1]); unsigned int nthreads_perblock = atoi(argv[2]); hipMallocManaged((void **)&A, n*n*sizeof(float)); hipMallocManaged((void **)&B, n*n*sizeof(float)); hipMallocManaged((void **)&C, n*n*sizeof(float)); for(unsigned int i = 0; i < n; i++) { for(unsigned j = 0; j < n; j++) { A[n*i + j] = (float) ((i == j)? 1.0 : 0); //Identity Matrix B[n*i + j] = (float) n*i + j; //Unique index matrix } } //Measure Time template hipEvent_t start; hipEvent_t stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); matmul(A, B, C, n, nthreads_perblock); hipEventRecord(stop); hipEventSynchronize(stop); float ms = 0; hipEventElapsedTime(&ms, start, stop); //Print template cout << C[n*n-1] << endl; cout << ms << endl; hipFree(A); hipFree(B); hipFree(C); return 0; }
81f9cf9b2108849a29b77f163ed10d95f85e2fae.cu
#include <iostream> #include <cuda.h> #include "matmul.cuh" using namespace std; int main(int argc, char** argv){ float *A, *B, *C; unsigned int n = atoi(argv[1]); unsigned int nthreads_perblock = atoi(argv[2]); cudaMallocManaged((void **)&A, n*n*sizeof(float)); cudaMallocManaged((void **)&B, n*n*sizeof(float)); cudaMallocManaged((void **)&C, n*n*sizeof(float)); for(unsigned int i = 0; i < n; i++) { for(unsigned j = 0; j < n; j++) { A[n*i + j] = (float) ((i == j)? 1.0 : 0); //Identity Matrix B[n*i + j] = (float) n*i + j; //Unique index matrix } } //Measure Time template cudaEvent_t start; cudaEvent_t stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); matmul(A, B, C, n, nthreads_perblock); cudaEventRecord(stop); cudaEventSynchronize(stop); float ms = 0; cudaEventElapsedTime(&ms, start, stop); //Print template cout << C[n*n-1] << endl; cout << ms << endl; cudaFree(A); cudaFree(B); cudaFree(C); return 0; }
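The matmul driver above relies on cudaMallocManaged, so the same pointer is usable from both host and device without explicit copies. A minimal sketch of that unified-memory pattern, with an illustrative square_k kernel standing in for matmul:

#include <cstdio>
#include <cuda_runtime.h>

__global__ void square_k(float* a, unsigned int n)
{
  unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    a[i] *= a[i]; // square each element in place
  }
}

int main()
{
  const unsigned int n = 1024;
  float* a;
  cudaMallocManaged(&a, n * sizeof(float)); // accessible from host and device
  for (unsigned int i = 0; i < n; ++i) a[i] = (float)i;

  square_k<<<(n + 255) / 256, 256>>>(a, n);
  cudaDeviceSynchronize(); // wait before the host touches managed data written by the kernel

  printf("a[10] = %f\n", a[10]); // expect 100.0
  cudaFree(a);
  return 0;
}

A device synchronization (or an event/stream sync, as in the timed driver above) is required before the host reads managed memory that a kernel has just written.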
bfea4784fc762db9c4cc954f7943644f0e0e3de8.hip
// !!! This is a file automatically generated by hipify!!! #include <math.h> #include <stdio.h> #include <stdlib.h> #include "timer.h" #include "check.h" #include <hip/hip_runtime.h> #define SOFTENING 1e-9f #define BLOCK_SIZE 32 #define BLOCK_STRIDE 32 typedef struct { float x, y, z, vx, vy, vz; } Body; void randomizeBodies(float *data, int n) { for (int i = 0; i < n; i++) { data[i] = 2.0f * (rand() / (float)RAND_MAX) - 1.0f; } } __global__ void bodyForce(Body *p, float dt, int n) { // int i = threadIdx.x + blockIdx.x * blockDim.x; // index int i = threadIdx.x + (int)(blockIdx.x / BLOCK_STRIDE) * blockDim.x; // int start_block = blockIdx.x % BLOCK_STRIDE; if (i < n) { int cycle_times = n / BLOCK_SIZE; Body ptemp = p[i]; // shared_memory __shared__ float3 spos[BLOCK_SIZE]; Body temp; float dx, dy, dz, distSqr, invDist, invDist3; float Fx = 0.0f; float Fy = 0.0f; float Fz = 0.0f; // cycle_times for (int block_num = start_block; block_num < cycle_times; block_num += BLOCK_STRIDE) { temp = p[block_num * BLOCK_SIZE + threadIdx.x]; spos[threadIdx.x] = make_float3(temp.x, temp.y, temp.z); // spos __syncthreads(); // BLOCK_SIZE #pragma unroll for (int j = 0; j < BLOCK_SIZE; j++) { dx = spos[j].x - ptemp.x; dy = spos[j].y - ptemp.y; dz = spos[j].z - ptemp.z; distSqr = dx * dx + dy * dy + dz * dz + SOFTENING; invDist = rsqrtf(distSqr); invDist3 = invDist * invDist * invDist; Fx += dx * invDist3; Fy += dy * invDist3; Fz += dz * invDist3; } // spos __syncthreads(); } // atomicAdd(&p[i].vx, dt * Fx); atomicAdd(&p[i].vy, dt * Fy); atomicAdd(&p[i].vz, dt * Fz); // p[i].vx += dt * Fx; // p[i].vy += dt * Fy; // p[i].vz += dt * Fz; } } __global__ void integrate_position(Body *p, float dt, int n) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i < n) { p[i].x += p[i].vx * dt; p[i].y += p[i].vy * dt; p[i].z += p[i].vz * dt; } } int main(const int argc, const char **argv) { int nBodies = 2 << 11; int salt = 0; if (argc > 1) nBodies = 2 << atoi(argv[1]); /* * This salt is for assessment reasons. Tampering with it will result in automatic failure. */ if (argc > 2) salt = atoi(argv[2]); const float dt = 0.01f; // time step const int nIters = 10; // simulation iterations int bytes = nBodies * sizeof(Body); float *buf; hipHostMalloc(&buf, bytes); randomizeBodies(buf, 6 * nBodies); // Init pos / vel data double totalTime = 0.0; int deviceId; hipGetDevice(&deviceId); size_t threadsPerBlock = BLOCK_SIZE; size_t numberOfBlocks = (nBodies + threadsPerBlock - 1) / threadsPerBlock; float *d_buf; hipMalloc(&d_buf, bytes); Body *d_p = (Body *)d_buf; /* * This simulation will run for 10 cycles of time, calculating gravitational * interaction amongst bodies, and adjusting their positions to reflect. */ hipMemcpy(d_buf, buf, bytes, hipMemcpyHostToDevice); /*******************************************************************/ // Do not modify these 2 lines of code.gg for (int iter = 0; iter < nIters; iter++) { StartTimer(); /*******************************************************************/ /* * You will likely wish to refactor the work being done in `bodyForce`, * as well as the work to integrate the positions. */ hipLaunchKernelGGL(( bodyForce), dim3(numberOfBlocks * BLOCK_STRIDE), dim3(threadsPerBlock), 0, 0, d_p, dt, nBodies); // compute interbody forces /* * This position integration cannot occur until this round of `bodyForce` has completed. * Also, the next round of `bodyForce` cannot begin until the integration is complete. 
*/ hipLaunchKernelGGL(( integrate_position), dim3(nBodies / threadsPerBlock), dim3(threadsPerBlock), 0, 0, d_p, dt, nBodies); if (iter == nIters - 1) { hipMemcpy(buf, d_buf, bytes, hipMemcpyDeviceToHost); } /*******************************************************************/ // Do not modify the code in this section. const double tElapsed = GetTimer() / 1000.0; totalTime += tElapsed; } double avgTime = totalTime / (double)(nIters); float billionsOfOpsPerSecond = 1e-9 * nBodies * nBodies / avgTime; #ifdef ASSESS checkPerformance(buf, billionsOfOpsPerSecond, salt); #else checkAccuracy(buf, nBodies); printf("%d Bodies: average %0.3f Billion Interactions / second\n", nBodies, billionsOfOpsPerSecond); salt += 1; #endif /*******************************************************************/ /* * Feel free to modify code below. */ hipFree(d_buf); hipHostFree(buf); }
bfea4784fc762db9c4cc954f7943644f0e0e3de8.cu
#include <math.h> #include <stdio.h> #include <stdlib.h> #include "timer.h" #include "check.h" #include <cuda_runtime.h> #define SOFTENING 1e-9f #define BLOCK_SIZE 32 #define BLOCK_STRIDE 32 typedef struct { float x, y, z, vx, vy, vz; } Body; void randomizeBodies(float *data, int n) { for (int i = 0; i < n; i++) { data[i] = 2.0f * (rand() / (float)RAND_MAX) - 1.0f; } } __global__ void bodyForce(Body *p, float dt, int n) { // int i = threadIdx.x + blockIdx.x * blockDim.x; // compute the index of the data element this thread handles int i = threadIdx.x + (int)(blockIdx.x / BLOCK_STRIDE) * blockDim.x; // starting data block that this thread block is responsible for int start_block = blockIdx.x % BLOCK_STRIDE; if (i < n) { int cycle_times = n / BLOCK_SIZE; Body ptemp = p[i]; // use shared memory so many threads read the same block of data, improving access performance __shared__ float3 spos[BLOCK_SIZE]; Body temp; float dx, dy, dz, distSqr, invDist, invDist3; float Fx = 0.0f; float Fy = 0.0f; float Fz = 0.0f; // for cycle_times here, using a constant (when the block size is known) gives somewhat better performance for (int block_num = start_block; block_num < cycle_times; block_num += BLOCK_STRIDE) { temp = p[block_num * BLOCK_SIZE + threadIdx.x]; spos[threadIdx.x] = make_float3(temp.x, temp.y, temp.z); // synchronize within the block to keep spos from being read too early __syncthreads(); // compiler optimization, only useful when BLOCK_SIZE is a constant #pragma unroll for (int j = 0; j < BLOCK_SIZE; j++) { dx = spos[j].x - ptemp.x; dy = spos[j].y - ptemp.y; dz = spos[j].z - ptemp.z; distSqr = dx * dx + dy * dy + dz * dz + SOFTENING; invDist = rsqrtf(distSqr); invDist3 = invDist * invDist * invDist; Fx += dx * invDist3; Fy += dy * invDist3; Fz += dz * invDist3; } // synchronize within the block to keep spos from being overwritten too early __syncthreads(); } // blocks do not synchronize with each other, so atomic adds guarantee correctness atomicAdd(&p[i].vx, dt * Fx); atomicAdd(&p[i].vy, dt * Fy); atomicAdd(&p[i].vz, dt * Fz); // p[i].vx += dt * Fx; // p[i].vy += dt * Fy; // p[i].vz += dt * Fz; } } __global__ void integrate_position(Body *p, float dt, int n) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i < n) { p[i].x += p[i].vx * dt; p[i].y += p[i].vy * dt; p[i].z += p[i].vz * dt; } } int main(const int argc, const char **argv) { int nBodies = 2 << 11; int salt = 0; if (argc > 1) nBodies = 2 << atoi(argv[1]); /* * This salt is for assessment reasons. Tampering with it will result in automatic failure. */ if (argc > 2) salt = atoi(argv[2]); const float dt = 0.01f; // time step const int nIters = 10; // simulation iterations int bytes = nBodies * sizeof(Body); float *buf; cudaMallocHost(&buf, bytes); randomizeBodies(buf, 6 * nBodies); // Init pos / vel data double totalTime = 0.0; int deviceId; cudaGetDevice(&deviceId); size_t threadsPerBlock = BLOCK_SIZE; size_t numberOfBlocks = (nBodies + threadsPerBlock - 1) / threadsPerBlock; float *d_buf; cudaMalloc(&d_buf, bytes); Body *d_p = (Body *)d_buf; /* * This simulation will run for 10 cycles of time, calculating gravitational * interaction amongst bodies, and adjusting their positions to reflect. */ cudaMemcpy(d_buf, buf, bytes, cudaMemcpyHostToDevice); /*******************************************************************/ // Do not modify these 2 lines of code.gg for (int iter = 0; iter < nIters; iter++) { StartTimer(); /*******************************************************************/ /* * You will likely wish to refactor the work being done in `bodyForce`, * as well as the work to integrate the positions. */ bodyForce<<<numberOfBlocks * BLOCK_STRIDE, threadsPerBlock>>>(d_p, dt, nBodies); // compute interbody forces /* * This position integration cannot occur until this round of `bodyForce` has completed. * Also, the next round of `bodyForce` cannot begin until the integration is complete. 
*/ integrate_position<<<nBodies / threadsPerBlock, threadsPerBlock>>>(d_p, dt, nBodies); if (iter == nIters - 1) { cudaMemcpy(buf, d_buf, bytes, cudaMemcpyDeviceToHost); } /*******************************************************************/ // Do not modify the code in this section. const double tElapsed = GetTimer() / 1000.0; totalTime += tElapsed; } double avgTime = totalTime / (double)(nIters); float billionsOfOpsPerSecond = 1e-9 * nBodies * nBodies / avgTime; #ifdef ASSESS checkPerformance(buf, billionsOfOpsPerSecond, salt); #else checkAccuracy(buf, nBodies); printf("%d Bodies: average %0.3f Billion Interactions / second\n", nBodies, billionsOfOpsPerSecond); salt += 1; #endif /*******************************************************************/ /* * Feel free to modify code below. */ cudaFree(d_buf); cudaFreeHost(buf); }
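bodyForce above splits the tile loop for each body across BLOCK_STRIDE blocks, stages positions in shared memory between __syncthreads() barriers, and merges the per-block partial forces with atomicAdd because independent blocks cannot synchronize with one another. A minimal sketch of that combination (shared-memory tiles plus cross-block atomicAdd), using an illustrative sum_tiles_k reduction rather than the n-body force calculation:

#include <cstdio>
#include <cuda_runtime.h>

#define TILE 128

// Each block sums a disjoint subset of TILE-sized tiles of x[] and adds its partial sum to *out.
__global__ void sum_tiles_k(const float* x, int n, float* out)
{
  __shared__ float tile[TILE];
  float partial = 0.0f;

  // Block b handles tiles b, b + gridDim.x, b + 2*gridDim.x, ...
  for (int t = blockIdx.x; t < n / TILE; t += gridDim.x) {
    tile[threadIdx.x] = x[t * TILE + threadIdx.x];
    __syncthreads();                  // tile fully loaded before anyone reads it
    if (threadIdx.x == 0) {
      for (int j = 0; j < TILE; ++j) partial += tile[j];
    }
    __syncthreads();                  // everyone done reading before the next load
  }

  if (threadIdx.x == 0) {
    atomicAdd(out, partial);          // blocks merge their partial sums safely
  }
}

int main()
{
  const int n = 1 << 16;
  float *x, *out;
  cudaMallocManaged(&x, n * sizeof(float));
  cudaMallocManaged(&out, sizeof(float));
  for (int i = 0; i < n; ++i) x[i] = 1.0f;
  *out = 0.0f;

  sum_tiles_k<<<32, TILE>>>(x, n, out);
  cudaDeviceSynchronize();
  printf("sum = %.0f (expected %d)\n", *out, n);

  cudaFree(x);
  cudaFree(out);
  return 0;
}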
9cf0faa4bc886d47cabfb5410165d8897bc37e15.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * thunderstruck/tracker: HaarFeatureCalculatorImpl.cu */ #include "HaarFeatureCalculatorImpl.h" #include "cudaApi.cuh" namespace thunderstruck { //#################### CUDA KERNELS #################### __global__ void ck_calculate_haar_features(hipTextureObject_t integralFrame, float *sampleData, int *keepSamples, size_t sampleCount, float bbX, float bbY, float bbWidth, float bbHeight, float *bottoms, float *lefts, float *rights, float *tops, float *weights, double *features, size_t offset, size_t stride) { int sampleIndex = blockIdx.x; int featureIndex = threadIdx.x; float bbArea = bbWidth * bbHeight; double result = 0.0; // 1 global read if(keepSamples[sampleIndex]) { // Compute the coordinates of the sample. // 2 global reads float sampleLeft = bbX + sampleData[sampleIndex * 2]; float sampleTop = bbY + sampleData[sampleIndex * 2 + 1]; // Add up the weighted contributions from all the potential mini-boxes. for(int i = 0; i < 4; ++i) { // Compute the coordinates of the mini-box. int j = i * HAAR_FEATURE_COUNT + featureIndex; // 4 global reads float leftsJ = lefts[j]; float topsJ = tops[j]; int left(sampleLeft + leftsJ * bbWidth + 0.5f); int top(sampleTop + tops[j] * bbHeight + 0.5f); int right(left + (rights[j] - leftsJ) * bbWidth); int bottom(top + (bottoms[j] - topsJ) * bbHeight); // Add the weighted sum of the pixels in the mini-box to the result. // 1 global read, 4 texture reads result += weights[j] * ( tex2D<int>(integralFrame, left, top) + tex2D<int>(integralFrame, right, bottom) - tex2D<int>(integralFrame, left, bottom) - tex2D<int>(integralFrame, right, top) ); } // Normalize the result (divide it by the area of the sample). result /= bbArea; } // Determine the target position in the output array. size_t k = sampleIndex * stride + offset + featureIndex; // Write the result to the output array. features[k] = result; } //#################### WRAPPER FUNCTIONS #################### void calculate_haar_features(hipTextureObject_t integralFrame, float *sampleData, int *keepSamples, size_t sampleCount, float bbX, float bbY, float bbWidth, float bbHeight, float *bottoms, float *lefts, float *rights, float *tops, float *weights, double *features, size_t offset, size_t stride) { hipLaunchKernelGGL(( ck_calculate_haar_features), dim3(sampleCount),dim3(HAAR_FEATURE_COUNT), 0, 0, integralFrame, sampleData, keepSamples, sampleCount, bbX, bbY, bbWidth, bbHeight, bottoms, lefts, rights, tops, weights, features, offset, stride ); } }
9cf0faa4bc886d47cabfb5410165d8897bc37e15.cu
/** * thunderstruck/tracker: HaarFeatureCalculatorImpl.cu */ #include "HaarFeatureCalculatorImpl.h" #include "cudaApi.cuh" namespace thunderstruck { //#################### CUDA KERNELS #################### __global__ void ck_calculate_haar_features(cudaTextureObject_t integralFrame, float *sampleData, int *keepSamples, size_t sampleCount, float bbX, float bbY, float bbWidth, float bbHeight, float *bottoms, float *lefts, float *rights, float *tops, float *weights, double *features, size_t offset, size_t stride) { int sampleIndex = blockIdx.x; int featureIndex = threadIdx.x; float bbArea = bbWidth * bbHeight; double result = 0.0; // 1 global read if(keepSamples[sampleIndex]) { // Compute the coordinates of the sample. // 2 global reads float sampleLeft = bbX + sampleData[sampleIndex * 2]; float sampleTop = bbY + sampleData[sampleIndex * 2 + 1]; // Add up the weighted contributions from all the potential mini-boxes. for(int i = 0; i < 4; ++i) { // Compute the coordinates of the mini-box. int j = i * HAAR_FEATURE_COUNT + featureIndex; // 4 global reads float leftsJ = lefts[j]; float topsJ = tops[j]; int left(sampleLeft + leftsJ * bbWidth + 0.5f); int top(sampleTop + tops[j] * bbHeight + 0.5f); int right(left + (rights[j] - leftsJ) * bbWidth); int bottom(top + (bottoms[j] - topsJ) * bbHeight); // Add the weighted sum of the pixels in the mini-box to the result. // 1 global read, 4 texture reads result += weights[j] * ( tex2D<int>(integralFrame, left, top) + tex2D<int>(integralFrame, right, bottom) - tex2D<int>(integralFrame, left, bottom) - tex2D<int>(integralFrame, right, top) ); } // Normalize the result (divide it by the area of the sample). result /= bbArea; } // Determine the target position in the output array. size_t k = sampleIndex * stride + offset + featureIndex; // Write the result to the output array. features[k] = result; } //#################### WRAPPER FUNCTIONS #################### void calculate_haar_features(cudaTextureObject_t integralFrame, float *sampleData, int *keepSamples, size_t sampleCount, float bbX, float bbY, float bbWidth, float bbHeight, float *bottoms, float *lefts, float *rights, float *tops, float *weights, double *features, size_t offset, size_t stride) { ck_calculate_haar_features<<<sampleCount,HAAR_FEATURE_COUNT>>>( integralFrame, sampleData, keepSamples, sampleCount, bbX, bbY, bbWidth, bbHeight, bottoms, lefts, rights, tops, weights, features, offset, stride ); } }
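ck_calculate_haar_features above samples the integral image through a cudaTextureObject_t with tex2D<int>. The sketch below shows one way such a texture object can be created from a cudaArray and read in a kernel; the 8x8 image, clamp addressing, and the read_texel_k kernel are illustrative assumptions, not the tracker's actual setup.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void read_texel_k(cudaTextureObject_t tex, int* out)
{
  // fetch the texel at x=3, y=2 (row-major index 2*width + 3)
  *out = tex2D<int>(tex, 3, 2);
}

int main()
{
  const int width = 8, height = 8;
  int host[width * height];
  for (int i = 0; i < width * height; ++i) host[i] = i;

  // Back the texture with a CUDA array of 32-bit signed ints
  cudaChannelFormatDesc desc = cudaCreateChannelDesc<int>();
  cudaArray_t arr;
  cudaMallocArray(&arr, &desc, width, height);
  cudaMemcpy2DToArray(arr, 0, 0, host, width * sizeof(int),
                      width * sizeof(int), height, cudaMemcpyHostToDevice);

  cudaResourceDesc resDesc = {};
  resDesc.resType = cudaResourceTypeArray;
  resDesc.res.array.array = arr;

  cudaTextureDesc texDesc = {};
  texDesc.addressMode[0] = cudaAddressModeClamp;
  texDesc.addressMode[1] = cudaAddressModeClamp;
  texDesc.filterMode = cudaFilterModePoint;   // integer data must use point filtering
  texDesc.readMode = cudaReadModeElementType; // unnormalized coordinates (default)

  cudaTextureObject_t tex = 0;
  cudaCreateTextureObject(&tex, &resDesc, &texDesc, nullptr);

  int* out;
  cudaMallocManaged(&out, sizeof(int));
  read_texel_k<<<1, 1>>>(tex, out);
  cudaDeviceSynchronize();
  printf("texel(3,2) = %d\n", *out); // expect 19

  cudaDestroyTextureObject(tex);
  cudaFreeArray(arr);
  cudaFree(out);
  return 0;
}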
d53ea21a1d31995d12c7b07df356f4527f48b25c.hip
// !!! This is a file automatically generated by hipify!!! //PND #define X 1 //RN #define Y 10 //membrane potential increase per neurotransmitter #define Z 100 //CD #define C 10 //MPD #define D 5 #include <hip/hip_runtime.h> #include "menu.h" using namespace std; /* void check_preNeuron(){ } */ void gen_neuron(int i){ neuronTag *collect = NULL; collect = new neuronTag [i]; for(int id = 1; id < i + 1; id++){ collect[id].set_ID(id); collect[id].gen_NT(); collect[id].NT -> NeuronSave(); Tag.insert(pair<int, neuronTag>(id,collect[id])); } } string Nread_word(int number){ string temp_buffer = ""; for(int i = 0; i < number; i++){ test >> temp_buffer; } return temp_buffer; //number is the thread of word } int get_neuron_check(){ int tempN = 1; int neuronNumber = 0; for(int i = 0; i < 1; i++){ string buffer; char tempput[15] = ""; sprintf(datanumber, "%d", tempN); strcat(tempput, output); strcat(tempput, datanumber); strcat(tempput, format); test.open(tempput); if(test.is_open()){ i = i - 1; neuronNumber = neuronNumber + 1; //cout << "check" ; }else{ //cout << "note" << tempN << " no such file" << endl; } test.close(); tempN = tempN + 1; } return neuronNumber; } void get_neuron(){ int neuronNumber = get_neuron_check(); neuronTag *collect = NULL; collect = new neuronTag [neuronNumber]; //temp for(int i = 1/*need to change to log*/; i < neuronNumber + 1; i++){ collect[i].NT = new neuron(i); Tag.insert(pair<int, neuronTag>(i,collect[i])); char tempput[15] = ""; sprintf(datanumber, "%d", i); strcat(tempput, output); strcat(tempput, datanumber); strcat(tempput, format); test.open(tempput, ios::in); Tag[i].NT -> set_name(atoi(Nread_word(3).c_str())); Tag[i].NT -> set_CD(atoi(Nread_word(4).c_str())); Tag[i].NT -> set_TH(atoi(Nread_word(3).c_str())); //Tag[i].NT -> NS = atoi(Nread_word(4).c_str()); Tag[i].NT -> set_PND(atoi(Nread_word(5).c_str())); Tag[i].NT -> set_BMPD(atoi(Nread_word(5).c_str())); Tag[i].NT -> set_RN(atoi(Nread_word(5).c_str())); test.close(); } } int main(){ srand(time(NULL)); string temp_input; //initialization of neuron double START, END; START = clock(); cout << get_neuron_check(); if(get_neuron_check() != 0){ cout << "!=0" << endl; get_neuron(); }else{ cout << "==0" << endl; gen_neuron(8); } END = clock(); //check log and get log test.open("log"); if(test.is_open()){ }else{ test.open("log", ios::out); test << 0 << endl; test.close(); } ifstream read("log", ios::in); read >> temp_input; read.close(); int pastlog = atoi(temp_input.c_str()); temp_input = ""; /* cout << pastlog << endl; test.open("log", ios::out); test << "HAHAHA" << endl; test << pastlog << endl; test.close(); */ for(int i = pastlog; i < pastlog + 100; i++){ for(int j = 1; j < 9; j++){ if(Tag[j].NT -> get_at() == i){ Tag[j].NT -> NeuronExe(); if(j == 8){ Tag[1].NT -> set_PN(Tag[1].NT -> get_PN() + (Tag[j].NT -> get_OT()) * (Tag[j].NT -> get_RN())); }else{ Tag[j + 1].NT -> set_PN(Tag[j + 1].NT -> get_PN() + (Tag[j].NT -> get_OT()) * (Tag[j].NT -> get_RN()));; } } Tag[j].NT -> NeuronNote(i); Tag[j].NT -> Neurontimepass(); } } /* test.open("log", ios::out | ios::app); test << pastlog << endl; test.close(); */ cout << (double)clock()/CLOCKS_PER_SEC << " s" << endl; cout << (END-START)/1000000 << " s" << endl; return 0; }
d53ea21a1d31995d12c7b07df356f4527f48b25c.cu
//PND #define X 1 //RN #define Y 10 //membrane potential increase per neurotransmitter #define Z 100 //CD #define C 10 //MPD #define D 5 #include <cuda_runtime.h> #include "menu.h" using namespace std; /* void check_preNeuron(){ } */ void gen_neuron(int i){ neuronTag *collect = NULL; collect = new neuronTag [i]; for(int id = 1; id < i + 1; id++){ collect[id].set_ID(id); collect[id].gen_NT(); collect[id].NT -> NeuronSave(); Tag.insert(pair<int, neuronTag>(id,collect[id])); } } string Nread_word(int number){ string temp_buffer = ""; for(int i = 0; i < number; i++){ test >> temp_buffer; } return temp_buffer; //number is the thread of word } int get_neuron_check(){ int tempN = 1; int neuronNumber = 0; for(int i = 0; i < 1; i++){ string buffer; char tempput[15] = ""; sprintf(datanumber, "%d", tempN); strcat(tempput, output); strcat(tempput, datanumber); strcat(tempput, format); test.open(tempput); if(test.is_open()){ i = i - 1; neuronNumber = neuronNumber + 1; //cout << "check" ; }else{ //cout << "note" << tempN << " no such file" << endl; } test.close(); tempN = tempN + 1; } return neuronNumber; } void get_neuron(){ int neuronNumber = get_neuron_check(); neuronTag *collect = NULL; collect = new neuronTag [neuronNumber]; //temp for(int i = 1/*need to change to log*/; i < neuronNumber + 1; i++){ collect[i].NT = new neuron(i); Tag.insert(pair<int, neuronTag>(i,collect[i])); char tempput[15] = ""; sprintf(datanumber, "%d", i); strcat(tempput, output); strcat(tempput, datanumber); strcat(tempput, format); test.open(tempput, ios::in); Tag[i].NT -> set_name(atoi(Nread_word(3).c_str())); Tag[i].NT -> set_CD(atoi(Nread_word(4).c_str())); Tag[i].NT -> set_TH(atoi(Nread_word(3).c_str())); //Tag[i].NT -> NS = atoi(Nread_word(4).c_str()); Tag[i].NT -> set_PND(atoi(Nread_word(5).c_str())); Tag[i].NT -> set_BMPD(atoi(Nread_word(5).c_str())); Tag[i].NT -> set_RN(atoi(Nread_word(5).c_str())); test.close(); } } int main(){ srand(time(NULL)); string temp_input; //initialization of neuron double START, END; START = clock(); cout << get_neuron_check(); if(get_neuron_check() != 0){ cout << "!=0" << endl; get_neuron(); }else{ cout << "==0" << endl; gen_neuron(8); } END = clock(); //check log and get log test.open("log"); if(test.is_open()){ }else{ test.open("log", ios::out); test << 0 << endl; test.close(); } ifstream read("log", ios::in); read >> temp_input; read.close(); int pastlog = atoi(temp_input.c_str()); temp_input = ""; /* cout << pastlog << endl; test.open("log", ios::out); test << "HAHAHA" << endl; test << pastlog << endl; test.close(); */ for(int i = pastlog; i < pastlog + 100; i++){ for(int j = 1; j < 9; j++){ if(Tag[j].NT -> get_at() == i){ Tag[j].NT -> NeuronExe(); if(j == 8){ Tag[1].NT -> set_PN(Tag[1].NT -> get_PN() + (Tag[j].NT -> get_OT()) * (Tag[j].NT -> get_RN())); }else{ Tag[j + 1].NT -> set_PN(Tag[j + 1].NT -> get_PN() + (Tag[j].NT -> get_OT()) * (Tag[j].NT -> get_RN()));; } } Tag[j].NT -> NeuronNote(i); Tag[j].NT -> Neurontimepass(); } } /* test.open("log", ios::out | ios::app); test << pastlog << endl; test.close(); */ cout << (double)clock()/CLOCKS_PER_SEC << " s" << endl; cout << (END-START)/1000000 << " s" << endl; return 0; }
4e0d34c7eecf9ecac7437f0cb9ed2739f65fc429.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column.hpp> #include <cudf/null_mask.hpp> #include <cudf/unary.hpp> #include <cudf/utilities/traits.hpp> #include <rmm/thrust_rmm_allocator.h> namespace cudf { namespace experimental { namespace detail { template <typename _T, typename _R> struct unary_cast { template < typename T = _T, typename R = _R, typename std::enable_if_t< (cudf::is_numeric<T>() && cudf::is_numeric<R>()) || (cudf::is_timestamp<T>() && cudf::is_timestamp<R>())>* = nullptr> CUDA_DEVICE_CALLABLE R operator()(T const element) { return static_cast<R>(element); } template <typename T = _T, typename R = _R, typename std::enable_if_t<cudf::is_numeric<T>() && cudf::is_timestamp<R>()>* = nullptr> CUDA_DEVICE_CALLABLE R operator()(T const element) { return static_cast<R>(static_cast<typename R::rep>(element)); } template <typename T = _T, typename R = _R, typename std::enable_if_t<cudf::is_timestamp<T>() && cudf::is_numeric<R>()>* = nullptr> CUDA_DEVICE_CALLABLE R operator()(T const element) { return static_cast<R>(element.time_since_epoch().count()); } }; template <typename T> struct dispatch_unary_cast_to { column_view input; dispatch_unary_cast_to(column_view inp) : input(inp) {} template <typename R, typename std::enable_if_t<cudf::is_numeric<R>() || cudf::is_timestamp<R>()>* = nullptr> std::unique_ptr<column> operator()(data_type type, rmm::mr::device_memory_resource* mr, hipStream_t stream) { auto size = input.size(); auto output = std::make_unique<column>( type, size, rmm::device_buffer{size * cudf::size_of(type), 0, mr}, copy_bitmask(input, 0, mr), input.null_count()); mutable_column_view output_mutable = *output; thrust::transform(rmm::exec_policy(stream)->on(stream), input.begin<T>(), input.end<T>(), output_mutable.begin<R>(), unary_cast<T, R>{}); return output; } template <typename R, typename std::enable_if_t<!cudf::is_numeric<R>() && !cudf::is_timestamp<R>()>* = nullptr> std::unique_ptr<column> operator()(data_type type, rmm::mr::device_memory_resource* mr, hipStream_t stream) { CUDF_FAIL("Column type must be numeric or timestamp"); } }; struct dispatch_unary_cast_from { column_view input; dispatch_unary_cast_from(column_view inp) : input(inp) {} template <typename T, typename std::enable_if_t<cudf::is_numeric<T>() || cudf::is_timestamp<T>()>* = nullptr> std::unique_ptr<column> operator()(data_type type, rmm::mr::device_memory_resource* mr, hipStream_t stream) { return experimental::type_dispatcher(type, dispatch_unary_cast_to<T>{input}, type, mr, stream); } template <typename T, typename std::enable_if_t<!cudf::is_timestamp<T>() && !cudf::is_numeric<T>()>* = nullptr> std::unique_ptr<column> operator()(data_type type, rmm::mr::device_memory_resource* mr, hipStream_t stream) { CUDF_FAIL("Column type must be numeric or timestamp"); } }; } // namespace detail std::unique_ptr<column> cast(column_view const& input, data_type type, 
rmm::mr::device_memory_resource* mr) { CUDF_EXPECTS(is_fixed_width(type), "Unary cast type must be fixed-width."); return experimental::type_dispatcher(input.type(), detail::dispatch_unary_cast_from{input}, type, mr, static_cast<hipStream_t>(0)); } } // namespace experimental } // namespace cudf
4e0d34c7eecf9ecac7437f0cb9ed2739f65fc429.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column.hpp> #include <cudf/null_mask.hpp> #include <cudf/unary.hpp> #include <cudf/utilities/traits.hpp> #include <rmm/thrust_rmm_allocator.h> namespace cudf { namespace experimental { namespace detail { template <typename _T, typename _R> struct unary_cast { template < typename T = _T, typename R = _R, typename std::enable_if_t< (cudf::is_numeric<T>() && cudf::is_numeric<R>()) || (cudf::is_timestamp<T>() && cudf::is_timestamp<R>())>* = nullptr> CUDA_DEVICE_CALLABLE R operator()(T const element) { return static_cast<R>(element); } template <typename T = _T, typename R = _R, typename std::enable_if_t<cudf::is_numeric<T>() && cudf::is_timestamp<R>()>* = nullptr> CUDA_DEVICE_CALLABLE R operator()(T const element) { return static_cast<R>(static_cast<typename R::rep>(element)); } template <typename T = _T, typename R = _R, typename std::enable_if_t<cudf::is_timestamp<T>() && cudf::is_numeric<R>()>* = nullptr> CUDA_DEVICE_CALLABLE R operator()(T const element) { return static_cast<R>(element.time_since_epoch().count()); } }; template <typename T> struct dispatch_unary_cast_to { column_view input; dispatch_unary_cast_to(column_view inp) : input(inp) {} template <typename R, typename std::enable_if_t<cudf::is_numeric<R>() || cudf::is_timestamp<R>()>* = nullptr> std::unique_ptr<column> operator()(data_type type, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { auto size = input.size(); auto output = std::make_unique<column>( type, size, rmm::device_buffer{size * cudf::size_of(type), 0, mr}, copy_bitmask(input, 0, mr), input.null_count()); mutable_column_view output_mutable = *output; thrust::transform(rmm::exec_policy(stream)->on(stream), input.begin<T>(), input.end<T>(), output_mutable.begin<R>(), unary_cast<T, R>{}); return output; } template <typename R, typename std::enable_if_t<!cudf::is_numeric<R>() && !cudf::is_timestamp<R>()>* = nullptr> std::unique_ptr<column> operator()(data_type type, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { CUDF_FAIL("Column type must be numeric or timestamp"); } }; struct dispatch_unary_cast_from { column_view input; dispatch_unary_cast_from(column_view inp) : input(inp) {} template <typename T, typename std::enable_if_t<cudf::is_numeric<T>() || cudf::is_timestamp<T>()>* = nullptr> std::unique_ptr<column> operator()(data_type type, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { return experimental::type_dispatcher(type, dispatch_unary_cast_to<T>{input}, type, mr, stream); } template <typename T, typename std::enable_if_t<!cudf::is_timestamp<T>() && !cudf::is_numeric<T>()>* = nullptr> std::unique_ptr<column> operator()(data_type type, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { CUDF_FAIL("Column type must be numeric or timestamp"); } }; } // namespace detail std::unique_ptr<column> cast(column_view const& input, data_type type, rmm::mr::device_memory_resource* mr) { CUDF_EXPECTS(is_fixed_width(type), "Unary cast 
type must be fixed-width."); return experimental::type_dispatcher(input.type(), detail::dispatch_unary_cast_from{input}, type, mr, static_cast<cudaStream_t>(0)); } } // namespace experimental } // namespace cudf
1df403d66921595d36a21a4119c5d7c00cfd3e5d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <math_constants.h> #include "BC.h" /** * Calculates the next finite difference step given a * grid point and step lengths. * * @param curr Pointer to the grid point that should be updated. * @param width Number of grid points in the x dimension. * @param xcfl Courant number for x dimension. * @param ycfl Courant number for y dimension. * @returns Grid value of next timestep. */ template<int order> __device__ float Stencil(const float *curr, int width, float xcfl, float ycfl) { switch (order) { case 2: return curr[0] + xcfl * (curr[-1] + curr[1] - 2.f * curr[0]) + ycfl * (curr[width] + curr[-width] - 2.f * curr[0]); case 4: return curr[0] + xcfl * (- curr[2] + 16.f * curr[1] - 30.f * curr[0] + 16.f * curr[-1] - curr[-2]) + ycfl * (- curr[2 * width] + 16.f * curr[width] - 30.f * curr[0] + 16.f * curr[-width] - curr[-2 * width]); case 8: return curr[0] + xcfl * (-9.f * curr[4] + 128.f * curr[3] - 1008.f * curr[2] + 8064.f * curr[1] - 14350.f * curr[0] + 8064.f * curr[-1] - 1008.f * curr[-2] + 128.f * curr[-3] - 9.f * curr[-4]) + ycfl * (-9.f * curr[4 * width] + 128.f * curr[3 * width] - 1008.f * curr[2 * width] + 8064.f * curr[width] - 14350.f * curr[0] + 8064.f * curr[-width] - 1008.f * curr[-2 * width] + 128.f * curr[-3 * width] - 9.f * curr[-4 * width]); default: printf("ERROR: Order %d not supported", order); return CUDART_NAN_F; } } /** * Kernel to propagate finite difference grid from the current * time point to the next. * * This kernel should be very simple and only use global memory. * * @param next[out] Next grid state. * @param curr Current grid state. * @param gx Number of grid points in the x dimension. * @param nx Number of grid points in the x dimension to which the full * stencil can be applied (ie the number of points that are at least * order/2 grid points away from the boundar). * @param ny Number of grid points in the y dimension to which th full * stencil can be applied. * @param xcfl Courant number for x dimension. * @param ycfl Courant number for y dimension. */ template<int order> __global__ void gpuStencil(float *next, const float *curr, int gx, int nx, int ny, float xcfl, float ycfl) { // TODO const int tid_x = threadIdx.x + blockDim.x*blockIdx.x; const int tid_y = threadIdx.y + blockDim.y*blockIdx.y; // If the thread is not inside the domain, return if (tid_x >= nx || tid_y >= ny) { return; } // Compute the boarder size const int b = order/2; // Compute the index of the point in 1D array const int idx = (tid_y + b)*gx + (tid_x + b); next[idx] = Stencil<order> (&curr[idx], gx, xcfl, ycfl); } /** * Propagates the finite difference 2D heat diffusion solver * using the gpuStencil kernel. * * Use this function to do necessary setup and propagate params.iters() * number of times. * * @param curr_grid The current state of the grid. * @param params Parameters for the finite difference computation. * @returns Time required for computation. */ double gpuComputation(Grid &curr_grid, const simParams &params) { boundary_conditions BC(params); Grid next_grid(curr_grid); // TODO: Declare variables/Compute parameters. 
dim3 threads(0, 0); dim3 blocks(0, 0); // Set the size of each block const unsigned int block_dim_x = 64u; const unsigned int block_dim_y = 8u; int nx = params.nx(); int ny = params.ny(); int gx = params.gx(); float xcfl = params.xcfl(); float ycfl = params.ycfl(); int order = params.order(); // Compute the block dimension threads.x = block_dim_x; threads.y = block_dim_y; // Assume each dimension of the block is less than 65536 // and compute the grid size blocks.x = ((unsigned int) nx + threads.x - 1)/threads.x; blocks.y = ((unsigned int) ny + threads.y - 1)/threads.y; event_pair timer; start_timer(&timer); for (int i = 0; i < params.iters(); ++i) { // update the values on the boundary only BC.updateBC(next_grid.dGrid_, curr_grid.dGrid_); // TODO: Apply stencil. switch (order) { case 2: hipLaunchKernelGGL(( gpuStencil<2>), dim3(blocks), dim3(threads), 0, 0, next_grid.dGrid_, curr_grid.dGrid_, gx, nx, ny, xcfl, ycfl); break; case 4: hipLaunchKernelGGL(( gpuStencil<4>), dim3(blocks), dim3(threads), 0, 0, next_grid.dGrid_, curr_grid.dGrid_, gx, nx, ny, xcfl, ycfl); break; case 8: hipLaunchKernelGGL(( gpuStencil<8>), dim3(blocks), dim3(threads), 0, 0, next_grid.dGrid_, curr_grid.dGrid_, gx, nx, ny, xcfl, ycfl); break; default: printf("ERROR: Order %d not supported", order); exit(1); } check_launch("gpuStencil"); Grid::swap(curr_grid, next_grid); } return stop_timer(&timer); } /** * Kernel to propagate finite difference grid from the current * time point to the next. * * This kernel should be optimized to compute finite difference updates * in blocks of size (blockDim.y * numYPerStep) * blockDim.x. Each thread * should calculate at most numYPerStep updates. It should still only use * global memory. * * @param next[out] Next grid state. * @param curr Current grid state. * @param gx Number of grid points in the x dimension. * @param nx Number of grid points in the x dimension to which the full * stencil can be applied (ie the number of points that are at least * order/2 grid points away from the boundar). * @param ny Number of grid points in the y dimension to which th full * stencil can be applied. * @param xcfl Courant number for x dimension. * @param ycfl Courant number for y dimension. */ template<int order, int numYPerStep> __global__ void gpuStencilLoop(float *next, const float *curr, int gx, int nx, int ny, float xcfl, float ycfl) { // TODO const int tid_x = threadIdx.x + blockDim.x*blockIdx.x; const int tid_y = threadIdx.y + numYPerStep*blockDim.y*blockIdx.y; // If the thread is not inside the domain, return if (tid_x >= nx) { return; } // Compute the boarder size const int b = order/2; for (int i = 0; i < numYPerStep; i++) { if (tid_y + i*blockDim.y >= ny) { return; } const int idx = (tid_y + i*blockDim.y + b)*gx + (tid_x + b); next[idx] = Stencil<order> (&curr[idx], gx, xcfl, ycfl); } } /** * Propagates the finite difference 2D heat diffusion solver * using the gpuStencilLoop kernel. * * Use this function to do necessary setup and propagate params.iters() * number of times. * * @param curr_grid The current state of the grid. * @param params Parameters for the finite difference computation. * @returns Time required for computation. */ double gpuComputationLoop(Grid &curr_grid, const simParams &params) { boundary_conditions BC(params); Grid next_grid(curr_grid); // TODO // TODO: Declare variables/Compute parameters. 
dim3 threads(0, 0); dim3 blocks(0, 0); // Set the size of each block const unsigned int block_dim_x = 64u; const unsigned int block_dim_y = 8u; const unsigned int numYPerStep = 8u; int nx = params.nx(); int ny = params.ny(); int gx = params.gx(); float xcfl = params.xcfl(); float ycfl = params.ycfl(); int order = params.order(); // Compute the block dimension threads.x = block_dim_x; threads.y = block_dim_y; // Assume each dimension of the block is less than 65536 // and compute the grid size blocks.x = ((unsigned int) nx + threads.x - 1)/threads.x; blocks.y = ((unsigned int) ny + threads.y*numYPerStep - 1)/(threads.y*numYPerStep); event_pair timer; start_timer(&timer); for (int i = 0; i < params.iters(); ++i) { // update the values on the boundary only BC.updateBC(next_grid.dGrid_, curr_grid.dGrid_); // TODO: Apply stencil. switch (order) { case 2: hipLaunchKernelGGL(( gpuStencilLoop<2, numYPerStep>), dim3(blocks), dim3(threads), 0, 0, next_grid.dGrid_, curr_grid.dGrid_, gx, nx, ny, xcfl, ycfl); break; case 4: hipLaunchKernelGGL(( gpuStencilLoop<4, numYPerStep>), dim3(blocks), dim3(threads), 0, 0, next_grid.dGrid_, curr_grid.dGrid_, gx, nx, ny, xcfl, ycfl); break; case 8: hipLaunchKernelGGL(( gpuStencilLoop<8, numYPerStep>), dim3(blocks), dim3(threads), 0, 0, next_grid.dGrid_, curr_grid.dGrid_, gx, nx, ny, xcfl, ycfl); break; default: printf("ERROR: Order %d not supported", order); exit(1); } check_launch("gpuStencilLoop"); Grid::swap(curr_grid, next_grid); } return stop_timer(&timer); } /** * Kernel to propagate finite difference grid from the current * time point to the next. * * This kernel should be optimized to compute finite difference updates * in blocks of size side * side using shared memory. * * @param next[out] Next grid state. * @param curr Current grid state. * @param gx Number of grid points in the x dimension. * @param gy Number of grid points in the y dimension. * @param xcfl Courant number for x dimension. * @param ycfl Courant number for y dimension. */ template<int side, int order> __global__ void gpuShared(float *next, const float *curr, int gx, int gy, float xcfl, float ycfl) { // TODO const int lane_x = threadIdx.x; const int lane_y = threadIdx.y; // Compute the boarder size const int b = order/2; __shared__ float smem_block[side*side]; const int global_col = lane_x + (blockDim.x - order)*blockIdx.x; // Load the data into shared memory if (global_col < gx) { // Compute the bound of the loop const int y_lim = min(side, gy - (side - order)*blockIdx.y); for (int i = lane_y; i < y_lim; i += blockDim.y) { const int global_row = i + (side - order)*blockIdx.y; const int global_idx = global_row*gx + global_col; const int smem_block_idx = i*blockDim.x + lane_x; smem_block[smem_block_idx] = curr[global_idx]; } } __syncthreads(); // Do the computation here if (global_col < gx - b) { if (lane_x >= b && lane_x < blockDim.x - b) { // Compute the bound of the loop const int y_lim = min(side - b, gy - b - (side - order)*blockIdx.y); for (int i = lane_y + b; i < y_lim; i += blockDim.y) { const int global_row = i + (side - order)*blockIdx.y; const int global_idx = global_row*gx + global_col; const int smem_block_idx = i*blockDim.x + lane_x; next[global_idx] = Stencil<order> (&smem_block[smem_block_idx], side, xcfl, ycfl); } } } } /** * Propagates the finite difference 2D heat diffusion solver * using the gpuShared kernel. * * Use this function to do necessary setup and propagate params.iters() * number of times. * * @param curr_grid The current state of the grid. 
* @param params Parameters for the finite difference computation. * @returns Time required for computation. */ template<int order> double gpuComputationShared(Grid &curr_grid, const simParams &params) { boundary_conditions BC(params); Grid next_grid(curr_grid); // TODO: Declare variables/Compute parameters. dim3 threads(0, 0); dim3 blocks(0, 0); // Set the size of each block const unsigned int block_dim_x = 64u; const unsigned int block_dim_y = 8u; const unsigned int smem_side = block_dim_x; int nx = params.nx(); int ny = params.ny(); int gx = params.gx(); int gy = params.gy(); float xcfl = params.xcfl(); float ycfl = params.ycfl(); // Compute the block dimension threads.x = block_dim_x; threads.y = block_dim_y; // Assume each dimension of the block is less than 65536 // and compute the grid size blocks.x = ((unsigned int) nx + threads.x - (unsigned int) order - 1)/(threads.x - (unsigned int) order); blocks.y = ((unsigned int) ny + smem_side - (unsigned int) order - 1)/(smem_side - (unsigned int) order); event_pair timer; start_timer(&timer); for (int i = 0; i < params.iters(); ++i) { // update the values on the boundary only BC.updateBC(next_grid.dGrid_, curr_grid.dGrid_); // TODO: Apply stencil. switch (order) { case 2: hipLaunchKernelGGL(( gpuShared<smem_side, 2>), dim3(blocks), dim3(threads), 0, 0, next_grid.dGrid_, curr_grid.dGrid_, gx, gy, xcfl, ycfl); break; case 4: hipLaunchKernelGGL(( gpuShared<smem_side, 4>), dim3(blocks), dim3(threads), 0, 0, next_grid.dGrid_, curr_grid.dGrid_, gx, gy, xcfl, ycfl); break; case 8: hipLaunchKernelGGL(( gpuShared<smem_side, 8>), dim3(blocks), dim3(threads), 0, 0, next_grid.dGrid_, curr_grid.dGrid_, gx, gy, xcfl, ycfl); break; default: printf("ERROR: Order %d not supported", order); exit(1); } check_launch("gpuShared"); Grid::swap(curr_grid, next_grid); } return stop_timer(&timer); }
1df403d66921595d36a21a4119c5d7c00cfd3e5d.cu
#include <math_constants.h> #include "BC.h" /** * Calculates the next finite difference step given a * grid point and step lengths. * * @param curr Pointer to the grid point that should be updated. * @param width Number of grid points in the x dimension. * @param xcfl Courant number for x dimension. * @param ycfl Courant number for y dimension. * @returns Grid value of next timestep. */ template<int order> __device__ float Stencil(const float *curr, int width, float xcfl, float ycfl) { switch (order) { case 2: return curr[0] + xcfl * (curr[-1] + curr[1] - 2.f * curr[0]) + ycfl * (curr[width] + curr[-width] - 2.f * curr[0]); case 4: return curr[0] + xcfl * (- curr[2] + 16.f * curr[1] - 30.f * curr[0] + 16.f * curr[-1] - curr[-2]) + ycfl * (- curr[2 * width] + 16.f * curr[width] - 30.f * curr[0] + 16.f * curr[-width] - curr[-2 * width]); case 8: return curr[0] + xcfl * (-9.f * curr[4] + 128.f * curr[3] - 1008.f * curr[2] + 8064.f * curr[1] - 14350.f * curr[0] + 8064.f * curr[-1] - 1008.f * curr[-2] + 128.f * curr[-3] - 9.f * curr[-4]) + ycfl * (-9.f * curr[4 * width] + 128.f * curr[3 * width] - 1008.f * curr[2 * width] + 8064.f * curr[width] - 14350.f * curr[0] + 8064.f * curr[-width] - 1008.f * curr[-2 * width] + 128.f * curr[-3 * width] - 9.f * curr[-4 * width]); default: printf("ERROR: Order %d not supported", order); return CUDART_NAN_F; } } /** * Kernel to propagate finite difference grid from the current * time point to the next. * * This kernel should be very simple and only use global memory. * * @param next[out] Next grid state. * @param curr Current grid state. * @param gx Number of grid points in the x dimension. * @param nx Number of grid points in the x dimension to which the full * stencil can be applied (ie the number of points that are at least * order/2 grid points away from the boundar). * @param ny Number of grid points in the y dimension to which th full * stencil can be applied. * @param xcfl Courant number for x dimension. * @param ycfl Courant number for y dimension. */ template<int order> __global__ void gpuStencil(float *next, const float *curr, int gx, int nx, int ny, float xcfl, float ycfl) { // TODO const int tid_x = threadIdx.x + blockDim.x*blockIdx.x; const int tid_y = threadIdx.y + blockDim.y*blockIdx.y; // If the thread is not inside the domain, return if (tid_x >= nx || tid_y >= ny) { return; } // Compute the boarder size const int b = order/2; // Compute the index of the point in 1D array const int idx = (tid_y + b)*gx + (tid_x + b); next[idx] = Stencil<order> (&curr[idx], gx, xcfl, ycfl); } /** * Propagates the finite difference 2D heat diffusion solver * using the gpuStencil kernel. * * Use this function to do necessary setup and propagate params.iters() * number of times. * * @param curr_grid The current state of the grid. * @param params Parameters for the finite difference computation. * @returns Time required for computation. */ double gpuComputation(Grid &curr_grid, const simParams &params) { boundary_conditions BC(params); Grid next_grid(curr_grid); // TODO: Declare variables/Compute parameters. 
dim3 threads(0, 0); dim3 blocks(0, 0); // Set the size of each block const unsigned int block_dim_x = 64u; const unsigned int block_dim_y = 8u; int nx = params.nx(); int ny = params.ny(); int gx = params.gx(); float xcfl = params.xcfl(); float ycfl = params.ycfl(); int order = params.order(); // Compute the block dimension threads.x = block_dim_x; threads.y = block_dim_y; // Assume each dimension of the block is less than 65536 // and compute the grid size blocks.x = ((unsigned int) nx + threads.x - 1)/threads.x; blocks.y = ((unsigned int) ny + threads.y - 1)/threads.y; event_pair timer; start_timer(&timer); for (int i = 0; i < params.iters(); ++i) { // update the values on the boundary only BC.updateBC(next_grid.dGrid_, curr_grid.dGrid_); // TODO: Apply stencil. switch (order) { case 2: gpuStencil<2><<<blocks, threads>>>(next_grid.dGrid_, curr_grid.dGrid_, gx, nx, ny, xcfl, ycfl); break; case 4: gpuStencil<4><<<blocks, threads>>>(next_grid.dGrid_, curr_grid.dGrid_, gx, nx, ny, xcfl, ycfl); break; case 8: gpuStencil<8><<<blocks, threads>>>(next_grid.dGrid_, curr_grid.dGrid_, gx, nx, ny, xcfl, ycfl); break; default: printf("ERROR: Order %d not supported", order); exit(1); } check_launch("gpuStencil"); Grid::swap(curr_grid, next_grid); } return stop_timer(&timer); } /** * Kernel to propagate finite difference grid from the current * time point to the next. * * This kernel should be optimized to compute finite difference updates * in blocks of size (blockDim.y * numYPerStep) * blockDim.x. Each thread * should calculate at most numYPerStep updates. It should still only use * global memory. * * @param next[out] Next grid state. * @param curr Current grid state. * @param gx Number of grid points in the x dimension. * @param nx Number of grid points in the x dimension to which the full * stencil can be applied (ie the number of points that are at least * order/2 grid points away from the boundar). * @param ny Number of grid points in the y dimension to which th full * stencil can be applied. * @param xcfl Courant number for x dimension. * @param ycfl Courant number for y dimension. */ template<int order, int numYPerStep> __global__ void gpuStencilLoop(float *next, const float *curr, int gx, int nx, int ny, float xcfl, float ycfl) { // TODO const int tid_x = threadIdx.x + blockDim.x*blockIdx.x; const int tid_y = threadIdx.y + numYPerStep*blockDim.y*blockIdx.y; // If the thread is not inside the domain, return if (tid_x >= nx) { return; } // Compute the boarder size const int b = order/2; for (int i = 0; i < numYPerStep; i++) { if (tid_y + i*blockDim.y >= ny) { return; } const int idx = (tid_y + i*blockDim.y + b)*gx + (tid_x + b); next[idx] = Stencil<order> (&curr[idx], gx, xcfl, ycfl); } } /** * Propagates the finite difference 2D heat diffusion solver * using the gpuStencilLoop kernel. * * Use this function to do necessary setup and propagate params.iters() * number of times. * * @param curr_grid The current state of the grid. * @param params Parameters for the finite difference computation. * @returns Time required for computation. */ double gpuComputationLoop(Grid &curr_grid, const simParams &params) { boundary_conditions BC(params); Grid next_grid(curr_grid); // TODO // TODO: Declare variables/Compute parameters. 
dim3 threads(0, 0); dim3 blocks(0, 0); // Set the size of each block const unsigned int block_dim_x = 64u; const unsigned int block_dim_y = 8u; const unsigned int numYPerStep = 8u; int nx = params.nx(); int ny = params.ny(); int gx = params.gx(); float xcfl = params.xcfl(); float ycfl = params.ycfl(); int order = params.order(); // Compute the block dimension threads.x = block_dim_x; threads.y = block_dim_y; // Assume each dimension of the block is less than 65536 // and compute the grid size blocks.x = ((unsigned int) nx + threads.x - 1)/threads.x; blocks.y = ((unsigned int) ny + threads.y*numYPerStep - 1)/(threads.y*numYPerStep); event_pair timer; start_timer(&timer); for (int i = 0; i < params.iters(); ++i) { // update the values on the boundary only BC.updateBC(next_grid.dGrid_, curr_grid.dGrid_); // TODO: Apply stencil. switch (order) { case 2: gpuStencilLoop<2, numYPerStep><<<blocks, threads>>>(next_grid.dGrid_, curr_grid.dGrid_, gx, nx, ny, xcfl, ycfl); break; case 4: gpuStencilLoop<4, numYPerStep><<<blocks, threads>>>(next_grid.dGrid_, curr_grid.dGrid_, gx, nx, ny, xcfl, ycfl); break; case 8: gpuStencilLoop<8, numYPerStep><<<blocks, threads>>>(next_grid.dGrid_, curr_grid.dGrid_, gx, nx, ny, xcfl, ycfl); break; default: printf("ERROR: Order %d not supported", order); exit(1); } check_launch("gpuStencilLoop"); Grid::swap(curr_grid, next_grid); } return stop_timer(&timer); } /** * Kernel to propagate finite difference grid from the current * time point to the next. * * This kernel should be optimized to compute finite difference updates * in blocks of size side * side using shared memory. * * @param next[out] Next grid state. * @param curr Current grid state. * @param gx Number of grid points in the x dimension. * @param gy Number of grid points in the y dimension. * @param xcfl Courant number for x dimension. * @param ycfl Courant number for y dimension. */ template<int side, int order> __global__ void gpuShared(float *next, const float *curr, int gx, int gy, float xcfl, float ycfl) { // TODO const int lane_x = threadIdx.x; const int lane_y = threadIdx.y; // Compute the boarder size const int b = order/2; __shared__ float smem_block[side*side]; const int global_col = lane_x + (blockDim.x - order)*blockIdx.x; // Load the data into shared memory if (global_col < gx) { // Compute the bound of the loop const int y_lim = min(side, gy - (side - order)*blockIdx.y); for (int i = lane_y; i < y_lim; i += blockDim.y) { const int global_row = i + (side - order)*blockIdx.y; const int global_idx = global_row*gx + global_col; const int smem_block_idx = i*blockDim.x + lane_x; smem_block[smem_block_idx] = curr[global_idx]; } } __syncthreads(); // Do the computation here if (global_col < gx - b) { if (lane_x >= b && lane_x < blockDim.x - b) { // Compute the bound of the loop const int y_lim = min(side - b, gy - b - (side - order)*blockIdx.y); for (int i = lane_y + b; i < y_lim; i += blockDim.y) { const int global_row = i + (side - order)*blockIdx.y; const int global_idx = global_row*gx + global_col; const int smem_block_idx = i*blockDim.x + lane_x; next[global_idx] = Stencil<order> (&smem_block[smem_block_idx], side, xcfl, ycfl); } } } } /** * Propagates the finite difference 2D heat diffusion solver * using the gpuShared kernel. * * Use this function to do necessary setup and propagate params.iters() * number of times. * * @param curr_grid The current state of the grid. * @param params Parameters for the finite difference computation. * @returns Time required for computation. 
 */
template<int order>
double gpuComputationShared(Grid &curr_grid, const simParams &params) {

    boundary_conditions BC(params);

    Grid next_grid(curr_grid);

    // TODO: Declare variables/Compute parameters.
    dim3 threads(0, 0);
    dim3 blocks(0, 0);

    // Set the size of each block
    const unsigned int block_dim_x = 64u;
    const unsigned int block_dim_y = 8u;
    const unsigned int smem_side = block_dim_x;

    int nx = params.nx();
    int ny = params.ny();
    int gx = params.gx();
    int gy = params.gy();
    float xcfl = params.xcfl();
    float ycfl = params.ycfl();

    // Compute the block dimension
    threads.x = block_dim_x;
    threads.y = block_dim_y;

    // Assume each dimension of the block is less than 65536
    // and compute the grid size
    blocks.x = ((unsigned int) nx + threads.x - (unsigned int) order - 1)/(threads.x - (unsigned int) order);
    blocks.y = ((unsigned int) ny + smem_side - (unsigned int) order - 1)/(smem_side - (unsigned int) order);

    event_pair timer;
    start_timer(&timer);

    for (int i = 0; i < params.iters(); ++i) {

        // update the values on the boundary only
        BC.updateBC(next_grid.dGrid_, curr_grid.dGrid_);

        // TODO: Apply stencil.
        switch (order) {
            case 2:
                gpuShared<smem_side, 2><<<blocks, threads>>>(next_grid.dGrid_,
                        curr_grid.dGrid_, gx, gy, xcfl, ycfl);
                break;

            case 4:
                gpuShared<smem_side, 4><<<blocks, threads>>>(next_grid.dGrid_,
                        curr_grid.dGrid_, gx, gy, xcfl, ycfl);
                break;

            case 8:
                gpuShared<smem_side, 8><<<blocks, threads>>>(next_grid.dGrid_,
                        curr_grid.dGrid_, gx, gy, xcfl, ycfl);
                break;

            default:
                printf("ERROR: Order %d not supported", order);
                exit(1);
        }

        check_launch("gpuShared");

        Grid::swap(curr_grid, next_grid);
    }

    return stop_timer(&timer);
}
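/* Editor's note (hedged sketch, not part of the original file): the order-2 branch of
 * Stencil() above is the standard 5-point explicit heat update,
 *     u_new = u + xcfl*(u_west + u_east - 2u) + ycfl*(u_south + u_north - 2u).
 * A minimal CPU reference of that single case can help spot-check kernel output on
 * small grids. It assumes the same row-major layout with row stride gx that the
 * kernels use and a one-cell boundary ring (order 2 only); the function name and
 * loop bounds below are illustrative, not part of the assignment code.
 */
static void cpuStencilOrder2(float *next, const float *curr, int gx, int gy,
                             float xcfl, float ycfl) {
    // Interior points only; the GPU code updates the boundary ring separately via BC.
    for (int y = 1; y < gy - 1; ++y) {
        for (int x = 1; x < gx - 1; ++x) {
            const int i = y * gx + x;
            next[i] = curr[i]
                    + xcfl * (curr[i - 1]  + curr[i + 1]  - 2.f * curr[i])
                    + ycfl * (curr[i - gx] + curr[i + gx] - 2.f * curr[i]);
        }
    }
}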
8d61800ba718eeb88852305336a8a024b74ff839.hip
// !!! This is a file automatically generated by hipify!!! extern "C" { #include "../shape/head.h" } //__host__ void copy_par_to_device(struct par_t *hpar) //{ // /* NOTE: The double pointer dev_par_fpntr contains pointers that point to // * host memory. This won't work. Fix later. Though kernel // * debugging shows dev_par_fpartype = 105.6... // */ // int size_int = sizeof(int)*hpar->nfpar; // int size_dbl = sizeof(double)*hpar->nfpar; // int size_dblpntr = sizeof(double*)*hpar->nfpar; // int size_par = sizeof(hpar); // // gpuErrchk(hipMalloc((void**)&dev_par, size_par)); // gpuErrchk(hipMalloc((void**)&dev_par_fparstep, size_dbl)); // gpuErrchk(hipMalloc((void**)&dev_par_fpartol, size_dbl)); // gpuErrchk(hipMalloc((void**)&dev_par_fparabstol, size_dbl)); // gpuErrchk(hipMalloc((void**)&dev_par_fpntr, size_dblpntr)); // gpuErrchk(hipMalloc((void**)&dev_par_fpartype, size_int)); // // gpuErrchk(hipMemcpy(dev_par, hpar, size_par, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_par_fparstep, hpar->fparstep, size_dbl, // hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_par_fpartol, hpar->fpartol, size_dbl, // hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_par_fparabstol, hpar->fparabstol, size_dbl, // hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_par_fpntr, hpar->fpntr, size_dblpntr, // hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_par_fpartype, hpar->fpartype, size_int, // hipMemcpyHostToDevice)); // //} // //void copy_CUDA_structs(struct par_t *hpar, struct mod_t *hmod, struct dat_t *hdat) //{ // gpuErrchk(hipMalloc((void**)&dev_par, sizeof(struct par_t)*1)); // gpuErrchk(hipMalloc((void**)&dev_mod, sizeof(hmod)*1)); // gpuErrchk(hipMalloc((void**)&dev_dat, sizeof(hdat)*1)); // // gpuErrchk(hipMemcpy(dev_par, &hpar, sizeof(struct par_t), hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod, &hmod, sizeof(hmod), hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_dat, &hdat, sizeof(hdat), hipMemcpyHostToDevice)); // //} __host__ void gpuAssert(hipError_t code, const char *file, int line) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); exit(code); } } /* To-Do: finish allocating double and triple pointers. */ /* To-Do: anything with structures inside structures at the endpoints (such as lots of param_t's at the end of long chain structures) * may need to have those final param_t's declared, allocated, copied * NOTE: Most of the commented out allocations are those of vectors declared with definite size, i.e. 
double x[3] */ //__host__ void copy_mod_to_device(struct mod_t *hmod) { // /* Assumes single component model */ // /*.................................................................................................................*/ // // /* Allocate and copy the main parent structure first */ // gpuErrchk(hipMalloc((void**)&dev_mod, sizeof(hmod)*1)); // gpuErrchk(hipMemcpy(dev_mod, &hmod, sizeof(hmod), hipMemcpyHostToDevice)); // // /* Allocate mod->spin memory */ // int angsz1 = sizeof(struct param_t) * 3; // int dblsz = sizeof(double) * MAXIMP; // int angsz2 = sizeof(struct param_t) * MAXIMP * 3; // // // gpuErrchk(hipMalloc((void**)&dev_mod_spin, sizeof(hmod->spin))); // //gpuErrchk(hipMalloc((void**)&dev_mod_spin_angle, sizeof(hmod->spin.angle))); // // gpuErrchk(hipMalloc((void**)&dev_mod_spin_omega, sizeof(hmod->spin.omega))); // // gpuErrchk(hipMalloc((void**)&dev_mod_spin_omegadot, sizeof(hmod->spin.omegadot))); // // gpuErrchk(hipMalloc((void**)&dev_mod_spin_t_impulse, sizeof(hmod->spin.t_impulse))); // // gpuErrchk(hipMalloc((void**)&dev_mod_spin_impulse, sizeof(hmod->spin.impulse))); // // gpuErrchk(hipMalloc((void**)&dev_mod_spin_inertia, sizeof(hmod->spin.inertia))); // // /* Copy mod->spin contents */ // gpuErrchk(hipMemcpy(dev_mod_spin, &hmod->spin, sizeof(hmod->spin), hipMemcpyHostToDevice)); // //gpuErrchk(hipMemcpy(dev_mod_spin_angle, &hmod->spin.angle, angsz1, hipMemcpyHostToDevice)); // //gpuErrchk(hipMemcpy(dev_mod_spin_omega, hmod->spin.omega, angsz1, hipMemcpyHostToDevice)); // //gpuErrchk(hipMemcpy(dev_mod_spin_omegadot, &hmod->spin.omegadot, angsz1, hipMemcpyHostToDevice)); // //gpuErrchk(hipMemcpy(dev_mod_spin_t_impulse, hmod->spin.t_impulse, dblsz, hipMemcpyHostToDevice)); // //gpuErrchk(hipMemcpy(dev_mod_spin_impulse, &hmod->spin.impulse, angsz2, hipMemcpyHostToDevice)); // //gpuErrchk(hipMemcpy(dev_mod_spin_inertia, &hmod->spin.inertia, angsz1, hipMemcpyHostToDevice)); // // /*..................................................................................................................*/ // // /* Allocate mod->shape */ // /* mod->shape.comp[0].real (vertices_t structure) */ // int cmp_sz = sizeof(hmod->shape.comp[0]); // int shp_sz = sizeof(hmod->shape); // int inertia_sz = sizeof(double) * 9; // int off_sz = sizeof(struct param_t) * 3; // int ver_sz = sizeof(struct vertices_t); // int f_sz = sizeof(struct facet_t); // int s_sz = sizeof(struct side_t); // int v_sz = sizeof(struct vertex_t); // int ns = hmod->shape.comp[0].real.ns; // int nf = hmod->shape.comp[0].real.nf; // int nv = hmod->shape.comp[0].real.nv; // int afactor = sizeof(double*) * nv; // int int_sz = sizeof(int); // int pint_sz = sizeof(int*); // int dbl_sz = sizeof(double); // int par_sz = sizeof(struct param_t); // // gpuErrchk(hipMalloc((void**)&dev_mod_shape, shp_sz)); // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp, cmp_sz)); // // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_inertia, inertia_sz)); // // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_com, inertia_sz/3)); // // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_m, inertia_sz)); // // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_off, off_sz)); // // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_rot, off_sz)); // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_real, ver_sz)); // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_real_f, f_sz*nf)); // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_real_v, v_sz*nv)); // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_real_s, s_sz*ns)); // 
gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_real_s_v[2], ns*int_sz)); // dbl *[] // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_real_s_f[2], ns*int_sz)); // dbl *[] // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_real_v_a[3], ns*dbl_sz)); // dbl *[] // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_real_v_u[3], ns*dbl_sz)); // dbl *[] // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_real_v_x[3], ns*dbl_sz)); // dbl *[] // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_real_v_n[3], ns*dbl_sz)); // dbl *[] // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_real_v_af, nv*pint_sz)); // dbl ** // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_real_v_as, nv*pint_sz)); // dbl ** // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_real_v_afactor, afactor)); // dbl *** // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_real_v_bfactor, afactor)); // dbl *** // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_real_f_v[3], nf*int_sz)); // dbl *[] // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_real_f_s[3], nf*int_sz)); // dbl *[] // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_real_f_n[3], nf*dbl_sz)); // dbl *[] // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_real_f_x[3], nf*dbl_sz)); // dbl *[] // // /* Copy mod->shape contents */ // gpuErrchk(hipMemcpy(dev_mod_shape, &hmod->shape, shp_sz, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp, &hmod->shape.comp[0], cmp_sz, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_inertia, &hmod->shape.comp[0].inertia, inertia_sz, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_com, &hmod->shape.comp[0].com, inertia_sz/3, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_m, &hmod->shape.comp[0].m, inertia_sz, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_off, &hmod->shape.comp[0].off, off_sz, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_rot, &hmod->shape.comp[0].rot, off_sz, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_real,&hmod->shape.comp[0].real,ver_sz, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_real_f, &hmod->shape.comp[0].real.f, f_sz*nf, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_real_v, &hmod->shape.comp[0].real.v, v_sz*nv, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_real_s, &hmod->shape.comp[0].real.s, s_sz*ns, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_real_s, &hmod->shape.comp[0].real.s->v, 2*ns*int_sz, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_real_f, &hmod->shape.comp[0].real.s->f, 2*ns*int_sz, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_real_v_a, &hmod->shape.comp[0].real.v->a, 3*nv*dbl_sz, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_real_v_u, &hmod->shape.comp[0].real.v->u, 3*nv*dbl_sz, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_real_v_x, &hmod->shape.comp[0].real.v->x, 3*nv*dbl_sz, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_real_v_n, &hmod->shape.comp[0].real.v->n, 3*nv*dbl_sz, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_real_v_af, &hmod->shape.comp[0].real.v->af, nv*pint_sz, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_real_v_as, &hmod->shape.comp[0].real.v->as, nv*pint_sz, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_real_v_afactor, &hmod->shape.comp[0].real.v->afactor, afactor, hipMemcpyHostToDevice)); // 
gpuErrchk(hipMemcpy(dev_mod_shape_comp_real_v_bfactor, &hmod->shape.comp[0].real.v->bfactor, afactor, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_real_f_v, &hmod->shape.comp[0].real.f->v, 3*nf*int_sz, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_real_f_s, &hmod->shape.comp[0].real.f->s, 3*nf*int_sz, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_real_f_n, &hmod->shape.comp[0].real.f->n, 3*nf*dbl_sz, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_real_f_x, &hmod->shape.comp[0].real.f->x, 3*nf*dbl_sz, hipMemcpyHostToDevice)); // // // if (hmod->shape.comp[0].type == ELLIPSE) { // int ell_sz = sizeof(struct ellipse_t); // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_desc_ell, ell_sz)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_desc_ell, &hmod->shape.comp[0].desc.ell, ell_sz, hipMemcpyHostToDevice)); // } // // if (hmod->shape.comp[0].type == OVOID) { // int ov_sz = sizeof(struct ovoid_t); // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_desc_ovoid, ov_sz)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_desc_ovoid, &hmod->shape.comp[0].desc.ovoid, ov_sz, hipMemcpyHostToDevice)); // } // // /* To-Do: These double pointer allocations and memcpy's need attention */ // if (hmod->shape.comp[0].type == HARMONIC) { // int har_sz = sizeof(struct harmonic_t); // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_desc_har, har_sz)); // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_desc_har_a, par_sz)); // dbl ** // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_desc_har_b, par_sz)); // dbl ** // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_desc_har_a_save, dbl_sz)); // dbl ** // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_desc_har_b_save, dbl_sz)); // dbl ** // // gpuErrchk(hipMemcpy(dev_mod_shape_comp_desc_har, &hmod->shape.comp[0].desc.har, har_sz, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_desc_har_a, &hmod->shape.comp[0].desc.har.a, par_sz, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_desc_har_b, &hmod->shape.comp[0].desc.har.b, par_sz, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_desc_har_a_save, &hmod->shape.comp[0].desc.har.a_save, dbl_sz, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_desc_har_b_save, &hmod->shape.comp[0].desc.har.b_save, dbl_sz, hipMemcpyHostToDevice)); // } // // if (hmod->shape.comp[0].type == VERTEX) { // int nv1 = hmod->shape.comp[0].desc.ver.nv; // int ns1 = hmod->shape.comp[0].desc.ver.ns; // int nf1 = hmod->shape.comp[0].desc.ver.nf; // int dpt_sz = sizeof(double*); // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_desc_ver, ver_sz)); // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_desc_ver_s, s_sz*ns1)); // // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_desc_ver_s_v[2], int_sz*ns1)); // // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_desc_ver_s_f[2], int_sz*ns1)); // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_desc_ver_f, f_sz*nf1)); // // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_desc_ver_f_v[3], int_sz*nf1)); // // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_desc_ver_f_s[3], int_sz*nf1)); // // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_desc_ver_f_n[3], dbl_sz*nf1)); // // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_desc_ver_f_x[3], dbl_sz*nf1)); // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_desc_ver_v, v_sz*nv1)); // // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_desc_ver_v_a[3], dbl_sz*nv1)); // // 
gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_desc_ver_v_u[3], dbl_sz*nv1)); // // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_desc_ver_v_x[3], dbl_sz*nv1)); // // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_desc_ver_v_n[3], dbl_sz*nv1)); // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_desc_ver_v_af, nv1*int_sz)); // dbl ** // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_desc_ver_v_as, nv1*int_sz)); // dbl ** // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_desc_ver_v_afactor, nv1*dbl_sz)); // dbl *** // gpuErrchk(hipMalloc((void**)&dev_mod_shape_comp_desc_ver_v_bfactor, nv1*dbl_sz)); // dbl *** // // gpuErrchk(hipMemcpy(dev_mod_shape_comp_desc_ver, &hmod->shape.comp[0].desc.ver, ver_sz, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_desc_ver_s, &hmod->shape.comp[0].desc.ver.s, s_sz*ns1, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_desc_ver_s_v, &hmod->shape.comp[0].desc.ver.s->v, 2*int_sz*ns1, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_desc_ver_s_f, &hmod->shape.comp[0].desc.ver.s->f, 2*int_sz*ns1, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_desc_ver_f, &hmod->shape.comp[0].desc.ver.f, f_sz*nf1, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_desc_ver_f_v, &hmod->shape.comp[0].desc.ver.f->v, 3*int_sz*nf1, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_desc_ver_f_s, &hmod->shape.comp[0].desc.ver.f->s, 3*int_sz*nf1, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_desc_ver_f_n, &hmod->shape.comp[0].desc.ver.f->n, 3*dbl_sz*nf1, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_desc_ver_f_x, &hmod->shape.comp[0].desc.ver.f->x, 3*dbl_sz*nf1, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_desc_ver_v, &hmod->shape.comp[0].desc.ver.v, v_sz*nv1, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_desc_ver_v_a, &hmod->shape.comp[0].desc.ver.v->a, 3*int_sz*nf1, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_desc_ver_v_u, &hmod->shape.comp[0].desc.ver.v->u, 3*int_sz*nf1, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_desc_ver_v_x, &hmod->shape.comp[0].desc.ver.v->x, 3*dbl_sz*nf1, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_desc_ver_v_n, &hmod->shape.comp[0].desc.ver.v->n, 3*dbl_sz*nf1, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_desc_ver_v_af, &hmod->shape.comp[0].desc.ver.v->af, pint_sz, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_desc_ver_v_as, &hmod->shape.comp[0].desc.ver.v->as, pint_sz, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_desc_ver_v_afactor, &hmod->shape.comp[0].desc.ver.v->afactor, dpt_sz, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_shape_comp_desc_ver_v_bfactor, &hmod->shape.comp[0].desc.ver.v->bfactor, dpt_sz, hipMemcpyHostToDevice)); // } // // /*............................................................................................................*/ // /* Allocate mod->photo device pointers */ // /* NOTE: the following allocation and copying of mod->photo contents assumes nrl = nol = 0 or 1, but no more */ // /* First some helper variables for sizing */ // int nrl = hmod->photo.nradlaws; // int nol = hmod->photo.noptlaws; // int u_sz = sizeof(unsigned char); // int pho_sz = sizeof(struct photo_t); // int tab_sz = sizeof(struct tabular_t); // int rc_sz = sizeof(struct RC_t); // int rcpt_sz = sizeof(struct RC_t*); // int 
parptr_sz = sizeof(struct param_t*) * hmod->photo.radar->tabular.n; // int dblptr_sz = sizeof(double*) * hmod->photo.radar->tabular.n; // int qspc_sz = sizeof(struct quasispec_t); // int hyr_sz = sizeof(struct hybridradar_t); // int hrcs_sz = sizeof(struct harmcosine_t); // int incs_sz = sizeof(struct inhocosine_t); // int r_sz = sizeof(struct R_t); // int rpt_sz = sizeof(struct R_t*); // int hmR_sz = sizeof(struct harmR_t); // int inho_sz = sizeof(struct inhoR_t); // int hpk_sz = sizeof(struct hapke_t); // int phpk_sz = sizeof(struct hapke_t*); // int hmhpk_sz = sizeof(struct harmhapke_t); // int inhpk_sz = sizeof(struct inhohapke_t); // int kas_sz = sizeof(struct kaas_t); // int pkas_sz = sizeof(struct kaas_t*); // int hmkas_sz = sizeof(struct harmkaas_t); // int inkas_sz = sizeof(struct inhokaas_t); // // /* Allocate gpu copy of mod->photo and memcpy */ // gpuErrchk(hipMalloc((void**)&dev_mod_photo, sizeof(hmod->photo))); // gpuErrchk(hipMalloc((void**)&dev_mod_photo_radtype, u_sz*nrl)); // gpuErrchk(hipMalloc((void**)&dev_mod_photo_opttype, u_sz*nol)); // gpuErrchk(hipMemcpy(dev_mod_photo, &hmod->photo, pho_sz, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_photo_radtype, &hmod->photo.radtype, u_sz*nrl, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_photo_opttype, &hmod->photo.opttype, u_sz*nol, hipMemcpyHostToDevice)); // // /* Check for nrl and nol being 0 or 1 */ // if (nrl >= 1) { /* This checks that nradlaws is at least 1 */ // // if (nrl > 1) // printf("\nShape-cuda V1.0 currently supports only one optical and one radar law maximum."); // // /* Allocate gpu copy of mod->photo.radar and memcpy, depending on type */ // if (hmod->photo.radtype[0] == COSINELAW_DIFF) { // // gpuErrchk(hipMalloc((void**)&dev_mod_photo_radar_RC, rc_sz)); // gpuErrchk(hipMemcpy(dev_mod_photo_radar_RC, &hmod->photo.radar->RC, rc_sz, hipMemcpyHostToDevice)); // } // if (hmod->photo.radtype[0] == TABULARLAW) { // gpuErrchk(hipMalloc((void**)&dev_mod_photo_radar_tabular, tab_sz)); // gpuErrchk(hipMalloc((void**)&dev_mod_photo_radar_tabular_rho, parptr_sz)); // gpuErrchk(hipMalloc((void**)&dev_mod_photo_radar_tabular_rho_save, dblptr_sz)); // gpuErrchk(hipMemcpy(dev_mod_photo_radar_tabular, &hmod->photo.radar->tabular, tab_sz, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_photo_radar_tabular_rho, &hmod->photo.radar->tabular.rho, parptr_sz, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_photo_radar_tabular_rho_save, &hmod->photo.radar->tabular.rho_save, dblptr_sz, hipMemcpyHostToDevice)); // } // if (hmod->photo.radtype[0] == GAUSSIANLAW || hmod->photo.radtype[0] == HAGFORSLAW || hmod->photo.radtype[0] == COSINELAW_QS) { // gpuErrchk(hipMalloc((void**)&dev_mod_photo_radar_quasispec, qspc_sz)); // gpuErrchk(hipMemcpy(dev_mod_photo_radar_quasispec, &hmod->photo.radar->quasispec, qspc_sz, hipMemcpyHostToDevice)); // } // if (hmod->photo.radtype[0] == GAUSSIAN_COSINE || hmod->photo.radtype[0] == HAGFORS_COSINE || hmod->photo.radtype[0] == COSINE_COSINE) { // gpuErrchk(hipMalloc((void**)&dev_mod_photo_radar_hybrid, hyr_sz)); // gpuErrchk(hipMemcpy(dev_mod_photo_radar_hybrid, &hmod->photo.radar->hybrid, hyr_sz, hipMemcpyHostToDevice)); // } // if (hmod->photo.radtype[0] == HARMCOSINE_DIFF) { // gpuErrchk(hipMalloc((void**)&dev_mod_photo_radar_harmcosine, hrcs_sz)); // gpuErrchk(hipMalloc((void**)&dev_mod_photo_radar_harmcosine_local, rcpt_sz)); // so what is this - a [][] pointer of RC_t's. Per pixel? How many? 
// gpuErrchk(hipMemcpy(dev_mod_photo_radar_harmcosine, &hmod->photo.radar->harmcosine, hrcs_sz, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_photo_radar_harmcosine_local, &hmod->photo.radar->harmcosine.local, rcpt_sz, hipMemcpyHostToDevice)); //dbl pointer // } // if (hmod->photo.radtype[0] == INHOCOSINE_DIFF) { // gpuErrchk(hipMalloc((void**)&dev_mod_photo_radar_inhocosine, incs_sz)); // gpuErrchk(hipMalloc((void**)&dev_mod_photo_radar_inhocosine_local, rcpt_sz)); // so what is this - a [][] pointer of RC_t's. Per pixel? How many? // gpuErrchk(hipMemcpy(dev_mod_photo_radar_inhocosine, &hmod->photo.radar->inhocosine, incs_sz, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_photo_radar_inhocosine_local, &hmod->photo.radar->inhocosine.local, rcpt_sz, hipMemcpyHostToDevice)); //dbl pointer // } // } // // if (nol >= 1) {/* This checks that noptlaws is at least 1 */ // // if (nol > 1) // printf("\nShape-cuda V1.0 currently supports only one optical and one radar law maximum."); // // /* Allocate gpu copy of mod->photo.optical and memcpy, depending on type */ // if (hmod->photo.opttype[0] == GEOMETRICAL || hmod->photo.opttype[0] == LAMBERTLAW || hmod->photo.opttype[0] == LOMMEL) { // gpuErrchk(hipMalloc((void**)&dev_mod_photo_optical_R, r_sz)); // gpuErrchk(hipMemcpy(dev_mod_photo_optical_R, &hmod->photo.optical->R, r_sz, hipMemcpyHostToDevice)); // } // if (hmod->photo.opttype[0] == HARMLAMBERT || hmod->photo.opttype[0] == HARMLOMMEL) { // gpuErrchk(hipMalloc((void**)&dev_mod_photo_optical_harmR, hmR_sz)); // gpuErrchk(hipMalloc((void**)&dev_mod_photo_optical_harmR_local, rpt_sz)); // double pointer, but how many? // gpuErrchk(hipMemcpy(dev_mod_photo_optical_harmR, &hmod->photo.optical->harmR, hmR_sz, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_photo_optical_harmR_local, &hmod->photo.optical->harmR.local, rpt_sz, hipMemcpyHostToDevice)); // } // if (hmod->photo.opttype[0] == INHOLAMBERT || hmod->photo.opttype[0] == INHOLOMMEL) { // gpuErrchk(hipMalloc((void**)&dev_mod_photo_optical_inhoR, inho_sz)); // gpuErrchk(hipMalloc((void**)&dev_mod_photo_optical_inhoR_local, rpt_sz)); // double pointer, but how many? // gpuErrchk(hipMemcpy(dev_mod_photo_optical_inhoR, &hmod->photo.optical->inhoR, inho_sz, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_photo_optical_inhoR_local, &hmod->photo.optical->inhoR.local, rpt_sz, hipMemcpyHostToDevice)); // } // if (hmod->photo.opttype[0] == HAPKE) { // gpuErrchk(hipMalloc((void**)&dev_mod_photo_optical_hapke, hpk_sz)); // gpuErrchk(hipMemcpy(dev_mod_photo_optical_hapke, &hmod->photo.optical->hapke, hpk_sz, hipMemcpyHostToDevice)); // } // if (hmod->photo.opttype[0] == HARMHAPKE) { // gpuErrchk(hipMalloc((void**)&dev_mod_photo_optical_harmhapke, hmhpk_sz)); // gpuErrchk(hipMalloc((void**)&dev_mod_photo_optical_harmhapke_local, phpk_sz)); // double pointer, but how many? // gpuErrchk(hipMemcpy(dev_mod_photo_optical_harmhapke, &hmod->photo.optical->harmhapke, hmhpk_sz, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_photo_optical_harmhapke_local, &hmod->photo.optical->harmhapke.local, phpk_sz, hipMemcpyHostToDevice)); // } // if (hmod->photo.opttype[0] == INHOHAPKE) { // gpuErrchk(hipMalloc((void**)&dev_mod_photo_optical_inhohapke, inhpk_sz)); // gpuErrchk(hipMalloc((void**)&dev_mod_photo_optical_inhohapke_local, phpk_sz)); // double pointer, but how many? 
// gpuErrchk(hipMemcpy(dev_mod_photo_optical_inhohapke, &hmod->photo.optical->inhohapke, inhpk_sz, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_photo_optical_inhohapke_local, &hmod->photo.optical->inhohapke.local, phpk_sz, hipMemcpyHostToDevice)); // } // if (hmod->photo.opttype[0] == KAASALAINEN) { // gpuErrchk(hipMalloc((void**)&dev_mod_photo_optical_kaas, kas_sz)); // gpuErrchk(hipMemcpy(dev_mod_photo_optical_kaas, &hmod->photo.optical->kaas, kas_sz, hipMemcpyHostToDevice)); // } // if (hmod->photo.opttype[0] == HARMKAAS) { // gpuErrchk(hipMalloc((void**)&dev_mod_photo_optical_harmkaas, hmkas_sz)); // gpuErrchk(hipMalloc((void**)&dev_mod_photo_optical_harmkaas_local, pkas_sz)); // double pointer, but how many? // gpuErrchk(hipMemcpy(dev_mod_photo_optical_harmkaas, &hmod->photo.optical->harmkaas, hmkas_sz, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_photo_optical_harmkaas_local, &hmod->photo.optical->harmkaas.local, pkas_sz, hipMemcpyHostToDevice)); // } // if (hmod->photo.opttype[0] == INHOKAAS) { // gpuErrchk(hipMalloc((void**)&dev_mod_photo_optical_inhokaas, inkas_sz)); // gpuErrchk(hipMalloc((void**)&dev_mod_photo_optical_inhokaas_local, pkas_sz)); // double pointer, but how many? // gpuErrchk(hipMemcpy(dev_mod_photo_optical_inhokaas, &hmod->photo.optical->inhokaas, inkas_sz, hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(dev_mod_photo_optical_inhokaas_local, &hmod->photo.optical->inhokaas.local, pkas_sz, hipMemcpyHostToDevice)); // } // } //}
8d61800ba718eeb88852305336a8a024b74ff839.cu
extern "C" { #include "../shape/head.h" } //__host__ void copy_par_to_device(struct par_t *hpar) //{ // /* NOTE: The double pointer dev_par_fpntr contains pointers that point to // * host memory. This won't work. Fix later. Though kernel // * debugging shows dev_par_fpartype = 105.6... // */ // int size_int = sizeof(int)*hpar->nfpar; // int size_dbl = sizeof(double)*hpar->nfpar; // int size_dblpntr = sizeof(double*)*hpar->nfpar; // int size_par = sizeof(hpar); // // gpuErrchk(cudaMalloc((void**)&dev_par, size_par)); // gpuErrchk(cudaMalloc((void**)&dev_par_fparstep, size_dbl)); // gpuErrchk(cudaMalloc((void**)&dev_par_fpartol, size_dbl)); // gpuErrchk(cudaMalloc((void**)&dev_par_fparabstol, size_dbl)); // gpuErrchk(cudaMalloc((void**)&dev_par_fpntr, size_dblpntr)); // gpuErrchk(cudaMalloc((void**)&dev_par_fpartype, size_int)); // // gpuErrchk(cudaMemcpy(dev_par, hpar, size_par, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_par_fparstep, hpar->fparstep, size_dbl, // cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_par_fpartol, hpar->fpartol, size_dbl, // cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_par_fparabstol, hpar->fparabstol, size_dbl, // cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_par_fpntr, hpar->fpntr, size_dblpntr, // cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_par_fpartype, hpar->fpartype, size_int, // cudaMemcpyHostToDevice)); // //} // //void copy_CUDA_structs(struct par_t *hpar, struct mod_t *hmod, struct dat_t *hdat) //{ // gpuErrchk(cudaMalloc((void**)&dev_par, sizeof(struct par_t)*1)); // gpuErrchk(cudaMalloc((void**)&dev_mod, sizeof(hmod)*1)); // gpuErrchk(cudaMalloc((void**)&dev_dat, sizeof(hdat)*1)); // // gpuErrchk(cudaMemcpy(dev_par, &hpar, sizeof(struct par_t), cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod, &hmod, sizeof(hmod), cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_dat, &hdat, sizeof(hdat), cudaMemcpyHostToDevice)); // //} __host__ void gpuAssert(cudaError_t code, const char *file, int line) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); exit(code); } } /* To-Do: finish allocating double and triple pointers. */ /* To-Do: anything with structures inside structures at the endpoints (such as lots of param_t's at the end of long chain structures) * may need to have those final param_t's declared, allocated, copied * NOTE: Most of the commented out allocations are those of vectors declared with definite size, i.e. 
double x[3] */ //__host__ void copy_mod_to_device(struct mod_t *hmod) { // /* Assumes single component model */ // /*.................................................................................................................*/ // // /* Allocate and copy the main parent structure first */ // gpuErrchk(cudaMalloc((void**)&dev_mod, sizeof(hmod)*1)); // gpuErrchk(cudaMemcpy(dev_mod, &hmod, sizeof(hmod), cudaMemcpyHostToDevice)); // // /* Allocate mod->spin memory */ // int angsz1 = sizeof(struct param_t) * 3; // int dblsz = sizeof(double) * MAXIMP; // int angsz2 = sizeof(struct param_t) * MAXIMP * 3; // // // gpuErrchk(cudaMalloc((void**)&dev_mod_spin, sizeof(hmod->spin))); // //gpuErrchk(cudaMalloc((void**)&dev_mod_spin_angle, sizeof(hmod->spin.angle))); // // gpuErrchk(cudaMalloc((void**)&dev_mod_spin_omega, sizeof(hmod->spin.omega))); // // gpuErrchk(cudaMalloc((void**)&dev_mod_spin_omegadot, sizeof(hmod->spin.omegadot))); // // gpuErrchk(cudaMalloc((void**)&dev_mod_spin_t_impulse, sizeof(hmod->spin.t_impulse))); // // gpuErrchk(cudaMalloc((void**)&dev_mod_spin_impulse, sizeof(hmod->spin.impulse))); // // gpuErrchk(cudaMalloc((void**)&dev_mod_spin_inertia, sizeof(hmod->spin.inertia))); // // /* Copy mod->spin contents */ // gpuErrchk(cudaMemcpy(dev_mod_spin, &hmod->spin, sizeof(hmod->spin), cudaMemcpyHostToDevice)); // //gpuErrchk(cudaMemcpy(dev_mod_spin_angle, &hmod->spin.angle, angsz1, cudaMemcpyHostToDevice)); // //gpuErrchk(cudaMemcpy(dev_mod_spin_omega, hmod->spin.omega, angsz1, cudaMemcpyHostToDevice)); // //gpuErrchk(cudaMemcpy(dev_mod_spin_omegadot, &hmod->spin.omegadot, angsz1, cudaMemcpyHostToDevice)); // //gpuErrchk(cudaMemcpy(dev_mod_spin_t_impulse, hmod->spin.t_impulse, dblsz, cudaMemcpyHostToDevice)); // //gpuErrchk(cudaMemcpy(dev_mod_spin_impulse, &hmod->spin.impulse, angsz2, cudaMemcpyHostToDevice)); // //gpuErrchk(cudaMemcpy(dev_mod_spin_inertia, &hmod->spin.inertia, angsz1, cudaMemcpyHostToDevice)); // // /*..................................................................................................................*/ // // /* Allocate mod->shape */ // /* mod->shape.comp[0].real (vertices_t structure) */ // int cmp_sz = sizeof(hmod->shape.comp[0]); // int shp_sz = sizeof(hmod->shape); // int inertia_sz = sizeof(double) * 9; // int off_sz = sizeof(struct param_t) * 3; // int ver_sz = sizeof(struct vertices_t); // int f_sz = sizeof(struct facet_t); // int s_sz = sizeof(struct side_t); // int v_sz = sizeof(struct vertex_t); // int ns = hmod->shape.comp[0].real.ns; // int nf = hmod->shape.comp[0].real.nf; // int nv = hmod->shape.comp[0].real.nv; // int afactor = sizeof(double*) * nv; // int int_sz = sizeof(int); // int pint_sz = sizeof(int*); // int dbl_sz = sizeof(double); // int par_sz = sizeof(struct param_t); // // gpuErrchk(cudaMalloc((void**)&dev_mod_shape, shp_sz)); // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp, cmp_sz)); // // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_inertia, inertia_sz)); // // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_com, inertia_sz/3)); // // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_m, inertia_sz)); // // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_off, off_sz)); // // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_rot, off_sz)); // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_real, ver_sz)); // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_real_f, f_sz*nf)); // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_real_v, v_sz*nv)); // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_real_s, 
s_sz*ns)); // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_real_s_v[2], ns*int_sz)); // dbl *[] // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_real_s_f[2], ns*int_sz)); // dbl *[] // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_real_v_a[3], ns*dbl_sz)); // dbl *[] // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_real_v_u[3], ns*dbl_sz)); // dbl *[] // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_real_v_x[3], ns*dbl_sz)); // dbl *[] // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_real_v_n[3], ns*dbl_sz)); // dbl *[] // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_real_v_af, nv*pint_sz)); // dbl ** // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_real_v_as, nv*pint_sz)); // dbl ** // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_real_v_afactor, afactor)); // dbl *** // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_real_v_bfactor, afactor)); // dbl *** // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_real_f_v[3], nf*int_sz)); // dbl *[] // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_real_f_s[3], nf*int_sz)); // dbl *[] // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_real_f_n[3], nf*dbl_sz)); // dbl *[] // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_real_f_x[3], nf*dbl_sz)); // dbl *[] // // /* Copy mod->shape contents */ // gpuErrchk(cudaMemcpy(dev_mod_shape, &hmod->shape, shp_sz, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp, &hmod->shape.comp[0], cmp_sz, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_inertia, &hmod->shape.comp[0].inertia, inertia_sz, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_com, &hmod->shape.comp[0].com, inertia_sz/3, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_m, &hmod->shape.comp[0].m, inertia_sz, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_off, &hmod->shape.comp[0].off, off_sz, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_rot, &hmod->shape.comp[0].rot, off_sz, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_real,&hmod->shape.comp[0].real,ver_sz, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_real_f, &hmod->shape.comp[0].real.f, f_sz*nf, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_real_v, &hmod->shape.comp[0].real.v, v_sz*nv, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_real_s, &hmod->shape.comp[0].real.s, s_sz*ns, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_real_s, &hmod->shape.comp[0].real.s->v, 2*ns*int_sz, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_real_f, &hmod->shape.comp[0].real.s->f, 2*ns*int_sz, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_real_v_a, &hmod->shape.comp[0].real.v->a, 3*nv*dbl_sz, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_real_v_u, &hmod->shape.comp[0].real.v->u, 3*nv*dbl_sz, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_real_v_x, &hmod->shape.comp[0].real.v->x, 3*nv*dbl_sz, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_real_v_n, &hmod->shape.comp[0].real.v->n, 3*nv*dbl_sz, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_real_v_af, &hmod->shape.comp[0].real.v->af, nv*pint_sz, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_real_v_as, &hmod->shape.comp[0].real.v->as, nv*pint_sz, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_real_v_afactor, 
&hmod->shape.comp[0].real.v->afactor, afactor, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_real_v_bfactor, &hmod->shape.comp[0].real.v->bfactor, afactor, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_real_f_v, &hmod->shape.comp[0].real.f->v, 3*nf*int_sz, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_real_f_s, &hmod->shape.comp[0].real.f->s, 3*nf*int_sz, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_real_f_n, &hmod->shape.comp[0].real.f->n, 3*nf*dbl_sz, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_real_f_x, &hmod->shape.comp[0].real.f->x, 3*nf*dbl_sz, cudaMemcpyHostToDevice)); // // // if (hmod->shape.comp[0].type == ELLIPSE) { // int ell_sz = sizeof(struct ellipse_t); // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_desc_ell, ell_sz)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_desc_ell, &hmod->shape.comp[0].desc.ell, ell_sz, cudaMemcpyHostToDevice)); // } // // if (hmod->shape.comp[0].type == OVOID) { // int ov_sz = sizeof(struct ovoid_t); // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_desc_ovoid, ov_sz)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_desc_ovoid, &hmod->shape.comp[0].desc.ovoid, ov_sz, cudaMemcpyHostToDevice)); // } // // /* To-Do: These double pointer allocations and memcpy's need attention */ // if (hmod->shape.comp[0].type == HARMONIC) { // int har_sz = sizeof(struct harmonic_t); // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_desc_har, har_sz)); // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_desc_har_a, par_sz)); // dbl ** // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_desc_har_b, par_sz)); // dbl ** // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_desc_har_a_save, dbl_sz)); // dbl ** // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_desc_har_b_save, dbl_sz)); // dbl ** // // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_desc_har, &hmod->shape.comp[0].desc.har, har_sz, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_desc_har_a, &hmod->shape.comp[0].desc.har.a, par_sz, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_desc_har_b, &hmod->shape.comp[0].desc.har.b, par_sz, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_desc_har_a_save, &hmod->shape.comp[0].desc.har.a_save, dbl_sz, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_desc_har_b_save, &hmod->shape.comp[0].desc.har.b_save, dbl_sz, cudaMemcpyHostToDevice)); // } // // if (hmod->shape.comp[0].type == VERTEX) { // int nv1 = hmod->shape.comp[0].desc.ver.nv; // int ns1 = hmod->shape.comp[0].desc.ver.ns; // int nf1 = hmod->shape.comp[0].desc.ver.nf; // int dpt_sz = sizeof(double*); // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_desc_ver, ver_sz)); // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_desc_ver_s, s_sz*ns1)); // // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_desc_ver_s_v[2], int_sz*ns1)); // // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_desc_ver_s_f[2], int_sz*ns1)); // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_desc_ver_f, f_sz*nf1)); // // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_desc_ver_f_v[3], int_sz*nf1)); // // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_desc_ver_f_s[3], int_sz*nf1)); // // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_desc_ver_f_n[3], dbl_sz*nf1)); // // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_desc_ver_f_x[3], dbl_sz*nf1)); // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_desc_ver_v, v_sz*nv1)); // // 
gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_desc_ver_v_a[3], dbl_sz*nv1)); // // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_desc_ver_v_u[3], dbl_sz*nv1)); // // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_desc_ver_v_x[3], dbl_sz*nv1)); // // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_desc_ver_v_n[3], dbl_sz*nv1)); // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_desc_ver_v_af, nv1*int_sz)); // dbl ** // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_desc_ver_v_as, nv1*int_sz)); // dbl ** // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_desc_ver_v_afactor, nv1*dbl_sz)); // dbl *** // gpuErrchk(cudaMalloc((void**)&dev_mod_shape_comp_desc_ver_v_bfactor, nv1*dbl_sz)); // dbl *** // // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_desc_ver, &hmod->shape.comp[0].desc.ver, ver_sz, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_desc_ver_s, &hmod->shape.comp[0].desc.ver.s, s_sz*ns1, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_desc_ver_s_v, &hmod->shape.comp[0].desc.ver.s->v, 2*int_sz*ns1, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_desc_ver_s_f, &hmod->shape.comp[0].desc.ver.s->f, 2*int_sz*ns1, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_desc_ver_f, &hmod->shape.comp[0].desc.ver.f, f_sz*nf1, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_desc_ver_f_v, &hmod->shape.comp[0].desc.ver.f->v, 3*int_sz*nf1, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_desc_ver_f_s, &hmod->shape.comp[0].desc.ver.f->s, 3*int_sz*nf1, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_desc_ver_f_n, &hmod->shape.comp[0].desc.ver.f->n, 3*dbl_sz*nf1, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_desc_ver_f_x, &hmod->shape.comp[0].desc.ver.f->x, 3*dbl_sz*nf1, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_desc_ver_v, &hmod->shape.comp[0].desc.ver.v, v_sz*nv1, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_desc_ver_v_a, &hmod->shape.comp[0].desc.ver.v->a, 3*int_sz*nf1, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_desc_ver_v_u, &hmod->shape.comp[0].desc.ver.v->u, 3*int_sz*nf1, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_desc_ver_v_x, &hmod->shape.comp[0].desc.ver.v->x, 3*dbl_sz*nf1, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_desc_ver_v_n, &hmod->shape.comp[0].desc.ver.v->n, 3*dbl_sz*nf1, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_desc_ver_v_af, &hmod->shape.comp[0].desc.ver.v->af, pint_sz, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_desc_ver_v_as, &hmod->shape.comp[0].desc.ver.v->as, pint_sz, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_desc_ver_v_afactor, &hmod->shape.comp[0].desc.ver.v->afactor, dpt_sz, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_shape_comp_desc_ver_v_bfactor, &hmod->shape.comp[0].desc.ver.v->bfactor, dpt_sz, cudaMemcpyHostToDevice)); // } // // /*............................................................................................................*/ // /* Allocate mod->photo device pointers */ // /* NOTE: the following allocation and copying of mod->photo contents assumes nrl = nol = 0 or 1, but no more */ // /* First some helper variables for sizing */ // int nrl = hmod->photo.nradlaws; // int nol = hmod->photo.noptlaws; // int u_sz = sizeof(unsigned char); // int pho_sz = sizeof(struct photo_t); 
// int tab_sz = sizeof(struct tabular_t); // int rc_sz = sizeof(struct RC_t); // int rcpt_sz = sizeof(struct RC_t*); // int parptr_sz = sizeof(struct param_t*) * hmod->photo.radar->tabular.n; // int dblptr_sz = sizeof(double*) * hmod->photo.radar->tabular.n; // int qspc_sz = sizeof(struct quasispec_t); // int hyr_sz = sizeof(struct hybridradar_t); // int hrcs_sz = sizeof(struct harmcosine_t); // int incs_sz = sizeof(struct inhocosine_t); // int r_sz = sizeof(struct R_t); // int rpt_sz = sizeof(struct R_t*); // int hmR_sz = sizeof(struct harmR_t); // int inho_sz = sizeof(struct inhoR_t); // int hpk_sz = sizeof(struct hapke_t); // int phpk_sz = sizeof(struct hapke_t*); // int hmhpk_sz = sizeof(struct harmhapke_t); // int inhpk_sz = sizeof(struct inhohapke_t); // int kas_sz = sizeof(struct kaas_t); // int pkas_sz = sizeof(struct kaas_t*); // int hmkas_sz = sizeof(struct harmkaas_t); // int inkas_sz = sizeof(struct inhokaas_t); // // /* Allocate gpu copy of mod->photo and memcpy */ // gpuErrchk(cudaMalloc((void**)&dev_mod_photo, sizeof(hmod->photo))); // gpuErrchk(cudaMalloc((void**)&dev_mod_photo_radtype, u_sz*nrl)); // gpuErrchk(cudaMalloc((void**)&dev_mod_photo_opttype, u_sz*nol)); // gpuErrchk(cudaMemcpy(dev_mod_photo, &hmod->photo, pho_sz, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_photo_radtype, &hmod->photo.radtype, u_sz*nrl, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_photo_opttype, &hmod->photo.opttype, u_sz*nol, cudaMemcpyHostToDevice)); // // /* Check for nrl and nol being 0 or 1 */ // if (nrl >= 1) { /* This checks that nradlaws is at least 1 */ // // if (nrl > 1) // printf("\nShape-cuda V1.0 currently supports only one optical and one radar law maximum."); // // /* Allocate gpu copy of mod->photo.radar and memcpy, depending on type */ // if (hmod->photo.radtype[0] == COSINELAW_DIFF) { // // gpuErrchk(cudaMalloc((void**)&dev_mod_photo_radar_RC, rc_sz)); // gpuErrchk(cudaMemcpy(dev_mod_photo_radar_RC, &hmod->photo.radar->RC, rc_sz, cudaMemcpyHostToDevice)); // } // if (hmod->photo.radtype[0] == TABULARLAW) { // gpuErrchk(cudaMalloc((void**)&dev_mod_photo_radar_tabular, tab_sz)); // gpuErrchk(cudaMalloc((void**)&dev_mod_photo_radar_tabular_rho, parptr_sz)); // gpuErrchk(cudaMalloc((void**)&dev_mod_photo_radar_tabular_rho_save, dblptr_sz)); // gpuErrchk(cudaMemcpy(dev_mod_photo_radar_tabular, &hmod->photo.radar->tabular, tab_sz, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_photo_radar_tabular_rho, &hmod->photo.radar->tabular.rho, parptr_sz, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_photo_radar_tabular_rho_save, &hmod->photo.radar->tabular.rho_save, dblptr_sz, cudaMemcpyHostToDevice)); // } // if (hmod->photo.radtype[0] == GAUSSIANLAW || hmod->photo.radtype[0] == HAGFORSLAW || hmod->photo.radtype[0] == COSINELAW_QS) { // gpuErrchk(cudaMalloc((void**)&dev_mod_photo_radar_quasispec, qspc_sz)); // gpuErrchk(cudaMemcpy(dev_mod_photo_radar_quasispec, &hmod->photo.radar->quasispec, qspc_sz, cudaMemcpyHostToDevice)); // } // if (hmod->photo.radtype[0] == GAUSSIAN_COSINE || hmod->photo.radtype[0] == HAGFORS_COSINE || hmod->photo.radtype[0] == COSINE_COSINE) { // gpuErrchk(cudaMalloc((void**)&dev_mod_photo_radar_hybrid, hyr_sz)); // gpuErrchk(cudaMemcpy(dev_mod_photo_radar_hybrid, &hmod->photo.radar->hybrid, hyr_sz, cudaMemcpyHostToDevice)); // } // if (hmod->photo.radtype[0] == HARMCOSINE_DIFF) { // gpuErrchk(cudaMalloc((void**)&dev_mod_photo_radar_harmcosine, hrcs_sz)); // 
gpuErrchk(cudaMalloc((void**)&dev_mod_photo_radar_harmcosine_local, rcpt_sz)); // so what is this - a [][] pointer of RC_t's. Per pixel? How many? // gpuErrchk(cudaMemcpy(dev_mod_photo_radar_harmcosine, &hmod->photo.radar->harmcosine, hrcs_sz, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_photo_radar_harmcosine_local, &hmod->photo.radar->harmcosine.local, rcpt_sz, cudaMemcpyHostToDevice)); //dbl pointer // } // if (hmod->photo.radtype[0] == INHOCOSINE_DIFF) { // gpuErrchk(cudaMalloc((void**)&dev_mod_photo_radar_inhocosine, incs_sz)); // gpuErrchk(cudaMalloc((void**)&dev_mod_photo_radar_inhocosine_local, rcpt_sz)); // so what is this - a [][] pointer of RC_t's. Per pixel? How many? // gpuErrchk(cudaMemcpy(dev_mod_photo_radar_inhocosine, &hmod->photo.radar->inhocosine, incs_sz, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_photo_radar_inhocosine_local, &hmod->photo.radar->inhocosine.local, rcpt_sz, cudaMemcpyHostToDevice)); //dbl pointer // } // } // // if (nol >= 1) {/* This checks that noptlaws is at least 1 */ // // if (nol > 1) // printf("\nShape-cuda V1.0 currently supports only one optical and one radar law maximum."); // // /* Allocate gpu copy of mod->photo.optical and memcpy, depending on type */ // if (hmod->photo.opttype[0] == GEOMETRICAL || hmod->photo.opttype[0] == LAMBERTLAW || hmod->photo.opttype[0] == LOMMEL) { // gpuErrchk(cudaMalloc((void**)&dev_mod_photo_optical_R, r_sz)); // gpuErrchk(cudaMemcpy(dev_mod_photo_optical_R, &hmod->photo.optical->R, r_sz, cudaMemcpyHostToDevice)); // } // if (hmod->photo.opttype[0] == HARMLAMBERT || hmod->photo.opttype[0] == HARMLOMMEL) { // gpuErrchk(cudaMalloc((void**)&dev_mod_photo_optical_harmR, hmR_sz)); // gpuErrchk(cudaMalloc((void**)&dev_mod_photo_optical_harmR_local, rpt_sz)); // double pointer, but how many? // gpuErrchk(cudaMemcpy(dev_mod_photo_optical_harmR, &hmod->photo.optical->harmR, hmR_sz, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_photo_optical_harmR_local, &hmod->photo.optical->harmR.local, rpt_sz, cudaMemcpyHostToDevice)); // } // if (hmod->photo.opttype[0] == INHOLAMBERT || hmod->photo.opttype[0] == INHOLOMMEL) { // gpuErrchk(cudaMalloc((void**)&dev_mod_photo_optical_inhoR, inho_sz)); // gpuErrchk(cudaMalloc((void**)&dev_mod_photo_optical_inhoR_local, rpt_sz)); // double pointer, but how many? // gpuErrchk(cudaMemcpy(dev_mod_photo_optical_inhoR, &hmod->photo.optical->inhoR, inho_sz, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_photo_optical_inhoR_local, &hmod->photo.optical->inhoR.local, rpt_sz, cudaMemcpyHostToDevice)); // } // if (hmod->photo.opttype[0] == HAPKE) { // gpuErrchk(cudaMalloc((void**)&dev_mod_photo_optical_hapke, hpk_sz)); // gpuErrchk(cudaMemcpy(dev_mod_photo_optical_hapke, &hmod->photo.optical->hapke, hpk_sz, cudaMemcpyHostToDevice)); // } // if (hmod->photo.opttype[0] == HARMHAPKE) { // gpuErrchk(cudaMalloc((void**)&dev_mod_photo_optical_harmhapke, hmhpk_sz)); // gpuErrchk(cudaMalloc((void**)&dev_mod_photo_optical_harmhapke_local, phpk_sz)); // double pointer, but how many? 
// gpuErrchk(cudaMemcpy(dev_mod_photo_optical_harmhapke, &hmod->photo.optical->harmhapke, hmhpk_sz, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_photo_optical_harmhapke_local, &hmod->photo.optical->harmhapke.local, phpk_sz, cudaMemcpyHostToDevice)); // } // if (hmod->photo.opttype[0] == INHOHAPKE) { // gpuErrchk(cudaMalloc((void**)&dev_mod_photo_optical_inhohapke, inhpk_sz)); // gpuErrchk(cudaMalloc((void**)&dev_mod_photo_optical_inhohapke_local, phpk_sz)); // double pointer, but how many? // gpuErrchk(cudaMemcpy(dev_mod_photo_optical_inhohapke, &hmod->photo.optical->inhohapke, inhpk_sz, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_photo_optical_inhohapke_local, &hmod->photo.optical->inhohapke.local, phpk_sz, cudaMemcpyHostToDevice)); // } // if (hmod->photo.opttype[0] == KAASALAINEN) { // gpuErrchk(cudaMalloc((void**)&dev_mod_photo_optical_kaas, kas_sz)); // gpuErrchk(cudaMemcpy(dev_mod_photo_optical_kaas, &hmod->photo.optical->kaas, kas_sz, cudaMemcpyHostToDevice)); // } // if (hmod->photo.opttype[0] == HARMKAAS) { // gpuErrchk(cudaMalloc((void**)&dev_mod_photo_optical_harmkaas, hmkas_sz)); // gpuErrchk(cudaMalloc((void**)&dev_mod_photo_optical_harmkaas_local, pkas_sz)); // double pointer, but how many? // gpuErrchk(cudaMemcpy(dev_mod_photo_optical_harmkaas, &hmod->photo.optical->harmkaas, hmkas_sz, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_photo_optical_harmkaas_local, &hmod->photo.optical->harmkaas.local, pkas_sz, cudaMemcpyHostToDevice)); // } // if (hmod->photo.opttype[0] == INHOKAAS) { // gpuErrchk(cudaMalloc((void**)&dev_mod_photo_optical_inhokaas, inkas_sz)); // gpuErrchk(cudaMalloc((void**)&dev_mod_photo_optical_inhokaas_local, pkas_sz)); // double pointer, but how many? // gpuErrchk(cudaMemcpy(dev_mod_photo_optical_inhokaas, &hmod->photo.optical->inhokaas, inkas_sz, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(dev_mod_photo_optical_inhokaas_local, &hmod->photo.optical->inhokaas.local, pkas_sz, cudaMemcpyHostToDevice)); // } // } //}
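/* Editor's note (hedged sketch, not part of the original file): the To-Do and NOTE
 * comments above concern structs whose members are themselves pointers. cudaMemcpy of
 * the outer struct alone leaves those members pointing at host memory, which is exactly
 * the problem flagged for dev_par_fpntr. The usual deep-copy pattern is: allocate the
 * inner device buffer, copy the inner data, patch the pointer field in a host-side
 * staging copy of the struct, then copy the staged struct. The type and field names
 * below are illustrative only, not the shape-cuda structures; error checking (e.g.
 * gpuErrchk) is omitted for brevity.
 */
#include <cuda_runtime.h>

struct example_t {
    int n;
    double *data;   /* host pointer on the CPU side, device pointer on the GPU side */
};

static example_t *copy_example_to_device(const example_t *h) {
    example_t staged = *h;                 /* shallow host-side copy we can patch */

    /* 1. Deep-copy the pointed-to array to the device. */
    double *d_data;
    cudaMalloc((void **)&d_data, h->n * sizeof(double));
    cudaMemcpy(d_data, h->data, h->n * sizeof(double), cudaMemcpyHostToDevice);

    /* 2. Point the staged struct at device memory, then copy the struct itself. */
    staged.data = d_data;
    example_t *d_struct;
    cudaMalloc((void **)&d_struct, sizeof(example_t));
    cudaMemcpy(d_struct, &staged, sizeof(example_t), cudaMemcpyHostToDevice);

    return d_struct;                       /* usable from kernels; free both later */
}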
d9b8347562da86490e33ce8ecd419bf87d43d8c6.hip
// !!! This is a file automatically generated by hipify!!!
#include<iostream>
#include"../cuda_note.h"

int main(){
	int dev_count = 0;
	hipError_t err = hipGetDeviceCount(&dev_count);
	if (err == hipSuccess) {
		platformInfo(&dev_count);
	}
	return 0;
}
d9b8347562da86490e33ce8ecd419bf87d43d8c6.cu
#include<iostream>
#include"../cuda_note.h"

int main(){
	int dev_count = 0;
	cudaError_t err = cudaGetDeviceCount(&dev_count);
	if (err == cudaSuccess) {
		platformInfo(&dev_count);
	}
	return 0;
}
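`platformInfo()` lives in `../cuda_note.h`, which is not part of this pair, so its body is unknown. Purely as an assumption about what such a helper typically does, a minimal stand-in could loop over the counted devices and print a few `cudaDeviceProp` fields:

// Hypothetical stand-in for platformInfo(); the real ../cuda_note.h is not shown in this corpus.
#include <cstdio>
#include <cuda_runtime.h>

void platformInfo(int *dev_count) {
	for (int d = 0; d < *dev_count; ++d) {
		cudaDeviceProp prop;
		if (cudaGetDeviceProperties(&prop, d) != cudaSuccess)
			continue;	// skip devices we cannot query
		printf("Device %d: %s, CC %d.%d, %d SMs, %.1f GiB global memory\n",
		       d, prop.name, prop.major, prop.minor, prop.multiProcessorCount,
		       prop.totalGlobalMem / (1024.0 * 1024.0 * 1024.0));
	}
}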
38ed3af8664964a1518df78092420f0daac044ef.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorMathBlas.cu" #else #define ERROR_ONLY_FP_TYPES(func) \ THError("%s for CUDA tensors only supports floating-point types. Try converting the tensors with .float()", func); THC_API accreal THCTensor_(dot)(THCState *state, THCTensor *self, THCTensor *src) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); THArgCheck(THCTensor_(nElement)(state, self) == THCTensor_(nElement)(state, src), 2, "sizes do not match"); self = THCTensor_(newContiguous)(state, self); src = THCTensor_(newContiguous)(state, src); #ifdef THC_REAL_IS_FLOAT accreal result = THCudaBlas_Sdot(state, THCTensor_(nElement)(state, self), THCTensor_(data)(state, self), 1, THCTensor_(data)(state, src), 1); #elif defined(THC_REAL_IS_DOUBLE) accreal result = THCudaBlas_Ddot(state, THCTensor_(nElement)(state, self), THCTensor_(data)(state, self), 1, THCTensor_(data)(state, src), 1); #elif defined(THC_REAL_IS_HALF) accreal result = ScalarConvert<half, accreal>::to( THCudaBlas_Hdot(state, THCTensor_(nElement)(state, self), THCTensor_(data)(state, self), 1, THCTensor_(data)(state, src), 1)); #endif THCTensor_(free)(state, src); THCTensor_(free)(state, self); return result; #else ERROR_ONLY_FP_TYPES("dot"); return ScalarConvert<int, accreal>::to(0); #endif } THC_API void THCTensor_(addmv)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real alpha, THCTensor *mat, THCTensor *vec) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, r_, t, mat, vec)); if( (mat->nDimension != 2) || (vec->nDimension != 1) ) THError("matrix and vector expected"); if( mat->size[1] != vec->size[0] ) THError("size mismatch"); if(t->nDimension != 1) THError("size mismatch"); if(t->size[0] != mat->size[0]) THError("size mismatch"); #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) if(r_ != t) { THCTensor_(resizeAs)(state, r_, t); THCTensor_(copy)(state, r_, t); } if(mat->stride[0] == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgemv(state, 'n', mat->size[0], mat->size[1], alpha, THCTensor_(data)(state, mat), mat->stride[1], THCTensor_(data)(state, vec), vec->stride[0], beta, THCTensor_(data)(state, r_), r_->stride[0]); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemv(state, 'n', mat->size[0], mat->size[1], alpha, THCTensor_(data)(state, mat), mat->stride[1], THCTensor_(data)(state, vec), vec->stride[0], beta, THCTensor_(data)(state, r_), r_->stride[0]); #endif } else if(mat->stride[1] == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgemv(state, 't', mat->size[1], mat->size[0], alpha, THCTensor_(data)(state, mat), mat->stride[0], THCTensor_(data)(state, vec), vec->stride[0], beta, THCTensor_(data)(state, r_), r_->stride[0]); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemv(state, 't', mat->size[1], mat->size[0], alpha, THCTensor_(data)(state, mat), mat->stride[0], THCTensor_(data)(state, vec), vec->stride[0], beta, THCTensor_(data)(state, r_), r_->stride[0]); #endif } else { THCTensor *cmat = THCTensor_(newContiguous)(state, mat); #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgemv(state, 't', mat->size[1], mat->size[0], alpha, THCTensor_(data)(state, cmat), cmat->stride[0], THCTensor_(data)(state, vec), vec->stride[0], beta, THCTensor_(data)(state, r_), r_->stride[0]); #elif defined(THC_REAL_IS_DOUBLE) 
THCudaBlas_Dgemv(state, 't', mat->size[1], mat->size[0], alpha, THCTensor_(data)(state, cmat), cmat->stride[0], THCTensor_(data)(state, vec), vec->stride[0], beta, THCTensor_(data)(state, r_), r_->stride[0]); #endif THCTensor_(free)(state, cmat); } #elif defined(THC_REAL_IS_HALF) // Currently no Hgemv/SgemvEx in Cublas THCTensor *vecAsMatrix = THCTensor_(newWithTensor)(state, vec); THCTensor_(resize2d)(state, vecAsMatrix, vecAsMatrix->size[0], 1); THCTensor *tAsMatrix = THCTensor_(newWithTensor)(state, t); THCTensor_(resize2d)(state, tAsMatrix, tAsMatrix->size[0], 1); THCTensor_(addmm)(state, r_, beta, tAsMatrix, alpha, mat, vecAsMatrix); // r_ will have answer as matrix, need to return a vector THCTensor_(resize1d)(state, r_, r_->size[0]); THCTensor_(free)(state, vecAsMatrix); THCTensor_(free)(state, tAsMatrix); #endif #else ERROR_ONLY_FP_TYPES("addmv"); #endif } THC_API void THCTensor_(addr)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real alpha, THCTensor *vec1, THCTensor *vec2) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, r_, t, vec1, vec2)); if ( (vec1->nDimension != 1) || (vec2->nDimension != 1) ) { THError("vector and vector expected"); } if (t->nDimension != 2) { THError("size mismatch"); } if ( (t->size[0] != vec1->size[0]) || (t->size[1] != vec2->size[0]) ) { THError("size mismatch"); } #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) if (r_ != t) { THCTensor_(resizeAs)(state, r_, t); THCTensor_(copy)(state, r_, t); } if(THCNumerics<real>::eq(beta, ScalarConvert<int, real>::to(0))) { THCTensor_(zero)(state, r_); } else if(THCNumerics<real>::ne(beta, ScalarConvert<int, real>::to(1))) { THCTensor_(mul)(state, r_, r_, beta); } if(r_->stride[0] == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sger(state, vec1->size[0], vec2->size[0], alpha, THCTensor_(data)(state, vec1), vec1->stride[0], THCTensor_(data)(state, vec2), vec2->stride[0], THCTensor_(data)(state, r_), r_->stride[1]); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dger(state, vec1->size[0], vec2->size[0], alpha, THCTensor_(data)(state, vec1), vec1->stride[0], THCTensor_(data)(state, vec2), vec2->stride[0], THCTensor_(data)(state, r_), r_->stride[1]); #endif } else if(r_->stride[1] == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sger(state, vec2->size[0], vec1->size[0], alpha, THCTensor_(data)(state, vec2), vec2->stride[0], THCTensor_(data)(state, vec1), vec1->stride[0], THCTensor_(data)(state, r_), r_->stride[0]); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dger(state, vec2->size[0], vec1->size[0], alpha, THCTensor_(data)(state, vec2), vec2->stride[0], THCTensor_(data)(state, vec1), vec1->stride[0], THCTensor_(data)(state, r_), r_->stride[0]); #endif } else { THCTensor *cr = THCTensor_(newClone)(state, r_); #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sger(state, vec2->size[0], vec1->size[0], alpha, THCTensor_(data)(state, vec2), vec2->stride[0], THCTensor_(data)(state, vec1), vec1->stride[0], THCTensor_(data)(state, cr), cr->stride[0]); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dger(state, vec2->size[0], vec1->size[0], alpha, THCTensor_(data)(state, vec2), vec2->stride[0], THCTensor_(data)(state, vec1), vec1->stride[0], THCTensor_(data)(state, cr), cr->stride[0]); #endif THCTensor_(freeCopyTo)(state, cr, r_); } #elif defined(THC_REAL_IS_HALF) // currently no Hger/SgerEx in Cublas. 
THCTensor *vec2T = THCTensor_(newWithTensor)(state, vec2); THCTensor_(resize2d)(state, vec2T, vec2T->size[0], 1); THCTensor_(transpose)(state, vec2T, NULL, 0, 1); THCTensor *vec1M = THCTensor_(newWithTensor)(state, vec1); THCTensor_(resize2d)(state, vec1M, vec1M->size[0], 1); THCTensor_(addmm)(state, r_, beta, t, alpha, vec1M, vec2T); THCTensor_(free)(state, vec2T); THCTensor_(free)(state, vec1M); #endif #else ERROR_ONLY_FP_TYPES("addr"); #endif } THC_API void THCTensor_(addmm)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real alpha, THCTensor *m1, THCTensor *m2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, r_, t, m1, m2)); char transpose_r, transpose_m1, transpose_m2; THCTensor *r__, *m1_, *m2_; if( (m1->nDimension != 2) || (m2->nDimension != 2) ) THError("matrices expected, got %dD, %dD tensors", m1->nDimension, m2->nDimension); if(t->nDimension != 2) THError("matrix expected, got %dD tensor for t", t->nDimension); if(m1->size[1] != m2->size[0]) { THCDescBuff bm1 = THCTensor_(sizeDesc)(state, m1); THCDescBuff bm2 = THCTensor_(sizeDesc)(state, m2); THError("size mismatch, m1: %s, m2: %s", bm1.str, bm2.str); } if( (t->size[0] != m1->size[0]) || (t->size[1] != m2->size[1]) ) { THCDescBuff bt = THCTensor_(sizeDesc)(state, t); THCDescBuff bm1 = THCTensor_(sizeDesc)(state, m1); THCDescBuff bm2 = THCTensor_(sizeDesc)(state, m2); THError("size mismatch, t: %s, m1: %s, m2: %s", bt.str, bm1.str, bm2.str); } if(t != r_) { THCTensor_(resizeAs)(state, r_, t); if (ScalarConvert<real, double>::to(beta) != 0.0) { THCTensor_(copy)(state, r_, t); } } /* r_ */ if(r_->stride[0] == 1 && r_->stride[1] != 0) { transpose_r = 'n'; r__ = r_; } else if(r_->stride[1] == 1 && r_->stride[0] != 0) { THCTensor *swap = m2; m2 = m1; m1 = swap; transpose_r = 't'; r__ = r_; } else { transpose_r = 'n'; THCTensor *transp_r_ = THCTensor_(newTranspose)(state, r_, 0, 1); r__ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, r__, NULL, 0, 1); } /* m1 */ if(m1->stride[(transpose_r == 'n' ? 0 : 1)] == 1 && m1->stride[(transpose_r == 'n' ? 1 : 0)] != 0) { transpose_m1 = 'n'; m1_ = m1; } else if(m1->stride[(transpose_r == 'n' ? 1 : 0)] == 1 && m1->stride[(transpose_r == 'n' ? 0 : 1)] != 0) { transpose_m1 = 't'; m1_ = m1; } else { transpose_m1 = (transpose_r == 'n' ? 't' : 'n'); m1_ = THCTensor_(newContiguous)(state, m1); } /* m2 */ if(m2->stride[(transpose_r == 'n' ? 0 : 1)] == 1 && m2->stride[(transpose_r == 'n' ? 1 : 0)] != 0) { transpose_m2 = 'n'; m2_ = m2; } else if(m2->stride[(transpose_r == 'n' ? 1 : 0)] == 1 && m2->stride[(transpose_r == 'n' ? 0 : 1)] != 0) { transpose_m2 = 't'; m2_ = m2; } else { transpose_m2 = (transpose_r == 'n' ? 't' : 'n'); m2_ = THCTensor_(newContiguous)(state, m2); } #ifdef THC_REAL_IS_HALF THCudaBlas_Hgemm(state, transpose_m1, transpose_m2, r__->size[(transpose_r == 'n' ? 0 : 1)], r__->size[(transpose_r == 'n' ? 1 : 0)], m1_->size[(transpose_r == 'n' ? 1 : 0)], alpha, THCTensor_(data)(state, m1_), (transpose_m1 == 'n' ? m1_->stride[(transpose_r == 'n' ? 1 : 0)] : m1_->stride[(transpose_r == 'n' ? 0 : 1)]), THCTensor_(data)(state, m2_), (transpose_m2 == 'n' ? m2_->stride[(transpose_r == 'n' ? 1 : 0)] : m2_->stride[(transpose_r == 'n' ? 0 : 1)]), beta, THCTensor_(data)(state, r__), r__->stride[(transpose_r == 'n' ? 1 : 0)]); #elif defined(THC_REAL_IS_FLOAT) THCudaBlas_Sgemm(state, transpose_m1, transpose_m2, r__->size[(transpose_r == 'n' ? 
0 : 1)], r__->size[(transpose_r == 'n' ? 1 : 0)], m1_->size[(transpose_r == 'n' ? 1 : 0)], alpha, THCTensor_(data)(state, m1_), (transpose_m1 == 'n' ? m1_->stride[(transpose_r == 'n' ? 1 : 0)] : m1_->stride[(transpose_r == 'n' ? 0 : 1)]), THCTensor_(data)(state, m2_), (transpose_m2 == 'n' ? m2_->stride[(transpose_r == 'n' ? 1 : 0)] : m2_->stride[(transpose_r == 'n' ? 0 : 1)]), beta, THCTensor_(data)(state, r__), r__->stride[(transpose_r == 'n' ? 1 : 0)]); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemm(state, transpose_m1, transpose_m2, r__->size[(transpose_r == 'n' ? 0 : 1)], r__->size[(transpose_r == 'n' ? 1 : 0)], m1_->size[(transpose_r == 'n' ? 1 : 0)], alpha, THCTensor_(data)(state, m1_), (transpose_m1 == 'n' ? m1_->stride[(transpose_r == 'n' ? 1 : 0)] : m1_->stride[(transpose_r == 'n' ? 0 : 1)]), THCTensor_(data)(state, m2_), (transpose_m2 == 'n' ? m2_->stride[(transpose_r == 'n' ? 1 : 0)] : m2_->stride[(transpose_r == 'n' ? 0 : 1)]), beta, THCTensor_(data)(state, r__), r__->stride[(transpose_r == 'n' ? 1 : 0)]); #endif /* free intermediate variables */ if(m1_ != m1) { THCTensor_(free)(state, m1_); } if(m2_ != m2) { THCTensor_(free)(state, m2_); } if(r__ != r_) { THCTensor_(freeCopyTo)(state, r__, r_); } #else ERROR_ONLY_FP_TYPES("addmm"); #endif } THC_API void THCTensor_(addbmm)(THCState *state, THCTensor *result, real beta, THCTensor *t, real alpha, THCTensor *batch1, THCTensor *batch2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, result, t, batch1, batch2)); THArgCheck(THCTensor_(nDimension)(state, t) == 2, 4, "expected 2D tensor"); THArgCheck(THCTensor_(nDimension)(state, batch1) == 3, 6, "expected 3D tensor"); THArgCheck(THCTensor_(nDimension)(state, batch2) == 3, 7, "expected 3D tensor"); int64_t batchnum = THCTensor_(size)(state, batch1, 0); int64_t m1d1 = THCTensor_(size)(state, batch1, 1); int64_t innerdim = THCTensor_(size)(state, batch1, 2); int64_t m2d2 = THCTensor_(size)(state, batch2, 2); THArgCheck(batchnum == THCTensor_(size)(state, batch2, 0), 7, "equal number of batches expected"); // M is t, as listed in the docs under addbmm THArgCheck(m1d1 == THCTensor_(size)(state, t, 0), 6, "first dimension must match first dimension of M"); THArgCheck(m2d2 == THCTensor_(size)(state, t, 1), 7, "second dimension must match second dimension of M"); THArgCheck(innerdim == THCTensor_(size)(state, batch2, 1), 6, "second dimension must match first dimension of batch2"); if (t != result) { THCTensor_(resizeAs)(state, result, t); if (ScalarConvert<real, double>::to(beta) != 0.0) { THCTensor_(copy)(state, result, t); } } THCTensor *slice1 = THCTensor_(new)(state); THCTensor *slice2 = THCTensor_(new)(state); for (int64_t i=0; i<batchnum; i++) { THCTensor_(select)(state, slice1, batch1, 0, i); THCTensor_(select)(state, slice2, batch2, 0, i); THCTensor_(addmm)(state, result, beta, result, alpha, slice1, slice2); beta = ScalarConvert<int, real>::to(1); } THCTensor_(free)(state, slice1); THCTensor_(free)(state, slice2); #else ERROR_ONLY_FP_TYPES("addbmm"); #endif } __global__ void createBatchGemmBuffer(const real** buffer, real* data, int64_t stride, int64_t num_batches) { const int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_batches) { buffer[idx] = data + idx * stride; } } __global__ void createBatchGemmBuffer3(const real** buffer1, const real ** buffer2, const real ** buffer3, real* data1, real * data2, real * data3, int64_t stride1, int64_t stride2, int64_t stride3, 
int64_t num_batches) { const int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_batches) { buffer1[idx] = data1 + idx * stride1; buffer2[idx] = data2 + idx * stride2; buffer3[idx] = data3 + idx * stride3; } } THC_API void THCTensor_(baddbmm)(THCState *state, THCTensor *result, real beta, THCTensor *t, real alpha, THCTensor *batch1, THCTensor *batch2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, result, t, batch1, batch2)); THArgCheck(THCTensor_(nDimension)(state, t) == 3, 4, "expected 3D tensor"); THArgCheck(THCTensor_(nDimension)(state, batch1) == 3, 6, "expected 3D tensor"); THArgCheck(THCTensor_(nDimension)(state, batch2) == 3, 7, "expected 3D tensor"); THArgCheck(THCTensor_(size)(state, t, 0) == THCTensor_(size)(state, batch1, 0), 6, "equal number of batches expected"); THArgCheck(THCTensor_(size)(state, t, 0) == THCTensor_(size)(state, batch2, 0), 7, "equal number of batches expected"); THArgCheck(THCTensor_(size)(state, t, 1) == THCTensor_(size)(state, batch1, 1), 6, "wrong matrix size"); THArgCheck(THCTensor_(size)(state, t, 2) == THCTensor_(size)(state, batch2, 2), 7, "wrong matrix size"); THArgCheck(THCTensor_(size)(state, batch1, 2) == THCTensor_(size)(state, batch2, 1), 6, "wrong matrix size"); if (t != result) { THCTensor_(resizeAs)(state, result, t); if (ScalarConvert<real, double>::to(beta) != 0.0) { THCTensor_(copy)(state, result, t); } } bool transpose_result; char transpose_batch1, transpose_batch2; int64_t lda, ldb, ldc; THCTensor *result_, *batch1_, *batch2_; if (result->stride[1] == 1) { transpose_result = false; result_ = result; ldc = result_->stride[2]; } else if (result->stride[2] == 1) { transpose_result = true; THCTensor *swap = batch2; batch2 = batch1; batch1 = swap; result_ = result; ldc = result_->stride[1]; } else { transpose_result = false; THCTensor *transp_r_ = THCTensor_(newTranspose)(state, result, 1, 2); result_ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, result_, NULL, 1, 2); ldc = result_->stride[2]; } if (batch1->stride[transpose_result ? 2 : 1] == 1 && batch1->stride[transpose_result ? 1 : 2] != 0) { transpose_batch1 = 'n'; batch1_ = batch1; lda = batch1_->stride[transpose_result ? 1 : 2]; } else if (batch1->stride[transpose_result ? 1 : 2] == 1 && batch1->stride[transpose_result ? 2 : 1] != 0) { transpose_batch1 = 't'; batch1_ = batch1; lda = batch1_->stride[transpose_result ? 2 : 1]; } else { transpose_batch1 = transpose_result ? 'n' : 't'; batch1_ = THCTensor_(newContiguous)(state, batch1); lda = batch1_->stride[1]; } if (batch2->stride[transpose_result ? 2 : 1] == 1 && batch2->stride[transpose_result ? 1 : 2] != 0) { transpose_batch2 = 'n'; batch2_ = batch2; ldb = batch2_->stride[transpose_result ? 1 : 2]; } else if (batch2->stride[transpose_result ? 1 : 2] == 1 && batch2->stride[transpose_result ? 2 : 1] != 0) { transpose_batch2 = 't'; batch2_ = batch2; ldb = batch2_->stride[transpose_result ? 2 : 1]; } else { transpose_batch2 = transpose_result ? 'n' : 't'; batch2_ = THCTensor_(newContiguous)(state, batch2); ldb = batch2_->stride[1]; } int64_t num_batches = result_->size[0]; #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) // Compute pointers to matrices in each batch. #if TORCH_HIP_VERSION < 8000 size_t matrices_size = num_batches * sizeof(real*); // Copy pointers to device. 
const real **d_matrices1, **d_matrices2; real **d_result_matrices; THCudaCheck(THCudaMalloc(state, (void**)&d_matrices1, matrices_size)); THCudaCheck(THCudaMalloc(state, (void**)&d_matrices2, matrices_size)); THCudaCheck(THCudaMalloc(state, (void**)&d_result_matrices, matrices_size)); const int64_t block = 512; const int64_t grid = (num_batches + block - 1) / block; hipLaunchKernelGGL(( createBatchGemmBuffer3), dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), d_matrices1, d_matrices2, (const real**)d_result_matrices, THCTensor_(data)(state, batch1_), THCTensor_(data)(state, batch2_), THCTensor_(data)(state, result_), batch1_->stride[0], batch2_->stride[0], result_->stride[0], num_batches); #ifdef THC_REAL_IS_FLOAT THCudaBlas_SgemmBatched( state, transpose_batch1, transpose_batch2, result_->size[transpose_result ? 2 : 1], result_->size[transpose_result ? 1 : 2], batch1_->size[transpose_result ? 1 : 2], alpha, d_matrices1, lda, d_matrices2, ldb, beta, d_result_matrices, ldc, num_batches); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_DgemmBatched( state, transpose_batch1, transpose_batch2, result_->size[transpose_result ? 2 : 1], result_->size[transpose_result ? 1 : 2], batch1_->size[transpose_result ? 1 : 2], alpha, d_matrices1, lda, d_matrices2, ldb, beta, d_result_matrices, ldc, num_batches); #endif //THC_REAL THCudaFree(state, d_matrices1); THCudaFree(state, d_matrices2); THCudaFree(state, d_result_matrices); #else #ifdef THC_REAL_IS_FLOAT THCudaBlas_SgemmStridedBatched( state, transpose_batch1, transpose_batch2, result_->size[transpose_result ? 2 : 1], result_->size[transpose_result ? 1 : 2], batch1_->size[transpose_result ? 1 : 2], alpha, THCTensor_(data)(state, batch1_), lda, batch1_->stride[0], THCTensor_(data)(state, batch2_), ldb, batch2_->stride[0], beta, THCTensor_(data)(state, result_), ldc, result_->stride[0], num_batches); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_DgemmStridedBatched( state, transpose_batch1, transpose_batch2, result_->size[transpose_result ? 2 : 1], result_->size[transpose_result ? 1 : 2], batch1_->size[transpose_result ? 1 : 2], alpha, THCTensor_(data)(state, batch1_), lda, batch1_->stride[0], THCTensor_(data)(state, batch2_), ldb, batch2_->stride[0], beta, THCTensor_(data)(state, result_), ldc, result_->stride[0], num_batches); #endif //THC_REAL #endif //TORCH_HIP_VERSION #elif defined(THC_REAL_IS_HALF) #if TORCH_HIP_VERSION < 9010 // Currently no HgemmBatched in Cublas for (int64_t i = 0; i < num_batches; ++i) { THCudaBlas_Hgemm( state, transpose_batch1, transpose_batch2, result_->size[transpose_result ? 2 : 1], result_->size[transpose_result ? 1 : 2], batch1_->size[transpose_result ? 1 : 2], alpha, THCTensor_(data)(state, batch1_) + i * batch1_->stride[0], lda, THCTensor_(data)(state, batch2_) + i * batch2_->stride[0], ldb, beta, THCTensor_(data)(state, result_) + i * result_->stride[0], ldc); } #else hipDeviceProp_t* prop = THCState_getCurrentDeviceProperties(state); if (prop->major >= 5){ THCudaBlas_HgemmStridedBatched( state, transpose_batch1, transpose_batch2, result_->size[transpose_result ? 2 : 1], result_->size[transpose_result ? 1 : 2], batch1_->size[transpose_result ? 1 : 2], alpha, THCTensor_(data)(state, batch1_), lda, batch1_->stride[0], THCTensor_(data)(state, batch2_), ldb, batch2_->stride[0], beta, THCTensor_(data)(state, result_), ldc, result_->stride[0], num_batches); } else { for (int64_t i = 0; i < num_batches; ++i) { THCudaBlas_Hgemm( state, transpose_batch1, transpose_batch2, result_->size[transpose_result ? 
2 : 1], result_->size[transpose_result ? 1 : 2], batch1_->size[transpose_result ? 1 : 2], alpha, THCTensor_(data)(state, batch1_) + i * batch1_->stride[0], lda, THCTensor_(data)(state, batch2_) + i * batch2_->stride[0], ldb, beta, THCTensor_(data)(state, result_) + i * result_->stride[0], ldc); } } #endif #endif if (batch1_ != batch1) { THCTensor_(free)(state, batch1_); } if (batch2_ != batch2) { THCTensor_(free)(state, batch2_); } if (result_ != result) { THCTensor_(freeCopyTo)(state, result_, result); } #else ERROR_ONLY_FP_TYPES("baddbmm"); #endif } THC_API void THCTensor_(btrifact)(THCState *state, THCTensor *ra_, THCudaIntTensor *rpivots_, THCudaIntTensor *rinfo_, int pivot, THCTensor *a) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THAssert(THCTensor_(checkGPU)(state, 2, ra_, a)); THArgCheck(THCTensor_(nDimension)(state, a) == 3, 3, "expected 3D tensor"); THArgCheck(THCTensor_(size)(state, a, 1) == THCTensor_(size)(state, a, 2), 3, "matrices must be square"); if (ra_ != a) { THCTensor_(resizeAs)(state, ra_, a); // not sure if this is kosher, but things are nicer if we return in column major if (ra_->stride[0] == 1) { THCTensor_(transpose)(state, ra_, NULL, 1, 0); } else if (ra_->stride[2] == 1) { THCTensor_(transpose)(state, ra_, NULL, 1, 2); } THCTensor_(copy)(state, ra_, a); } int n = a->size[1]; int lda; THCTensor *ra__; if (ra_->stride[1] == 1) { // column ordered, what BLAS wants lda = ra_->stride[2]; ra__ = ra_; } else { // not column ordered, need to make it such (requires copy) THCTensor *transp_r_ = THCTensor_(newTranspose)(state, ra_, 1, 2); ra__ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, ra__, NULL, 1, 2); lda = ra__->stride[2]; } int64_t num_batches = ra__->size[0]; if (!pivot) { THCudaIntTensor *t = THCudaIntTensor_new(state); THCudaIntTensor_range(state, t, 1, n, 1); THCudaIntTensor_unsqueeze1d(state, t, t, 0); THCudaIntTensor** ptrs = (THCudaIntTensor**) THAlloc(sizeof(THCudaIntTensor*)*num_batches); for (int64_t i=0; i<num_batches; i++) { ptrs[i] = t; } THCudaIntTensor_catArray(state, rpivots_, ptrs, num_batches, 0); THCudaIntTensor_free(state, t); THFree(ptrs); } else { THCudaIntTensor_resize2d(state, rpivots_, num_batches, n); } bool free_rinfo_ = !rinfo_; if (rinfo_ == NULL) rinfo_ = THCudaIntTensor_new(state); THCudaIntTensor_resize1d(state, rinfo_, num_batches); int *info_gpu = THCudaIntTensor_data(state, rinfo_); // Copy pointers to device. 
real **d_result; size_t matrices_size = num_batches * sizeof(real*); THCudaCheck(THCudaMalloc(state, (void**)&d_result, matrices_size)); const int64_t block = 512; const int64_t grid = (num_batches + block - 1) / block; hipLaunchKernelGGL(( createBatchGemmBuffer), dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), (const real**)d_result, THCTensor_(data)(state, ra__), ra__->stride[0], num_batches); int *pivots_gpu = NULL; if (pivot) { pivots_gpu = THCudaIntTensor_data(state, rpivots_); } #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgetrf(state, n, d_result, lda, pivots_gpu, info_gpu, num_batches); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgetrf(state, n, d_result, lda, pivots_gpu, info_gpu, num_batches); #endif THCudaFree(state, d_result); if (ra__ != ra_) { THCTensor_(freeCopyTo)(state, ra__, ra_); } if (free_rinfo_) { real min = THCudaIntTensor_minall(state, rinfo_); real max = THCudaIntTensor_maxall(state, rinfo_); THCudaIntTensor_free(state, rinfo_); if (min != 0 || max != 0) { THError("failed to factorize some batch elements (min info == %d, max info == %d)", min, max); } } #else THError("btrifact for CUDA tensors is only supported for floats and doubles"); #endif } THC_API void THCTensor_(btrisolve)(THCState *state, THCTensor *rb_, THCTensor *b, THCTensor *atf, THCudaIntTensor *pivots) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THAssert(THCTensor_(checkGPU)(state, 3, rb_, atf, b)); THArgCheck(THCTensor_(nDimension)(state, atf) == 3, 3, "expected 3D tensor"); THArgCheck(THCTensor_(nDimension)(state, b) == 3 || THCTensor_(nDimension)(state, b) == 2, 4, "expected 2D or 3D tensor"); THArgCheck(THCTensor_(size)(state, atf, 0) == THCTensor_(size)(state, b, 0), 3, "number of batches must be equal"); THArgCheck(THCTensor_(size)(state, atf, 1) == THCTensor_(size)(state, atf, 2), 3, "A matrices must be square"); THArgCheck(THCTensor_(size)(state, atf, 1) == THCTensor_(size)(state, b, 1), 3, "dimensions of A and b must be equal"); if (rb_ != b) { THCTensor_(resizeAs)(state, rb_, b); THCTensor_(copy)(state, rb_, b); } int n = atf->size[1]; int nrhs = rb_->nDimension > 2 ? rb_->size[2] : 1; THCTensor *atf_; THCTensor *rb__; int lda, ldb; // correct ordering of A_tf if (atf->stride[1] == 1) { // column ordered, what BLAS wants lda = atf->stride[2]; atf_ = atf; } else { // not column ordered, need to make it such (requires copy) // it would be nice if we could use the op(A) flags to automatically // transpose A if needed, but this leads to unpredictable behavior if the // user clones A_tf later with a different ordering THCTensor *transp_r_ = THCTensor_(newTranspose)(state, atf, 1, 2); atf_ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, atf_, NULL, 1, 2); lda = atf_->stride[2]; } // correct ordering of B if (rb_->stride[1] == 1) { // column ordered if (rb_->nDimension == 2 || rb_->size[2] == 1) { ldb = n; } else { ldb = rb_->stride[2]; } rb__ = rb_; } else { // make column ordered if (rb_->nDimension > 2) { THCTensor *transp_r_ = THCTensor_(newTranspose)(state, rb_, 1, 2); rb__ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, rb__, NULL, 1, 2); ldb = rb__->stride[2]; } else { rb__ = THCTensor_(newClone)(state, rb_); ldb = n; } } int64_t num_batches = rb_->size[0]; size_t matrices_size = num_batches * sizeof(real*); // Copy pointers to device. 
real **d_result; const real **d_atf; THCudaCheck(THCudaMalloc(state, (void**)&d_result, matrices_size)); THCudaCheck(THCudaMalloc(state, (void**)&d_atf, matrices_size)); const int64_t block = 512; const int64_t grid = (num_batches + block - 1) / block; hipLaunchKernelGGL(( createBatchGemmBuffer), dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), (const real**)d_result, THCTensor_(data)(state, rb__), rb__->stride[0], num_batches); hipLaunchKernelGGL(( createBatchGemmBuffer), dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), d_atf, THCTensor_(data)(state, atf_), atf_->stride[0], num_batches); if (!THCudaIntTensor_isContiguous(state, pivots)) { THError("Error: pivots is not contiguous."); } int *pivots_data = THCudaIntTensor_data(state, pivots); int info; #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgetrs(state, 'n', n, nrhs, d_atf, lda, pivots_data, d_result, ldb, &info, num_batches); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgetrs(state, 'n', n, nrhs, d_atf, lda, pivots_data, d_result, ldb, &info, num_batches); #endif if (info < 0) { THError("Illegal arg %d", -info); } THCudaFree(state, d_result); THCudaFree(state, d_atf); if (atf_ != atf) { THCTensor_(free)(state, atf_); } if (rb__ != rb_) { THCTensor_(freeCopyTo)(state, rb__, rb_); } #else THError("btrisolve for CUDA tensors is only supported for floats and doubles"); #endif } #endif
38ed3af8664964a1518df78092420f0daac044ef.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorMathBlas.cu" #else #define ERROR_ONLY_FP_TYPES(func) \ THError("%s for CUDA tensors only supports floating-point types. Try converting the tensors with .float()", func); THC_API accreal THCTensor_(dot)(THCState *state, THCTensor *self, THCTensor *src) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); THArgCheck(THCTensor_(nElement)(state, self) == THCTensor_(nElement)(state, src), 2, "sizes do not match"); self = THCTensor_(newContiguous)(state, self); src = THCTensor_(newContiguous)(state, src); #ifdef THC_REAL_IS_FLOAT accreal result = THCudaBlas_Sdot(state, THCTensor_(nElement)(state, self), THCTensor_(data)(state, self), 1, THCTensor_(data)(state, src), 1); #elif defined(THC_REAL_IS_DOUBLE) accreal result = THCudaBlas_Ddot(state, THCTensor_(nElement)(state, self), THCTensor_(data)(state, self), 1, THCTensor_(data)(state, src), 1); #elif defined(THC_REAL_IS_HALF) accreal result = ScalarConvert<half, accreal>::to( THCudaBlas_Hdot(state, THCTensor_(nElement)(state, self), THCTensor_(data)(state, self), 1, THCTensor_(data)(state, src), 1)); #endif THCTensor_(free)(state, src); THCTensor_(free)(state, self); return result; #else ERROR_ONLY_FP_TYPES("dot"); return ScalarConvert<int, accreal>::to(0); #endif } THC_API void THCTensor_(addmv)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real alpha, THCTensor *mat, THCTensor *vec) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, r_, t, mat, vec)); if( (mat->nDimension != 2) || (vec->nDimension != 1) ) THError("matrix and vector expected"); if( mat->size[1] != vec->size[0] ) THError("size mismatch"); if(t->nDimension != 1) THError("size mismatch"); if(t->size[0] != mat->size[0]) THError("size mismatch"); #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) if(r_ != t) { THCTensor_(resizeAs)(state, r_, t); THCTensor_(copy)(state, r_, t); } if(mat->stride[0] == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgemv(state, 'n', mat->size[0], mat->size[1], alpha, THCTensor_(data)(state, mat), mat->stride[1], THCTensor_(data)(state, vec), vec->stride[0], beta, THCTensor_(data)(state, r_), r_->stride[0]); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemv(state, 'n', mat->size[0], mat->size[1], alpha, THCTensor_(data)(state, mat), mat->stride[1], THCTensor_(data)(state, vec), vec->stride[0], beta, THCTensor_(data)(state, r_), r_->stride[0]); #endif } else if(mat->stride[1] == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgemv(state, 't', mat->size[1], mat->size[0], alpha, THCTensor_(data)(state, mat), mat->stride[0], THCTensor_(data)(state, vec), vec->stride[0], beta, THCTensor_(data)(state, r_), r_->stride[0]); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemv(state, 't', mat->size[1], mat->size[0], alpha, THCTensor_(data)(state, mat), mat->stride[0], THCTensor_(data)(state, vec), vec->stride[0], beta, THCTensor_(data)(state, r_), r_->stride[0]); #endif } else { THCTensor *cmat = THCTensor_(newContiguous)(state, mat); #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgemv(state, 't', mat->size[1], mat->size[0], alpha, THCTensor_(data)(state, cmat), cmat->stride[0], THCTensor_(data)(state, vec), vec->stride[0], beta, THCTensor_(data)(state, r_), r_->stride[0]); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemv(state, 't', mat->size[1], mat->size[0], alpha, THCTensor_(data)(state, cmat), 
cmat->stride[0], THCTensor_(data)(state, vec), vec->stride[0], beta, THCTensor_(data)(state, r_), r_->stride[0]); #endif THCTensor_(free)(state, cmat); } #elif defined(THC_REAL_IS_HALF) // Currently no Hgemv/SgemvEx in Cublas THCTensor *vecAsMatrix = THCTensor_(newWithTensor)(state, vec); THCTensor_(resize2d)(state, vecAsMatrix, vecAsMatrix->size[0], 1); THCTensor *tAsMatrix = THCTensor_(newWithTensor)(state, t); THCTensor_(resize2d)(state, tAsMatrix, tAsMatrix->size[0], 1); THCTensor_(addmm)(state, r_, beta, tAsMatrix, alpha, mat, vecAsMatrix); // r_ will have answer as matrix, need to return a vector THCTensor_(resize1d)(state, r_, r_->size[0]); THCTensor_(free)(state, vecAsMatrix); THCTensor_(free)(state, tAsMatrix); #endif #else ERROR_ONLY_FP_TYPES("addmv"); #endif } THC_API void THCTensor_(addr)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real alpha, THCTensor *vec1, THCTensor *vec2) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, r_, t, vec1, vec2)); if ( (vec1->nDimension != 1) || (vec2->nDimension != 1) ) { THError("vector and vector expected"); } if (t->nDimension != 2) { THError("size mismatch"); } if ( (t->size[0] != vec1->size[0]) || (t->size[1] != vec2->size[0]) ) { THError("size mismatch"); } #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) if (r_ != t) { THCTensor_(resizeAs)(state, r_, t); THCTensor_(copy)(state, r_, t); } if(THCNumerics<real>::eq(beta, ScalarConvert<int, real>::to(0))) { THCTensor_(zero)(state, r_); } else if(THCNumerics<real>::ne(beta, ScalarConvert<int, real>::to(1))) { THCTensor_(mul)(state, r_, r_, beta); } if(r_->stride[0] == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sger(state, vec1->size[0], vec2->size[0], alpha, THCTensor_(data)(state, vec1), vec1->stride[0], THCTensor_(data)(state, vec2), vec2->stride[0], THCTensor_(data)(state, r_), r_->stride[1]); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dger(state, vec1->size[0], vec2->size[0], alpha, THCTensor_(data)(state, vec1), vec1->stride[0], THCTensor_(data)(state, vec2), vec2->stride[0], THCTensor_(data)(state, r_), r_->stride[1]); #endif } else if(r_->stride[1] == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sger(state, vec2->size[0], vec1->size[0], alpha, THCTensor_(data)(state, vec2), vec2->stride[0], THCTensor_(data)(state, vec1), vec1->stride[0], THCTensor_(data)(state, r_), r_->stride[0]); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dger(state, vec2->size[0], vec1->size[0], alpha, THCTensor_(data)(state, vec2), vec2->stride[0], THCTensor_(data)(state, vec1), vec1->stride[0], THCTensor_(data)(state, r_), r_->stride[0]); #endif } else { THCTensor *cr = THCTensor_(newClone)(state, r_); #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sger(state, vec2->size[0], vec1->size[0], alpha, THCTensor_(data)(state, vec2), vec2->stride[0], THCTensor_(data)(state, vec1), vec1->stride[0], THCTensor_(data)(state, cr), cr->stride[0]); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dger(state, vec2->size[0], vec1->size[0], alpha, THCTensor_(data)(state, vec2), vec2->stride[0], THCTensor_(data)(state, vec1), vec1->stride[0], THCTensor_(data)(state, cr), cr->stride[0]); #endif THCTensor_(freeCopyTo)(state, cr, r_); } #elif defined(THC_REAL_IS_HALF) // currently no Hger/SgerEx in Cublas. 
THCTensor *vec2T = THCTensor_(newWithTensor)(state, vec2); THCTensor_(resize2d)(state, vec2T, vec2T->size[0], 1); THCTensor_(transpose)(state, vec2T, NULL, 0, 1); THCTensor *vec1M = THCTensor_(newWithTensor)(state, vec1); THCTensor_(resize2d)(state, vec1M, vec1M->size[0], 1); THCTensor_(addmm)(state, r_, beta, t, alpha, vec1M, vec2T); THCTensor_(free)(state, vec2T); THCTensor_(free)(state, vec1M); #endif #else ERROR_ONLY_FP_TYPES("addr"); #endif } THC_API void THCTensor_(addmm)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real alpha, THCTensor *m1, THCTensor *m2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, r_, t, m1, m2)); char transpose_r, transpose_m1, transpose_m2; THCTensor *r__, *m1_, *m2_; if( (m1->nDimension != 2) || (m2->nDimension != 2) ) THError("matrices expected, got %dD, %dD tensors", m1->nDimension, m2->nDimension); if(t->nDimension != 2) THError("matrix expected, got %dD tensor for t", t->nDimension); if(m1->size[1] != m2->size[0]) { THCDescBuff bm1 = THCTensor_(sizeDesc)(state, m1); THCDescBuff bm2 = THCTensor_(sizeDesc)(state, m2); THError("size mismatch, m1: %s, m2: %s", bm1.str, bm2.str); } if( (t->size[0] != m1->size[0]) || (t->size[1] != m2->size[1]) ) { THCDescBuff bt = THCTensor_(sizeDesc)(state, t); THCDescBuff bm1 = THCTensor_(sizeDesc)(state, m1); THCDescBuff bm2 = THCTensor_(sizeDesc)(state, m2); THError("size mismatch, t: %s, m1: %s, m2: %s", bt.str, bm1.str, bm2.str); } if(t != r_) { THCTensor_(resizeAs)(state, r_, t); if (ScalarConvert<real, double>::to(beta) != 0.0) { THCTensor_(copy)(state, r_, t); } } /* r_ */ if(r_->stride[0] == 1 && r_->stride[1] != 0) { transpose_r = 'n'; r__ = r_; } else if(r_->stride[1] == 1 && r_->stride[0] != 0) { THCTensor *swap = m2; m2 = m1; m1 = swap; transpose_r = 't'; r__ = r_; } else { transpose_r = 'n'; THCTensor *transp_r_ = THCTensor_(newTranspose)(state, r_, 0, 1); r__ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, r__, NULL, 0, 1); } /* m1 */ if(m1->stride[(transpose_r == 'n' ? 0 : 1)] == 1 && m1->stride[(transpose_r == 'n' ? 1 : 0)] != 0) { transpose_m1 = 'n'; m1_ = m1; } else if(m1->stride[(transpose_r == 'n' ? 1 : 0)] == 1 && m1->stride[(transpose_r == 'n' ? 0 : 1)] != 0) { transpose_m1 = 't'; m1_ = m1; } else { transpose_m1 = (transpose_r == 'n' ? 't' : 'n'); m1_ = THCTensor_(newContiguous)(state, m1); } /* m2 */ if(m2->stride[(transpose_r == 'n' ? 0 : 1)] == 1 && m2->stride[(transpose_r == 'n' ? 1 : 0)] != 0) { transpose_m2 = 'n'; m2_ = m2; } else if(m2->stride[(transpose_r == 'n' ? 1 : 0)] == 1 && m2->stride[(transpose_r == 'n' ? 0 : 1)] != 0) { transpose_m2 = 't'; m2_ = m2; } else { transpose_m2 = (transpose_r == 'n' ? 't' : 'n'); m2_ = THCTensor_(newContiguous)(state, m2); } #ifdef THC_REAL_IS_HALF THCudaBlas_Hgemm(state, transpose_m1, transpose_m2, r__->size[(transpose_r == 'n' ? 0 : 1)], r__->size[(transpose_r == 'n' ? 1 : 0)], m1_->size[(transpose_r == 'n' ? 1 : 0)], alpha, THCTensor_(data)(state, m1_), (transpose_m1 == 'n' ? m1_->stride[(transpose_r == 'n' ? 1 : 0)] : m1_->stride[(transpose_r == 'n' ? 0 : 1)]), THCTensor_(data)(state, m2_), (transpose_m2 == 'n' ? m2_->stride[(transpose_r == 'n' ? 1 : 0)] : m2_->stride[(transpose_r == 'n' ? 0 : 1)]), beta, THCTensor_(data)(state, r__), r__->stride[(transpose_r == 'n' ? 1 : 0)]); #elif defined(THC_REAL_IS_FLOAT) THCudaBlas_Sgemm(state, transpose_m1, transpose_m2, r__->size[(transpose_r == 'n' ? 
0 : 1)], r__->size[(transpose_r == 'n' ? 1 : 0)], m1_->size[(transpose_r == 'n' ? 1 : 0)], alpha, THCTensor_(data)(state, m1_), (transpose_m1 == 'n' ? m1_->stride[(transpose_r == 'n' ? 1 : 0)] : m1_->stride[(transpose_r == 'n' ? 0 : 1)]), THCTensor_(data)(state, m2_), (transpose_m2 == 'n' ? m2_->stride[(transpose_r == 'n' ? 1 : 0)] : m2_->stride[(transpose_r == 'n' ? 0 : 1)]), beta, THCTensor_(data)(state, r__), r__->stride[(transpose_r == 'n' ? 1 : 0)]); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemm(state, transpose_m1, transpose_m2, r__->size[(transpose_r == 'n' ? 0 : 1)], r__->size[(transpose_r == 'n' ? 1 : 0)], m1_->size[(transpose_r == 'n' ? 1 : 0)], alpha, THCTensor_(data)(state, m1_), (transpose_m1 == 'n' ? m1_->stride[(transpose_r == 'n' ? 1 : 0)] : m1_->stride[(transpose_r == 'n' ? 0 : 1)]), THCTensor_(data)(state, m2_), (transpose_m2 == 'n' ? m2_->stride[(transpose_r == 'n' ? 1 : 0)] : m2_->stride[(transpose_r == 'n' ? 0 : 1)]), beta, THCTensor_(data)(state, r__), r__->stride[(transpose_r == 'n' ? 1 : 0)]); #endif /* free intermediate variables */ if(m1_ != m1) { THCTensor_(free)(state, m1_); } if(m2_ != m2) { THCTensor_(free)(state, m2_); } if(r__ != r_) { THCTensor_(freeCopyTo)(state, r__, r_); } #else ERROR_ONLY_FP_TYPES("addmm"); #endif } THC_API void THCTensor_(addbmm)(THCState *state, THCTensor *result, real beta, THCTensor *t, real alpha, THCTensor *batch1, THCTensor *batch2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, result, t, batch1, batch2)); THArgCheck(THCTensor_(nDimension)(state, t) == 2, 4, "expected 2D tensor"); THArgCheck(THCTensor_(nDimension)(state, batch1) == 3, 6, "expected 3D tensor"); THArgCheck(THCTensor_(nDimension)(state, batch2) == 3, 7, "expected 3D tensor"); int64_t batchnum = THCTensor_(size)(state, batch1, 0); int64_t m1d1 = THCTensor_(size)(state, batch1, 1); int64_t innerdim = THCTensor_(size)(state, batch1, 2); int64_t m2d2 = THCTensor_(size)(state, batch2, 2); THArgCheck(batchnum == THCTensor_(size)(state, batch2, 0), 7, "equal number of batches expected"); // M is t, as listed in the docs under addbmm THArgCheck(m1d1 == THCTensor_(size)(state, t, 0), 6, "first dimension must match first dimension of M"); THArgCheck(m2d2 == THCTensor_(size)(state, t, 1), 7, "second dimension must match second dimension of M"); THArgCheck(innerdim == THCTensor_(size)(state, batch2, 1), 6, "second dimension must match first dimension of batch2"); if (t != result) { THCTensor_(resizeAs)(state, result, t); if (ScalarConvert<real, double>::to(beta) != 0.0) { THCTensor_(copy)(state, result, t); } } THCTensor *slice1 = THCTensor_(new)(state); THCTensor *slice2 = THCTensor_(new)(state); for (int64_t i=0; i<batchnum; i++) { THCTensor_(select)(state, slice1, batch1, 0, i); THCTensor_(select)(state, slice2, batch2, 0, i); THCTensor_(addmm)(state, result, beta, result, alpha, slice1, slice2); beta = ScalarConvert<int, real>::to(1); } THCTensor_(free)(state, slice1); THCTensor_(free)(state, slice2); #else ERROR_ONLY_FP_TYPES("addbmm"); #endif } __global__ void createBatchGemmBuffer(const real** buffer, real* data, int64_t stride, int64_t num_batches) { const int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_batches) { buffer[idx] = data + idx * stride; } } __global__ void createBatchGemmBuffer3(const real** buffer1, const real ** buffer2, const real ** buffer3, real* data1, real * data2, real * data3, int64_t stride1, int64_t stride2, int64_t stride3, 
int64_t num_batches) { const int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_batches) { buffer1[idx] = data1 + idx * stride1; buffer2[idx] = data2 + idx * stride2; buffer3[idx] = data3 + idx * stride3; } } THC_API void THCTensor_(baddbmm)(THCState *state, THCTensor *result, real beta, THCTensor *t, real alpha, THCTensor *batch1, THCTensor *batch2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, result, t, batch1, batch2)); THArgCheck(THCTensor_(nDimension)(state, t) == 3, 4, "expected 3D tensor"); THArgCheck(THCTensor_(nDimension)(state, batch1) == 3, 6, "expected 3D tensor"); THArgCheck(THCTensor_(nDimension)(state, batch2) == 3, 7, "expected 3D tensor"); THArgCheck(THCTensor_(size)(state, t, 0) == THCTensor_(size)(state, batch1, 0), 6, "equal number of batches expected"); THArgCheck(THCTensor_(size)(state, t, 0) == THCTensor_(size)(state, batch2, 0), 7, "equal number of batches expected"); THArgCheck(THCTensor_(size)(state, t, 1) == THCTensor_(size)(state, batch1, 1), 6, "wrong matrix size"); THArgCheck(THCTensor_(size)(state, t, 2) == THCTensor_(size)(state, batch2, 2), 7, "wrong matrix size"); THArgCheck(THCTensor_(size)(state, batch1, 2) == THCTensor_(size)(state, batch2, 1), 6, "wrong matrix size"); if (t != result) { THCTensor_(resizeAs)(state, result, t); if (ScalarConvert<real, double>::to(beta) != 0.0) { THCTensor_(copy)(state, result, t); } } bool transpose_result; char transpose_batch1, transpose_batch2; int64_t lda, ldb, ldc; THCTensor *result_, *batch1_, *batch2_; if (result->stride[1] == 1) { transpose_result = false; result_ = result; ldc = result_->stride[2]; } else if (result->stride[2] == 1) { transpose_result = true; THCTensor *swap = batch2; batch2 = batch1; batch1 = swap; result_ = result; ldc = result_->stride[1]; } else { transpose_result = false; THCTensor *transp_r_ = THCTensor_(newTranspose)(state, result, 1, 2); result_ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, result_, NULL, 1, 2); ldc = result_->stride[2]; } if (batch1->stride[transpose_result ? 2 : 1] == 1 && batch1->stride[transpose_result ? 1 : 2] != 0) { transpose_batch1 = 'n'; batch1_ = batch1; lda = batch1_->stride[transpose_result ? 1 : 2]; } else if (batch1->stride[transpose_result ? 1 : 2] == 1 && batch1->stride[transpose_result ? 2 : 1] != 0) { transpose_batch1 = 't'; batch1_ = batch1; lda = batch1_->stride[transpose_result ? 2 : 1]; } else { transpose_batch1 = transpose_result ? 'n' : 't'; batch1_ = THCTensor_(newContiguous)(state, batch1); lda = batch1_->stride[1]; } if (batch2->stride[transpose_result ? 2 : 1] == 1 && batch2->stride[transpose_result ? 1 : 2] != 0) { transpose_batch2 = 'n'; batch2_ = batch2; ldb = batch2_->stride[transpose_result ? 1 : 2]; } else if (batch2->stride[transpose_result ? 1 : 2] == 1 && batch2->stride[transpose_result ? 2 : 1] != 0) { transpose_batch2 = 't'; batch2_ = batch2; ldb = batch2_->stride[transpose_result ? 2 : 1]; } else { transpose_batch2 = transpose_result ? 'n' : 't'; batch2_ = THCTensor_(newContiguous)(state, batch2); ldb = batch2_->stride[1]; } int64_t num_batches = result_->size[0]; #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) // Compute pointers to matrices in each batch. #if CUDA_VERSION < 8000 size_t matrices_size = num_batches * sizeof(real*); // Copy pointers to device. 
const real **d_matrices1, **d_matrices2; real **d_result_matrices; THCudaCheck(THCudaMalloc(state, (void**)&d_matrices1, matrices_size)); THCudaCheck(THCudaMalloc(state, (void**)&d_matrices2, matrices_size)); THCudaCheck(THCudaMalloc(state, (void**)&d_result_matrices, matrices_size)); const int64_t block = 512; const int64_t grid = (num_batches + block - 1) / block; createBatchGemmBuffer3<<<grid, block, 0, THCState_getCurrentStream(state)>>>( d_matrices1, d_matrices2, (const real**)d_result_matrices, THCTensor_(data)(state, batch1_), THCTensor_(data)(state, batch2_), THCTensor_(data)(state, result_), batch1_->stride[0], batch2_->stride[0], result_->stride[0], num_batches); #ifdef THC_REAL_IS_FLOAT THCudaBlas_SgemmBatched( state, transpose_batch1, transpose_batch2, result_->size[transpose_result ? 2 : 1], result_->size[transpose_result ? 1 : 2], batch1_->size[transpose_result ? 1 : 2], alpha, d_matrices1, lda, d_matrices2, ldb, beta, d_result_matrices, ldc, num_batches); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_DgemmBatched( state, transpose_batch1, transpose_batch2, result_->size[transpose_result ? 2 : 1], result_->size[transpose_result ? 1 : 2], batch1_->size[transpose_result ? 1 : 2], alpha, d_matrices1, lda, d_matrices2, ldb, beta, d_result_matrices, ldc, num_batches); #endif //THC_REAL THCudaFree(state, d_matrices1); THCudaFree(state, d_matrices2); THCudaFree(state, d_result_matrices); #else #ifdef THC_REAL_IS_FLOAT THCudaBlas_SgemmStridedBatched( state, transpose_batch1, transpose_batch2, result_->size[transpose_result ? 2 : 1], result_->size[transpose_result ? 1 : 2], batch1_->size[transpose_result ? 1 : 2], alpha, THCTensor_(data)(state, batch1_), lda, batch1_->stride[0], THCTensor_(data)(state, batch2_), ldb, batch2_->stride[0], beta, THCTensor_(data)(state, result_), ldc, result_->stride[0], num_batches); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_DgemmStridedBatched( state, transpose_batch1, transpose_batch2, result_->size[transpose_result ? 2 : 1], result_->size[transpose_result ? 1 : 2], batch1_->size[transpose_result ? 1 : 2], alpha, THCTensor_(data)(state, batch1_), lda, batch1_->stride[0], THCTensor_(data)(state, batch2_), ldb, batch2_->stride[0], beta, THCTensor_(data)(state, result_), ldc, result_->stride[0], num_batches); #endif //THC_REAL #endif //CUDA_VERSION #elif defined(THC_REAL_IS_HALF) #if CUDA_VERSION < 9010 // Currently no HgemmBatched in Cublas for (int64_t i = 0; i < num_batches; ++i) { THCudaBlas_Hgemm( state, transpose_batch1, transpose_batch2, result_->size[transpose_result ? 2 : 1], result_->size[transpose_result ? 1 : 2], batch1_->size[transpose_result ? 1 : 2], alpha, THCTensor_(data)(state, batch1_) + i * batch1_->stride[0], lda, THCTensor_(data)(state, batch2_) + i * batch2_->stride[0], ldb, beta, THCTensor_(data)(state, result_) + i * result_->stride[0], ldc); } #else cudaDeviceProp* prop = THCState_getCurrentDeviceProperties(state); if (prop->major >= 5){ THCudaBlas_HgemmStridedBatched( state, transpose_batch1, transpose_batch2, result_->size[transpose_result ? 2 : 1], result_->size[transpose_result ? 1 : 2], batch1_->size[transpose_result ? 1 : 2], alpha, THCTensor_(data)(state, batch1_), lda, batch1_->stride[0], THCTensor_(data)(state, batch2_), ldb, batch2_->stride[0], beta, THCTensor_(data)(state, result_), ldc, result_->stride[0], num_batches); } else { for (int64_t i = 0; i < num_batches; ++i) { THCudaBlas_Hgemm( state, transpose_batch1, transpose_batch2, result_->size[transpose_result ? 2 : 1], result_->size[transpose_result ? 
1 : 2], batch1_->size[transpose_result ? 1 : 2], alpha, THCTensor_(data)(state, batch1_) + i * batch1_->stride[0], lda, THCTensor_(data)(state, batch2_) + i * batch2_->stride[0], ldb, beta, THCTensor_(data)(state, result_) + i * result_->stride[0], ldc); } } #endif #endif if (batch1_ != batch1) { THCTensor_(free)(state, batch1_); } if (batch2_ != batch2) { THCTensor_(free)(state, batch2_); } if (result_ != result) { THCTensor_(freeCopyTo)(state, result_, result); } #else ERROR_ONLY_FP_TYPES("baddbmm"); #endif } THC_API void THCTensor_(btrifact)(THCState *state, THCTensor *ra_, THCudaIntTensor *rpivots_, THCudaIntTensor *rinfo_, int pivot, THCTensor *a) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THAssert(THCTensor_(checkGPU)(state, 2, ra_, a)); THArgCheck(THCTensor_(nDimension)(state, a) == 3, 3, "expected 3D tensor"); THArgCheck(THCTensor_(size)(state, a, 1) == THCTensor_(size)(state, a, 2), 3, "matrices must be square"); if (ra_ != a) { THCTensor_(resizeAs)(state, ra_, a); // not sure if this is kosher, but things are nicer if we return in column major if (ra_->stride[0] == 1) { THCTensor_(transpose)(state, ra_, NULL, 1, 0); } else if (ra_->stride[2] == 1) { THCTensor_(transpose)(state, ra_, NULL, 1, 2); } THCTensor_(copy)(state, ra_, a); } int n = a->size[1]; int lda; THCTensor *ra__; if (ra_->stride[1] == 1) { // column ordered, what BLAS wants lda = ra_->stride[2]; ra__ = ra_; } else { // not column ordered, need to make it such (requires copy) THCTensor *transp_r_ = THCTensor_(newTranspose)(state, ra_, 1, 2); ra__ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, ra__, NULL, 1, 2); lda = ra__->stride[2]; } int64_t num_batches = ra__->size[0]; if (!pivot) { THCudaIntTensor *t = THCudaIntTensor_new(state); THCudaIntTensor_range(state, t, 1, n, 1); THCudaIntTensor_unsqueeze1d(state, t, t, 0); THCudaIntTensor** ptrs = (THCudaIntTensor**) THAlloc(sizeof(THCudaIntTensor*)*num_batches); for (int64_t i=0; i<num_batches; i++) { ptrs[i] = t; } THCudaIntTensor_catArray(state, rpivots_, ptrs, num_batches, 0); THCudaIntTensor_free(state, t); THFree(ptrs); } else { THCudaIntTensor_resize2d(state, rpivots_, num_batches, n); } bool free_rinfo_ = !rinfo_; if (rinfo_ == NULL) rinfo_ = THCudaIntTensor_new(state); THCudaIntTensor_resize1d(state, rinfo_, num_batches); int *info_gpu = THCudaIntTensor_data(state, rinfo_); // Copy pointers to device. 
real **d_result; size_t matrices_size = num_batches * sizeof(real*); THCudaCheck(THCudaMalloc(state, (void**)&d_result, matrices_size)); const int64_t block = 512; const int64_t grid = (num_batches + block - 1) / block; createBatchGemmBuffer<<<grid, block, 0, THCState_getCurrentStream(state)>>>( (const real**)d_result, THCTensor_(data)(state, ra__), ra__->stride[0], num_batches); int *pivots_gpu = NULL; if (pivot) { pivots_gpu = THCudaIntTensor_data(state, rpivots_); } #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgetrf(state, n, d_result, lda, pivots_gpu, info_gpu, num_batches); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgetrf(state, n, d_result, lda, pivots_gpu, info_gpu, num_batches); #endif THCudaFree(state, d_result); if (ra__ != ra_) { THCTensor_(freeCopyTo)(state, ra__, ra_); } if (free_rinfo_) { real min = THCudaIntTensor_minall(state, rinfo_); real max = THCudaIntTensor_maxall(state, rinfo_); THCudaIntTensor_free(state, rinfo_); if (min != 0 || max != 0) { THError("failed to factorize some batch elements (min info == %d, max info == %d)", min, max); } } #else THError("btrifact for CUDA tensors is only supported for floats and doubles"); #endif } THC_API void THCTensor_(btrisolve)(THCState *state, THCTensor *rb_, THCTensor *b, THCTensor *atf, THCudaIntTensor *pivots) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THAssert(THCTensor_(checkGPU)(state, 3, rb_, atf, b)); THArgCheck(THCTensor_(nDimension)(state, atf) == 3, 3, "expected 3D tensor"); THArgCheck(THCTensor_(nDimension)(state, b) == 3 || THCTensor_(nDimension)(state, b) == 2, 4, "expected 2D or 3D tensor"); THArgCheck(THCTensor_(size)(state, atf, 0) == THCTensor_(size)(state, b, 0), 3, "number of batches must be equal"); THArgCheck(THCTensor_(size)(state, atf, 1) == THCTensor_(size)(state, atf, 2), 3, "A matrices must be square"); THArgCheck(THCTensor_(size)(state, atf, 1) == THCTensor_(size)(state, b, 1), 3, "dimensions of A and b must be equal"); if (rb_ != b) { THCTensor_(resizeAs)(state, rb_, b); THCTensor_(copy)(state, rb_, b); } int n = atf->size[1]; int nrhs = rb_->nDimension > 2 ? rb_->size[2] : 1; THCTensor *atf_; THCTensor *rb__; int lda, ldb; // correct ordering of A_tf if (atf->stride[1] == 1) { // column ordered, what BLAS wants lda = atf->stride[2]; atf_ = atf; } else { // not column ordered, need to make it such (requires copy) // it would be nice if we could use the op(A) flags to automatically // transpose A if needed, but this leads to unpredictable behavior if the // user clones A_tf later with a different ordering THCTensor *transp_r_ = THCTensor_(newTranspose)(state, atf, 1, 2); atf_ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, atf_, NULL, 1, 2); lda = atf_->stride[2]; } // correct ordering of B if (rb_->stride[1] == 1) { // column ordered if (rb_->nDimension == 2 || rb_->size[2] == 1) { ldb = n; } else { ldb = rb_->stride[2]; } rb__ = rb_; } else { // make column ordered if (rb_->nDimension > 2) { THCTensor *transp_r_ = THCTensor_(newTranspose)(state, rb_, 1, 2); rb__ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, rb__, NULL, 1, 2); ldb = rb__->stride[2]; } else { rb__ = THCTensor_(newClone)(state, rb_); ldb = n; } } int64_t num_batches = rb_->size[0]; size_t matrices_size = num_batches * sizeof(real*); // Copy pointers to device. 
real **d_result; const real **d_atf; THCudaCheck(THCudaMalloc(state, (void**)&d_result, matrices_size)); THCudaCheck(THCudaMalloc(state, (void**)&d_atf, matrices_size)); const int64_t block = 512; const int64_t grid = (num_batches + block - 1) / block; createBatchGemmBuffer<<<grid, block, 0, THCState_getCurrentStream(state)>>>( (const real**)d_result, THCTensor_(data)(state, rb__), rb__->stride[0], num_batches); createBatchGemmBuffer<<<grid, block, 0, THCState_getCurrentStream(state)>>>( d_atf, THCTensor_(data)(state, atf_), atf_->stride[0], num_batches); if (!THCudaIntTensor_isContiguous(state, pivots)) { THError("Error: pivots is not contiguous."); } int *pivots_data = THCudaIntTensor_data(state, pivots); int info; #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgetrs(state, 'n', n, nrhs, d_atf, lda, pivots_data, d_result, ldb, &info, num_batches); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgetrs(state, 'n', n, nrhs, d_atf, lda, pivots_data, d_result, ldb, &info, num_batches); #endif if (info < 0) { THError("Illegal arg %d", -info); } THCudaFree(state, d_result); THCudaFree(state, d_atf); if (atf_ != atf) { THCTensor_(free)(state, atf_); } if (rb__ != rb_) { THCTensor_(freeCopyTo)(state, rb__, rb_); } #else THError("btrisolve for CUDA tensors is only supported for floats and doubles"); #endif } #endif
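In the `baddbmm` paths above (both the HIP and CUDA variants of this file), the pre-CUDA-8 branch builds device arrays of per-batch matrix pointers with `createBatchGemmBuffer3` and hands them to the batched GEMM wrappers. The stand-alone sketch below shows the same pointer-array convention with plain `cublasSgemmBatched`, using made-up sizes (a batch of 8 column-major 64x64 matrices); it is an illustration of the calling pattern, not code from the Torch tree, and error checking is omitted for brevity.

// Illustrative sketch: sizes and names are assumptions, not from THCTensorMathBlas.
#include <vector>
#include <cublas_v2.h>
#include <cuda_runtime.h>

// Multiply 8 independent 64x64 matrix pairs with one batched cuBLAS call:
// C_i = A_i * B_i, where d_A, d_B, d_C hold the batches contiguously on the device.
void batched_gemm_sketch(const float *d_A, const float *d_B, float *d_C) {
	const int batch = 8, n = 64;
	const size_t stride = (size_t)n * n;

	// Per-batch pointer tables, built on the host and copied to the device
	// (the THC code builds them on the GPU with createBatchGemmBuffer3 instead).
	std::vector<const float*> hA(batch), hB(batch);
	std::vector<float*> hC(batch);
	for (int i = 0; i < batch; ++i) {
		hA[i] = d_A + i * stride;
		hB[i] = d_B + i * stride;
		hC[i] = d_C + i * stride;
	}
	const float **dA_arr; const float **dB_arr; float **dC_arr;
	cudaMalloc((void**)&dA_arr, batch * sizeof(float*));
	cudaMalloc((void**)&dB_arr, batch * sizeof(float*));
	cudaMalloc((void**)&dC_arr, batch * sizeof(float*));
	cudaMemcpy(dA_arr, hA.data(), batch * sizeof(float*), cudaMemcpyHostToDevice);
	cudaMemcpy(dB_arr, hB.data(), batch * sizeof(float*), cudaMemcpyHostToDevice);
	cudaMemcpy(dC_arr, hC.data(), batch * sizeof(float*), cudaMemcpyHostToDevice);

	cublasHandle_t handle;
	cublasCreate(&handle);
	const float alpha = 1.f, beta = 0.f;
	cublasSgemmBatched(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, n, n,
	                   &alpha, dA_arr, n, dB_arr, n, &beta, dC_arr, n, batch);
	cublasDestroy(handle);
	cudaFree(dA_arr);
	cudaFree(dB_arr);
	cudaFree(dC_arr);
}

On CUDA 8 and newer the file instead takes the `*gemmStridedBatched` branch, which accepts a single base pointer plus a per-batch stride and avoids building pointer arrays altogether.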
1739495815b747fe202f0b0605684f6011156b7b.hip
// !!! This is a file automatically generated by hipify!!! /************************************************************************* * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ************************************************************************/ #include <chrono> #include <cstdio> #include <cstdlib> #include <string> #include <vector> #include "nccl.h" #include "test_utilities.h" template<typename T> void RunTest(T** buff, const int N, const ncclDataType_t type, const int root, ncclComm_t* const comms, const std::vector<int>& dList) { // initialize data int nDev = 0; ncclCommCount(comms[0], &nDev); hipStream_t* s = (hipStream_t*)malloc(sizeof(hipStream_t)*nDev); T* buffer = (T*)malloc(N * sizeof(T)); T* result = (T*)malloc(N * sizeof(T)); memset(result, 0, N * sizeof(T)); for (int i = 0; i < nDev; ++i) { CUDACHECK(hipSetDevice(dList[i])); CUDACHECK(hipStreamCreate(s+i)); if (i == root) { Randomize(buff[root], N, root); CUDACHECK(hipMemcpy(result, buff[root], N * sizeof(T), hipMemcpyDeviceToHost)); } else { CUDACHECK(hipMemset(buff[i], 0, N * sizeof(T))); } CUDACHECK(hipDeviceSynchronize()); } // warm up GPU for (int i = 0; i < nDev; ++i) { CUDACHECK(hipSetDevice(dList[i])); ncclBcast((void*)buff[i], ::min(32 * 1024, N), type, root, comms[i], s[i]); } for (int i = 0; i < nDev; ++i) { CUDACHECK(hipSetDevice(dList[i])); CUDACHECK(hipStreamSynchronize(s[i])); } // for (int n = 1; n <= N; n = n << 1) { int n = N; printf("%12i %12i %6s %4i", (int)(n * sizeof(T)), n, TypeName(type).c_str(), root); auto start = std::chrono::high_resolution_clock::now(); for (int i = 0; i < nDev; ++i) { CUDACHECK(hipSetDevice(dList[i])); ncclBcast((void*)buff[i], n, type, root, comms[i], s[i]); } for (int i = 0; i < nDev; ++i) { CUDACHECK(hipSetDevice(dList[i])); CUDACHECK(hipStreamSynchronize(s[i])); } auto stop = std::chrono::high_resolution_clock::now(); double elapsedSec = std::chrono::duration_cast<std::chrono::duration<double>>( stop - start).count(); double algbw = (double)(n * sizeof(T)) / 1.0E9 / elapsedSec; double busbw = algbw; double maxDelta = 0.0; for 
(int i = 0; i < nDev; ++i) { CUDACHECK(hipSetDevice(dList[i])); double tmpDelta = CheckDelta<T>(buff[i], result, n); maxDelta = ::max(tmpDelta, maxDelta); } printf(" %7.3f %5.2f %5.2f %7.0le\n", elapsedSec * 1.0E3, algbw, busbw, maxDelta); } for(int i=0; i < nDev; ++i) { CUDACHECK(hipSetDevice(dList[i])); CUDACHECK(hipStreamDestroy(s[i])); } free(s); free(buffer); free(result); } template<typename T> void RunTests(const int N, const ncclDataType_t type, ncclComm_t* const comms, const std::vector<int>& dList) { int nDev = 0; ncclCommCount(comms[0], &nDev); T** buff = (T**)malloc(nDev * sizeof(T*)); for (int i = 0; i < nDev; ++i) { CUDACHECK(hipSetDevice(dList[i])); CUDACHECK(hipMalloc(buff + i, N * sizeof(T))); } //for (int root = 1; root < 2; ++root) { for (int root = 0; root < nDev; ++root) { RunTest<T>(buff, N, type, root, comms, dList); } for (int i = 0; i < nDev; ++i) { CUDACHECK(hipSetDevice(dList[i])); CUDACHECK(hipFree(buff[i])); } free(buff); } void usage() { printf("Tests nccl Broadcast with user supplied arguments.\n" " Usage: broadcast_test <data size in bytes> [number of GPUs] " "[GPU 0] [GPU 1] ...\n\n"); } int main(int argc, char* argv[]) { int nVis = 0; CUDACHECK(hipGetDeviceCount(&nVis)); unsigned long long N = 0; if (argc > 1) { int t = sscanf(argv[1], "%llu", &N); if (t == 0) { printf("Error: %s is not an integer!\n\n", argv[1]); usage(); exit(EXIT_FAILURE); } } else { printf("Error: must specify at least data size in bytes!\n\n"); usage(); exit(EXIT_FAILURE); } int nDev = nVis; if (argc > 2) { int t = sscanf(argv[2], "%d", &nDev); if (t == 0) { printf("Error: %s is not an integer!\n\n", argv[1]); usage(); exit(EXIT_FAILURE); } } std::vector<int> dList(nDev); for (int i = 0; i < nDev; ++i) dList[i] = i % nVis; if (argc > 3) { if (argc - 3 != nDev) { printf("Error: insufficient number of GPUs in list\n\n"); usage(); exit(EXIT_FAILURE); } for (int i = 0; i < nDev; ++i) { int t = sscanf(argv[3 + i], "%d", dList.data() + i); if (t == 0) { printf("Error: %s is not an integer!\n\n", argv[2 + i]); usage(); exit(EXIT_FAILURE); } } } ncclComm_t* comms = (ncclComm_t*)malloc(sizeof(ncclComm_t)*nDev);; ncclCommInitAll(comms, nDev, dList.data()); printf("# Using devices\n"); for (int g = 0; g < nDev; ++g) { int cudaDev; int rank; hipDeviceProp_t prop; ncclCommCuDevice(comms[g], &cudaDev); ncclCommUserRank(comms[g], &rank); CUDACHECK(hipGetDeviceProperties(&prop, cudaDev)); printf("# Rank %2d uses device %2d [0x%02x] %s\n", rank, cudaDev, prop.pciBusID, prop.name); } printf("\n"); printf("# %10s %12s %6s %4s %7s %5s %5s %7s\n", "bytes", "N", "type", "root", "time", "algbw", "busbw", "delta"); RunTests<char>(N / sizeof(char), ncclChar, comms, dList); RunTests<int>(N / sizeof(int), ncclInt, comms, dList); #ifdef CUDA_HAS_HALF RunTests<half>(N / sizeof(half), ncclHalf, comms, dList); #endif RunTests<float>(N / sizeof(float), ncclFloat, comms, dList); RunTests<double>(N / sizeof(double), ncclDouble, comms, dList); printf("\n"); for(int i = 0; i < nDev; ++i) ncclCommDestroy(comms[i]); free(comms); exit(EXIT_SUCCESS); }
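// The timing loop above reduces to a simple effective-bandwidth formula,
// algbw = bytes / 1e9 / seconds, with busbw == algbw for broadcast. A small
// self-contained sketch of that arithmetic (the payload and time below are
// illustrative values, not measurements from this test):
#include <cstdio>

static double effective_bandwidth_gbs(double bytes, double seconds) {
  return bytes / 1.0e9 / seconds;  // GB/s, matching the test's algbw column
}

int main() {
  const double bytes = 256.0 * 1024 * 1024;  // a 256 MiB broadcast payload
  const double seconds = 0.025;              // 25 ms end-to-end
  printf("algbw = %.2f GB/s\n", effective_bandwidth_gbs(bytes, seconds));  // ~10.74
  return 0;
}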
1739495815b747fe202f0b0605684f6011156b7b.cu
/************************************************************************* * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ************************************************************************/ #include <chrono> #include <cstdio> #include <cstdlib> #include <string> #include <vector> #include "nccl.h" #include "test_utilities.h" template<typename T> void RunTest(T** buff, const int N, const ncclDataType_t type, const int root, ncclComm_t* const comms, const std::vector<int>& dList) { // initialize data int nDev = 0; ncclCommCount(comms[0], &nDev); cudaStream_t* s = (cudaStream_t*)malloc(sizeof(cudaStream_t)*nDev); T* buffer = (T*)malloc(N * sizeof(T)); T* result = (T*)malloc(N * sizeof(T)); memset(result, 0, N * sizeof(T)); for (int i = 0; i < nDev; ++i) { CUDACHECK(cudaSetDevice(dList[i])); CUDACHECK(cudaStreamCreate(s+i)); if (i == root) { Randomize(buff[root], N, root); CUDACHECK(cudaMemcpy(result, buff[root], N * sizeof(T), cudaMemcpyDeviceToHost)); } else { CUDACHECK(cudaMemset(buff[i], 0, N * sizeof(T))); } CUDACHECK(cudaDeviceSynchronize()); } // warm up GPU for (int i = 0; i < nDev; ++i) { CUDACHECK(cudaSetDevice(dList[i])); ncclBcast((void*)buff[i], std::min(32 * 1024, N), type, root, comms[i], s[i]); } for (int i = 0; i < nDev; ++i) { CUDACHECK(cudaSetDevice(dList[i])); CUDACHECK(cudaStreamSynchronize(s[i])); } // for (int n = 1; n <= N; n = n << 1) { int n = N; printf("%12i %12i %6s %4i", (int)(n * sizeof(T)), n, TypeName(type).c_str(), root); auto start = std::chrono::high_resolution_clock::now(); for (int i = 0; i < nDev; ++i) { CUDACHECK(cudaSetDevice(dList[i])); ncclBcast((void*)buff[i], n, type, root, comms[i], s[i]); } for (int i = 0; i < nDev; ++i) { CUDACHECK(cudaSetDevice(dList[i])); CUDACHECK(cudaStreamSynchronize(s[i])); } auto stop = std::chrono::high_resolution_clock::now(); double elapsedSec = std::chrono::duration_cast<std::chrono::duration<double>>( stop - start).count(); double algbw = (double)(n * sizeof(T)) / 1.0E9 / elapsedSec; double busbw = algbw; double maxDelta = 0.0; for (int i = 0; i < nDev; ++i) { 
CUDACHECK(cudaSetDevice(dList[i])); double tmpDelta = CheckDelta<T>(buff[i], result, n); maxDelta = std::max(tmpDelta, maxDelta); } printf(" %7.3f %5.2f %5.2f %7.0le\n", elapsedSec * 1.0E3, algbw, busbw, maxDelta); } for(int i=0; i < nDev; ++i) { CUDACHECK(cudaSetDevice(dList[i])); CUDACHECK(cudaStreamDestroy(s[i])); } free(s); free(buffer); free(result); } template<typename T> void RunTests(const int N, const ncclDataType_t type, ncclComm_t* const comms, const std::vector<int>& dList) { int nDev = 0; ncclCommCount(comms[0], &nDev); T** buff = (T**)malloc(nDev * sizeof(T*)); for (int i = 0; i < nDev; ++i) { CUDACHECK(cudaSetDevice(dList[i])); CUDACHECK(cudaMalloc(buff + i, N * sizeof(T))); } //for (int root = 1; root < 2; ++root) { for (int root = 0; root < nDev; ++root) { RunTest<T>(buff, N, type, root, comms, dList); } for (int i = 0; i < nDev; ++i) { CUDACHECK(cudaSetDevice(dList[i])); CUDACHECK(cudaFree(buff[i])); } free(buff); } void usage() { printf("Tests nccl Broadcast with user supplied arguments.\n" " Usage: broadcast_test <data size in bytes> [number of GPUs] " "[GPU 0] [GPU 1] ...\n\n"); } int main(int argc, char* argv[]) { int nVis = 0; CUDACHECK(cudaGetDeviceCount(&nVis)); unsigned long long N = 0; if (argc > 1) { int t = sscanf(argv[1], "%llu", &N); if (t == 0) { printf("Error: %s is not an integer!\n\n", argv[1]); usage(); exit(EXIT_FAILURE); } } else { printf("Error: must specify at least data size in bytes!\n\n"); usage(); exit(EXIT_FAILURE); } int nDev = nVis; if (argc > 2) { int t = sscanf(argv[2], "%d", &nDev); if (t == 0) { printf("Error: %s is not an integer!\n\n", argv[1]); usage(); exit(EXIT_FAILURE); } } std::vector<int> dList(nDev); for (int i = 0; i < nDev; ++i) dList[i] = i % nVis; if (argc > 3) { if (argc - 3 != nDev) { printf("Error: insufficient number of GPUs in list\n\n"); usage(); exit(EXIT_FAILURE); } for (int i = 0; i < nDev; ++i) { int t = sscanf(argv[3 + i], "%d", dList.data() + i); if (t == 0) { printf("Error: %s is not an integer!\n\n", argv[2 + i]); usage(); exit(EXIT_FAILURE); } } } ncclComm_t* comms = (ncclComm_t*)malloc(sizeof(ncclComm_t)*nDev);; ncclCommInitAll(comms, nDev, dList.data()); printf("# Using devices\n"); for (int g = 0; g < nDev; ++g) { int cudaDev; int rank; cudaDeviceProp prop; ncclCommCuDevice(comms[g], &cudaDev); ncclCommUserRank(comms[g], &rank); CUDACHECK(cudaGetDeviceProperties(&prop, cudaDev)); printf("# Rank %2d uses device %2d [0x%02x] %s\n", rank, cudaDev, prop.pciBusID, prop.name); } printf("\n"); printf("# %10s %12s %6s %4s %7s %5s %5s %7s\n", "bytes", "N", "type", "root", "time", "algbw", "busbw", "delta"); RunTests<char>(N / sizeof(char), ncclChar, comms, dList); RunTests<int>(N / sizeof(int), ncclInt, comms, dList); #ifdef CUDA_HAS_HALF RunTests<half>(N / sizeof(half), ncclHalf, comms, dList); #endif RunTests<float>(N / sizeof(float), ncclFloat, comms, dList); RunTests<double>(N / sizeof(double), ncclDouble, comms, dList); printf("\n"); for(int i = 0; i < nDev; ++i) ncclCommDestroy(comms[i]); free(comms); exit(EXIT_SUCCESS); }
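// A minimal, self-contained sketch of the single-process broadcast pattern
// the test above exercises, using the same pre-2.x style NCCL API (int
// counts, one communicator per device, no group calls). The device count,
// buffer size and reduced error handling are simplifying assumptions, not
// values taken from the test.
#include <cstdio>
#include <cuda_runtime.h>
#include "nccl.h"

int main() {
  const int nDev = 2;            // assume two visible GPUs
  const int count = 1 << 20;     // floats per device
  int devs[2] = {0, 1};

  ncclComm_t comms[2];
  ncclCommInitAll(comms, nDev, devs);

  float* buff[2];
  cudaStream_t streams[2];
  for (int i = 0; i < nDev; ++i) {
    cudaSetDevice(devs[i]);
    cudaMalloc((void**)&buff[i], count * sizeof(float));
    cudaStreamCreate(&streams[i]);
  }

  // Root (device 0) holds the data; every rank calls ncclBcast on its buffer.
  for (int i = 0; i < nDev; ++i) {
    cudaSetDevice(devs[i]);
    ncclBcast((void*)buff[i], count, ncclFloat, /*root=*/0, comms[i], streams[i]);
  }
  for (int i = 0; i < nDev; ++i) {
    cudaSetDevice(devs[i]);
    cudaStreamSynchronize(streams[i]);
  }

  for (int i = 0; i < nDev; ++i) {
    cudaSetDevice(devs[i]);
    cudaFree(buff[i]);
    cudaStreamDestroy(streams[i]);
    ncclCommDestroy(comms[i]);
  }
  printf("broadcast of %d floats done\n", count);
  return 0;
}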
a9fc7cde0bdf7ec8d158630959e0d6a91808ad81.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <stdio.h> #include <unistd.h> #include <sys/mman.h> #include <sys/types.h> #include <sys/stat.h> #include <assert.h> #include <fcntl.h> #include <errno.h> #include <vector> #include <time.h> #include <sys/time.h> using std::cout; using std::cerr; using std::endl; using std::cin; using std::vector; constexpr size_t IM_X = 1300; constexpr size_t IM_Y = 600; constexpr size_t IM_V = sizeof(float2); constexpr size_t IM_SIZE = IM_X * IM_Y * IM_V; constexpr size_t XSR = 10; constexpr size_t YSR = 5; __device__ inline float2 mul(float s, float2 v) { v.x *= s; v.y *= s; return v; } __device__ inline float2 add(float2 v1, float2 v2) { v1.x += v2.x; v1.y += v2.y; return v1; } __global__ void integrate(float2* out, hipTextureObject_t vecs, float dt, size_t steps) { float2 k1, k2, k3, k4, p, q; // Initial position p.x = blockIdx.x * blockDim.x + threadIdx.x; p.y = blockIdx.y * blockDim.y + threadIdx.y; // Output location size_t idx = (blockDim.x * gridDim.x * (int)p.y + (int)p.x) * steps; // Apply sample rate p.x *= XSR; p.y *= YSR; // Initial output out[idx++] = p; // Integrate forward for (size_t i = 1; i < steps; i++) { k1 = mul(dt, tex2D<float2>(vecs, p.x, p.y)); q = add(p, mul(0.5, k1)); k2 = mul(dt, tex2D<float2>(vecs, q.x, q.y)); q = add(p, mul(0.5, k2)); k3 = mul(dt, tex2D<float2>(vecs, q.x, q.y)); q = add(p, k3); k4 = mul(dt, tex2D<float2>(vecs, q.x, q.y)); p.x += (1.0/6.0)*(k1.x + 2*k2.x + 2*k3.x + k4.x); p.y += (1.0/6.0)*(k1.y + 2*k2.y + 2*k3.y + k4.y); out[idx++] = p; } } __host__ hipError_t checkCuda(hipError_t result) { if (result != hipSuccess) { cerr << "CUDA Runtime Error: " << hipGetErrorString(result) << endl; abort(); } return result; } __host__ int checkLinux(int result) { if (result == -1) { cerr << "Linux Runtime Error: (" << errno << ") " << strerror(errno) << endl; abort(); } return result; } __host__ void writeCSV(char* file, float2* output, size_t num_particles, size_t steps) { const size_t file_size = num_particles * steps * (20 + 9 + 9 + 3); umask(0111); int fd = checkLinux(open(file, O_RDWR | O_CREAT | O_TRUNC, 06666)); checkLinux(ftruncate(fd, file_size)); char* map = (char*) mmap(NULL, file_size, PROT_WRITE, MAP_SHARED, fd, 0); checkLinux((int)(size_t)map); char* cur = map; const char* header = "line_id, coordinate_x, coordinate_y\n"; checkLinux(write(fd, header, strlen(header))); for (size_t i = 0; i < num_particles; i++) for (size_t s = 0; s < steps; s++) { float2 p = output[i * steps + s]; cur += sprintf(cur, "%llu,%.7f,%.7f\n", i, p.x, p.y); } msync(map, file_size, MS_SYNC); munmap(map, file_size); checkLinux(ftruncate(fd, cur - map)); checkLinux(close(fd)); } vector<const char*> names; vector<timespec> wall; vector<timespec> proc; vector<size_t> levels; size_t cur_level = 0; __host__ static inline void stime(const char* name) { timespec cur_wall, cur_proc; clock_gettime(CLOCK_REALTIME, &cur_wall); clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &cur_proc); names.push_back(name); levels.push_back(cur_level++); wall.push_back(cur_wall); proc.push_back(cur_proc); } __host__ static inline void ftime() { timespec cur_wall, cur_proc; clock_gettime(CLOCK_REALTIME, &cur_wall); clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &cur_proc); levels.push_back(--cur_level); wall.push_back(cur_wall); proc.push_back(cur_proc); } // from https://gist.github.com/diabloneo/9619917 __host__ static inline void timespecDiff(timespec& a, timespec& b, timespec& result) { result.tv_sec = 
a.tv_sec - b.tv_sec; result.tv_nsec = a.tv_nsec - b.tv_nsec; if (result.tv_nsec < 0) { --result.tv_sec; result.tv_nsec += 1000000000L; } } __host__ static inline double timespecToMs(const timespec& t) { return (double)t.tv_sec * 1000.0 + (double)t.tv_nsec / 1000000.0; } __host__ static size_t ptime(const char* name, size_t n = 0, size_t i = 0, size_t l = 0) { while (n < names.size() and levels[i] == l) { size_t j = i + 1; auto& sw = wall[i]; auto& sp = proc[i]; int jumped = j; while (l < levels[j]) j++; auto& fw = wall[j]; auto& fp = proc[j]; timespec w, p; timespecDiff(fw, sw, w); timespecDiff(fp, sp, p); for (size_t k = 0; k < l; k++) printf("\t"); printf("\"%s\", \"%s\", %.3f, %.3f\n", name, names[n++], timespecToMs(w), timespecToMs(p)); if (jumped < j) n = ptime(name, n, jumped, l + 1); i = j + 1; } return n; } __host__ int main(int argc, char **argv) { stime("Program"); stime("Setup"); if (argc != 3) { ftime(); ftime(); printf("Usage: ./main image output\n"); return 0; } float dt = 1; //cout << "Enter delta time: "; //cin >> dt; size_t steps = 100; //cout << "Enter number of steps: "; //cin >> steps; // Opening file stime("Read input"); int fd = checkLinux(open(argv[1], O_RDONLY)); // Allocating + Mapping host memory float2 *im; hipArray* im_d; float2 *output_d; float2 *output; // Memory mapping does not provide a performance boost. // It trades off between copy time to GPU or copy to RAM. checkCuda(hipHostMalloc(&im, IM_SIZE)); checkLinux(read(fd, im, IM_SIZE)); close(fd); ftime(); // Modified basic cuda texture manipulation obtained from // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html // Allocate CUDA array in device memory stime("Copy to GPU"); hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 32, 0, 0, hipChannelFormatKindFloat); checkCuda(hipMallocArray(&im_d, &channelDesc, IM_X, IM_Y)); checkCuda(hipMemcpyToArray(im_d, 0, 0, im, IM_SIZE, hipMemcpyHostToDevice)); ftime(); // Specify texture stime("Initialize Texture"); struct hipResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = hipResourceTypeArray; resDesc.res.array.array = im_d; // Specify texture object parameters struct hipTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.addressMode[0] = hipAddressModeBorder; texDesc.addressMode[1] = hipAddressModeBorder; texDesc.filterMode = hipFilterModeLinear; texDesc.readMode = hipReadModeElementType; texDesc.maxAnisotropy = 2; texDesc.normalizedCoords = false; // Create texture object hipTextureObject_t imTex = 0; checkCuda(hipCreateTextureObject(&imTex, &resDesc, &texDesc, NULL)); ftime(); dim3 block(26, 24, 1); dim3 grid(5, 5, 1); // dim3 block(1, 24, 1); // dim3 grid(1, 25, 1); const size_t num_particles = block.x * grid.x * block.y * grid.y; const size_t out_size = num_particles * sizeof(float2) * steps; stime("Allocate Output"); checkCuda(hipMalloc(&output_d, out_size)); ftime(); ftime(); stime("Computation"); hipLaunchKernelGGL(( integrate), dim3(grid), dim3(block), 0, 0, output_d, imTex, dt, steps); ftime(); // Copying from device to host stime("Copy to host"); checkCuda(hipHostMalloc(&output, out_size)); checkCuda(hipMemcpy(output, output_d, out_size, hipMemcpyDeviceToHost)); ftime(); stime("Free device memory"); checkCuda(hipFree(output_d)); checkCuda(hipDestroyTextureObject(imTex)); checkCuda(hipFreeArray(im_d)); ftime(); //stime("Write"); //writeCSV(argv[2], output, num_particles, steps); //ftime(); stime("Free host memory"); checkCuda(hipHostFree(im)); checkCuda(hipHostFree(output)); ftime(); ftime(); 
ptime("GPU"); return 0; }
a9fc7cde0bdf7ec8d158630959e0d6a91808ad81.cu
#include <iostream> #include <stdio.h> #include <unistd.h> #include <sys/mman.h> #include <sys/types.h> #include <sys/stat.h> #include <assert.h> #include <fcntl.h> #include <errno.h> #include <vector> #include <time.h> #include <sys/time.h> using std::cout; using std::cerr; using std::endl; using std::cin; using std::vector; constexpr size_t IM_X = 1300; constexpr size_t IM_Y = 600; constexpr size_t IM_V = sizeof(float2); constexpr size_t IM_SIZE = IM_X * IM_Y * IM_V; constexpr size_t XSR = 10; constexpr size_t YSR = 5; __device__ inline float2 mul(float s, float2 v) { v.x *= s; v.y *= s; return v; } __device__ inline float2 add(float2 v1, float2 v2) { v1.x += v2.x; v1.y += v2.y; return v1; } __global__ void integrate(float2* out, cudaTextureObject_t vecs, float dt, size_t steps) { float2 k1, k2, k3, k4, p, q; // Initial position p.x = blockIdx.x * blockDim.x + threadIdx.x; p.y = blockIdx.y * blockDim.y + threadIdx.y; // Output location size_t idx = (blockDim.x * gridDim.x * (int)p.y + (int)p.x) * steps; // Apply sample rate p.x *= XSR; p.y *= YSR; // Initial output out[idx++] = p; // Integrate forward for (size_t i = 1; i < steps; i++) { k1 = mul(dt, tex2D<float2>(vecs, p.x, p.y)); q = add(p, mul(0.5, k1)); k2 = mul(dt, tex2D<float2>(vecs, q.x, q.y)); q = add(p, mul(0.5, k2)); k3 = mul(dt, tex2D<float2>(vecs, q.x, q.y)); q = add(p, k3); k4 = mul(dt, tex2D<float2>(vecs, q.x, q.y)); p.x += (1.0/6.0)*(k1.x + 2*k2.x + 2*k3.x + k4.x); p.y += (1.0/6.0)*(k1.y + 2*k2.y + 2*k3.y + k4.y); out[idx++] = p; } } __host__ cudaError_t checkCuda(cudaError_t result) { if (result != cudaSuccess) { cerr << "CUDA Runtime Error: " << cudaGetErrorString(result) << endl; abort(); } return result; } __host__ int checkLinux(int result) { if (result == -1) { cerr << "Linux Runtime Error: (" << errno << ") " << strerror(errno) << endl; abort(); } return result; } __host__ void writeCSV(char* file, float2* output, size_t num_particles, size_t steps) { const size_t file_size = num_particles * steps * (20 + 9 + 9 + 3); umask(0111); int fd = checkLinux(open(file, O_RDWR | O_CREAT | O_TRUNC, 06666)); checkLinux(ftruncate(fd, file_size)); char* map = (char*) mmap(NULL, file_size, PROT_WRITE, MAP_SHARED, fd, 0); checkLinux((int)(size_t)map); char* cur = map; const char* header = "line_id, coordinate_x, coordinate_y\n"; checkLinux(write(fd, header, strlen(header))); for (size_t i = 0; i < num_particles; i++) for (size_t s = 0; s < steps; s++) { float2 p = output[i * steps + s]; cur += sprintf(cur, "%llu,%.7f,%.7f\n", i, p.x, p.y); } msync(map, file_size, MS_SYNC); munmap(map, file_size); checkLinux(ftruncate(fd, cur - map)); checkLinux(close(fd)); } vector<const char*> names; vector<timespec> wall; vector<timespec> proc; vector<size_t> levels; size_t cur_level = 0; __host__ static inline void stime(const char* name) { timespec cur_wall, cur_proc; clock_gettime(CLOCK_REALTIME, &cur_wall); clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &cur_proc); names.push_back(name); levels.push_back(cur_level++); wall.push_back(cur_wall); proc.push_back(cur_proc); } __host__ static inline void ftime() { timespec cur_wall, cur_proc; clock_gettime(CLOCK_REALTIME, &cur_wall); clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &cur_proc); levels.push_back(--cur_level); wall.push_back(cur_wall); proc.push_back(cur_proc); } // from https://gist.github.com/diabloneo/9619917 __host__ static inline void timespecDiff(timespec& a, timespec& b, timespec& result) { result.tv_sec = a.tv_sec - b.tv_sec; result.tv_nsec = a.tv_nsec - b.tv_nsec; if (result.tv_nsec < 0) { 
--result.tv_sec; result.tv_nsec += 1000000000L; } } __host__ static inline double timespecToMs(const timespec& t) { return (double)t.tv_sec * 1000.0 + (double)t.tv_nsec / 1000000.0; } __host__ static size_t ptime(const char* name, size_t n = 0, size_t i = 0, size_t l = 0) { while (n < names.size() and levels[i] == l) { size_t j = i + 1; auto& sw = wall[i]; auto& sp = proc[i]; int jumped = j; while (l < levels[j]) j++; auto& fw = wall[j]; auto& fp = proc[j]; timespec w, p; timespecDiff(fw, sw, w); timespecDiff(fp, sp, p); for (size_t k = 0; k < l; k++) printf("\t"); printf("\"%s\", \"%s\", %.3f, %.3f\n", name, names[n++], timespecToMs(w), timespecToMs(p)); if (jumped < j) n = ptime(name, n, jumped, l + 1); i = j + 1; } return n; } __host__ int main(int argc, char **argv) { stime("Program"); stime("Setup"); if (argc != 3) { ftime(); ftime(); printf("Usage: ./main image output\n"); return 0; } float dt = 1; //cout << "Enter delta time: "; //cin >> dt; size_t steps = 100; //cout << "Enter number of steps: "; //cin >> steps; // Opening file stime("Read input"); int fd = checkLinux(open(argv[1], O_RDONLY)); // Allocating + Mapping host memory float2 *im; cudaArray* im_d; float2 *output_d; float2 *output; // Memory mapping does not provide a performance boost. // It trades off between copy time to GPU or copy to RAM. checkCuda(cudaMallocHost(&im, IM_SIZE)); checkLinux(read(fd, im, IM_SIZE)); close(fd); ftime(); // Modified basic cuda texture manipulation obtained from // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html // Allocate CUDA array in device memory stime("Copy to GPU"); cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 32, 0, 0, cudaChannelFormatKindFloat); checkCuda(cudaMallocArray(&im_d, &channelDesc, IM_X, IM_Y)); checkCuda(cudaMemcpyToArray(im_d, 0, 0, im, IM_SIZE, cudaMemcpyHostToDevice)); ftime(); // Specify texture stime("Initialize Texture"); struct cudaResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = cudaResourceTypeArray; resDesc.res.array.array = im_d; // Specify texture object parameters struct cudaTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.addressMode[0] = cudaAddressModeBorder; texDesc.addressMode[1] = cudaAddressModeBorder; texDesc.filterMode = cudaFilterModeLinear; texDesc.readMode = cudaReadModeElementType; texDesc.maxAnisotropy = 2; texDesc.normalizedCoords = false; // Create texture object cudaTextureObject_t imTex = 0; checkCuda(cudaCreateTextureObject(&imTex, &resDesc, &texDesc, NULL)); ftime(); dim3 block(26, 24, 1); dim3 grid(5, 5, 1); // dim3 block(1, 24, 1); // dim3 grid(1, 25, 1); const size_t num_particles = block.x * grid.x * block.y * grid.y; const size_t out_size = num_particles * sizeof(float2) * steps; stime("Allocate Output"); checkCuda(cudaMalloc(&output_d, out_size)); ftime(); ftime(); stime("Computation"); integrate<<<grid, block>>>(output_d, imTex, dt, steps); ftime(); // Copying from device to host stime("Copy to host"); checkCuda(cudaMallocHost(&output, out_size)); checkCuda(cudaMemcpy(output, output_d, out_size, cudaMemcpyDeviceToHost)); ftime(); stime("Free device memory"); checkCuda(cudaFree(output_d)); checkCuda(cudaDestroyTextureObject(imTex)); checkCuda(cudaFreeArray(im_d)); ftime(); //stime("Write"); //writeCSV(argv[2], output, num_particles, steps); //ftime(); stime("Free host memory"); checkCuda(cudaFreeHost(im)); checkCuda(cudaFreeHost(output)); ftime(); ftime(); ptime("GPU"); return 0; }
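// Worked sizing for the launch configuration in main() above, derived from
// the file's own constants (block(26,24,1), grid(5,5,1), steps = 100,
// IM_X = 1300, IM_Y = 600, sizeof(float2) = 8):
//   num_particles = 26*5 * 24*5                   = 130 * 120 = 15,600
//   out_size      = 15,600 * 8 * 100              = 12,480,000 bytes (~11.9 MiB)
//   IM_SIZE       = 1300 * 600 * 8                =  6,240,000 bytes (~6 MiB)
// i.e. each seed point keeps its full 100-sample trajectory in device memory
// before the single cudaMemcpy back to the host.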
6263ecfb208dfd3dcb1428261bf6474b3bc6bc19.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <ATen/TensorUtils.h> #include <ATen/NativeFunctions.h> #include <ATen/WrapDimUtils.h> #include <THH/THHTensorMathReduce.cuh> #include <THH/THHTensorSort.cuh> #include <THH/THHThrustAllocator.cuh> #include <ATen/AccumulateType.h> #include <ATen/hip/NumericLimits.cuh> #include <type_traits> #include <ATen/native/hip/PersistentSoftmax.cuh> namespace at { namespace native { namespace { template<typename T, typename AccumT, typename OutT> struct LogSoftMaxForwardEpilogue { __device__ __forceinline__ LogSoftMaxForwardEpilogue(AccumT max_input, AccumT sum) : logsum(max_input + ::log(sum)) {} __device__ __forceinline__ OutT operator()(T input) const { return static_cast<OutT>(input - logsum); } const AccumT logsum; }; template<typename T, typename AccumT, typename OutT> struct LogSoftMaxBackwardEpilogue { __device__ __forceinline__ LogSoftMaxBackwardEpilogue(AccumT sum) : sum(sum) {} __device__ __forceinline__ T operator()(OutT gradOutput, OutT output) const { return static_cast<T>(gradOutput - ::exp(static_cast<AccumT>(output)) * sum); } const AccumT sum; }; template<typename T, typename AccumT, typename OutT> struct SoftMaxForwardEpilogue { __device__ __forceinline__ SoftMaxForwardEpilogue(AccumT max_input, AccumT sum) : max_input(max_input) , sum(sum) {} __device__ __forceinline__ OutT operator()(T input) const { return static_cast<OutT>(::exp(input - max_input) / sum); } const AccumT max_input; const AccumT sum; }; template<typename T, typename AccumT, typename OutT> struct SoftMaxBackwardEpilogue { __device__ __forceinline__ SoftMaxBackwardEpilogue(AccumT sum) : sum(sum) {} // XXX: gradOutput that we get here is really gradOutput * output // Look for cmul in SoftMax_updateGradInput __device__ __forceinline__ T operator()(OutT gradOutput, OutT output) const { return static_cast<T>(gradOutput - output * sum); } const AccumT sum; }; //////////////////////////////////////////////////////////////////////////////// // Spatial kernel (fast with large inner_size and small dim_size) //////////////////////////////////////////////////////////////////////////////// // Let's assume that our input has been flattened to have only three dimension: // outer x dim x inner // The spatial algorithm tries to paralellize along all of them. // Within a 2d block threadIdx.y paralellizes over dim slices, and threads that // share it will speed up reductions over dim (along axis x). // The 2d grid is used to paralellize inner dimension over y axis and outer over x. 
inline dim3 SpatialSoftMax_getGridSize( dim3 block, uint32_t max_active_blocks, uint64_t outer_size, uint64_t dim_size, uint64_t inner_size) { // First, tile as many blocks as we can over the y axis uint32_t inner_blocks = (inner_size + block.y - 1) / block.y; if (inner_blocks > max_active_blocks) inner_blocks = max_active_blocks; // Fill the x axis with as many blocks as we can fit (a little more is ok too) uint32_t outer_blocks = (max_active_blocks + inner_blocks - 1) / inner_blocks; if (outer_blocks > outer_size) outer_blocks = outer_size; return dim3(outer_blocks, inner_blocks); } const int max_threads = 1024; inline dim3 SpatialSoftMax_getBlockSize( uint64_t outer_size, uint64_t dim_size, uint64_t inner_size) { uint32_t inner_threads = inner_size; inner_threads = ::min(inner_threads, static_cast<uint32_t>(max_threads)); uint32_t dim_threads = 1; if (inner_threads <= 64 && dim_size >= 64) { while (inner_threads * dim_threads <= max_threads && dim_threads <= dim_size) dim_threads *= 2; dim_threads /= 2; } return dim3(dim_threads, inner_threads); } template<typename accscalar_t, typename Kernel> void SpatialSoftMax_getLaunchSizes( Kernel k, uint64_t outer_size, uint64_t dim_size, uint64_t inner_size, dim3& grid, dim3& block, uint32_t& smem_size) { block = SpatialSoftMax_getBlockSize(outer_size, dim_size, inner_size); uint32_t block_threads = block.x * block.y; smem_size = block.x == 1 ? 0 : block_threads * sizeof(accscalar_t); int max_active_blocks; #ifdef __HIP_PLATFORM_HCC__ max_active_blocks = 16; #else hipOccupancyMaxActiveBlocksPerMultiprocessor(&max_active_blocks, k, block_threads, smem_size); #endif max_active_blocks *= at::cuda::getCurrentDeviceProperties()->multiProcessorCount; grid = SpatialSoftMax_getGridSize(block, max_active_blocks, outer_size, dim_size, inner_size); } inline dim3 SoftMax_getBlockSize(int ILP, uint64_t dim_size) { uint64_t block_size = 1; uint64_t max_block_size = ::min(dim_size / ILP, static_cast<uint64_t>(max_threads)); while (block_size < max_block_size) block_size *= 2; // Launch at least a single warp - the kernel assumes that. block_size = ::max(block_size, static_cast<uint64_t>(32)); return dim3(block_size); } template<typename T> struct Add { __device__ __forceinline__ T operator()(T a, T b) const { return a + b; } }; template<typename T> struct Max { __device__ __forceinline__ T operator()(T a, T b) const { return a < b ? b : a; } }; // Note that it's not a complete block-wide reduction. // Only threads that share threadIdx.y reduce values. 
template<typename T, template<typename> class ReduceOp> __forceinline__ __device__ T spatialBlockReduceX(T *shared, T val) { ReduceOp<T> r; shared += threadIdx.y * blockDim.x; __syncthreads(); shared[threadIdx.x] = val; // NOTE: loop starts with __syncthreads() int offset = blockDim.x / 2; while (offset > 0) { __syncthreads(); if (threadIdx.x < offset) shared[threadIdx.x] = r(shared[threadIdx.x], shared[threadIdx.x + offset]); offset /= 2; } __syncthreads(); return shared[0]; } template <typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue> __global__ void cunn_SpatialSoftMaxForward( outscalar_t *output, scalar_t *input, uint32_t outer_size, uint32_t dim_size, uint32_t inner_size) { extern __shared__ unsigned char smem[]; auto sdata = reinterpret_cast<accscalar_t*>(smem); const uint32_t outer_stride = inner_size * dim_size; const uint32_t dim_stride = inner_size; for (uint32_t outer_index = blockIdx.x; outer_index < outer_size; outer_index += gridDim.x) { const uint32_t outer_offset = outer_index * outer_stride; for (uint32_t inner_index = blockIdx.y * blockDim.y + threadIdx.y; inner_index < inner_size; inner_index += blockDim.y * gridDim.y) { const uint32_t data_offset = outer_offset + inner_index; //////////////////////////////////////////////////////////// // These two blocks are really eqivalent, but specializing on // blockDim.x == 1 makes the kernel faster when it's unused. // I didn't want to thread an extra template parameter, and nvcc // seems to be smart enough to hoist the if outside of the loops. //////////////////////////////////////////////////////////// if (blockDim.x > 1) { accscalar_t max_input = at::numeric_limits<accscalar_t>::lowest(); for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) { const accscalar_t value = static_cast<accscalar_t>(input[data_offset + d * dim_stride]); max_input = Max<accscalar_t>()(max_input, value); } max_input = spatialBlockReduceX<accscalar_t, Max>(sdata,max_input); accscalar_t sum = 0; for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) sum += ::exp(static_cast<accscalar_t>(input[data_offset + d * dim_stride]) - max_input); sum = spatialBlockReduceX<accscalar_t, Add>(sdata, sum); Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_input, sum); for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) output[data_offset + d * dim_stride] = epilogue(input[data_offset + d * dim_stride]); } else { accscalar_t max_input = at::numeric_limits<accscalar_t>::lowest(); for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) { const accscalar_t value = static_cast<accscalar_t>(input[data_offset + d * dim_stride]); max_input = Max<accscalar_t>()(max_input, value); } accscalar_t sum = 0; for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) sum += ::exp(static_cast<accscalar_t>(input[data_offset + d * dim_stride]) - max_input); Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_input, sum); for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) output[data_offset + d * dim_stride] = epilogue(input[data_offset + d * dim_stride]); } } } } template <typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue> __global__ void cunn_SpatialSoftMaxBackward( scalar_t *gradInput, outscalar_t *output, outscalar_t *gradOutput, uint32_t outer_size, uint32_t dim_size, uint32_t inner_size) { extern __shared__ unsigned char smem[]; auto sdata = reinterpret_cast<accscalar_t*>(smem); const 
uint32_t outer_stride = inner_size * dim_size; const uint32_t dim_stride = inner_size; for (uint32_t outer_index = blockIdx.x; outer_index < outer_size; outer_index += gridDim.x) { const uint32_t outer_offset = outer_index * outer_stride; for (uint32_t inner_index = blockIdx.y * blockDim.y + threadIdx.y; inner_index < inner_size; inner_index += blockDim.y * gridDim.y) { const uint32_t data_offset = outer_offset + inner_index; // See the comment in forward kernel if (blockDim.x > 1) { accscalar_t sum = 0; for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) sum += gradOutput[data_offset + d * dim_stride]; sum = spatialBlockReduceX<accscalar_t, Add>(sdata, sum); Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum); for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) { gradInput[data_offset + d * dim_stride] = epilogue(gradOutput[data_offset + d * dim_stride], output[data_offset + d * dim_stride]); } } else { accscalar_t sum = 0; for (uint32_t d = 0; d < dim_size; d++) sum += gradOutput[data_offset + d * dim_stride]; Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum); for (uint32_t d = 0; d < dim_size; d++) { gradInput[data_offset + d * dim_stride] = epilogue(gradOutput[data_offset + d * dim_stride], output[data_offset + d * dim_stride]); } } } } } //////////////////////////////////////////////////////////////////////////////// // Regular kernel (fast when dim_size is large; requires inner_size == 1) //////////////////////////////////////////////////////////////////////////////// template <typename T, typename AccumT> struct MaxFloat { __device__ __forceinline__ AccumT operator()(AccumT max, T v) const { return ::max(max, (AccumT)v); } }; template<typename T, typename AccumT> struct AddFloat { __device__ __forceinline__ AccumT operator()(AccumT sum, T v) const { return sum + v; } }; template<typename T, typename AccumT> struct SumExpFloat { __device__ __forceinline__ SumExpFloat(AccumT v) : max_k(v) {} __device__ __forceinline__ AccumT operator()(AccumT sum, T v) const { return sum + ::exp(v - max_k); } const AccumT max_k; }; template <template<typename> class Reduction, typename AccumT> __device__ __forceinline__ AccumT blockReduce(AccumT* smem, AccumT val, const Reduction<AccumT>& r, AccumT defaultVal) { // To avoid RaW races from chaining blockReduce calls together, we need a sync here __syncthreads(); smem[threadIdx.x] = val; __syncthreads(); AccumT warpVal = defaultVal; // First warp will perform per-warp reductions for the remaining warps uint32_t mask = (((uint64_t)1) << (blockDim.x / 32)) - 1; if (threadIdx.x < 32) { int lane = threadIdx.x % 32; if (lane < blockDim.x / 32) { #pragma unroll for (int i = 0; i < 32; ++i) { warpVal = r(warpVal, smem[lane * 32 + i]); } #if TORCH_HIP_VERSION >= 9000 __syncwarp(mask); #endif smem[lane] = warpVal; } } __syncthreads(); // First thread will perform a reduction of the above per-warp reductions AccumT blockVal = defaultVal; if (threadIdx.x == 0) { for (int i = 0; i < blockDim.x / 32; ++i) { blockVal = r(blockVal, smem[i]); } smem[0] = blockVal; } // Sync and broadcast __syncthreads(); return smem[0]; } template <template<typename, typename> class Reduction, int ILP, typename T, typename AccumT> __device__ __forceinline__ AccumT ilpReduce(T* data, int size, const Reduction<T, AccumT>& r, AccumT defaultVal) { AccumT threadVal = defaultVal; int offset = threadIdx.x; int last = size % (ILP * blockDim.x); // Body (unroll by ILP times) for (; offset < size - last; offset += blockDim.x * ILP) { T tmp[ILP]; #pragma unroll 
for (int j = 0; j < ILP; ++j) tmp[j] = data[offset + j * blockDim.x]; #pragma unroll for (int j = 0; j < ILP; ++j) threadVal = r(threadVal, tmp[j]); } // Epilogue for (; offset < size; offset += blockDim.x) threadVal = r(threadVal, data[offset]); return threadVal; } template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template <typename, typename, typename> class Epilogue> __global__ void cunn_SoftMaxForward(outscalar_t *output, scalar_t *input, int classes) { extern __shared__ unsigned char smem[]; auto sdata = reinterpret_cast<accscalar_t*>(smem); // forward pointers to batch[blockIdx.x] // each block handles a sample in the mini-batch input += blockIdx.x * classes; output += blockIdx.x * classes; // find the max accscalar_t threadMax = ilpReduce<MaxFloat, ILP, scalar_t, accscalar_t>( input, classes, MaxFloat<scalar_t, accscalar_t>(), -at::numeric_limits<accscalar_t>::max()); accscalar_t max_k = blockReduce<Max, accscalar_t>( sdata, threadMax, Max<accscalar_t>(), -at::numeric_limits<accscalar_t>::max()); // reduce all values accscalar_t threadExp = ilpReduce<SumExpFloat, ILP, scalar_t, accscalar_t>( input, classes, SumExpFloat<scalar_t, accscalar_t>(max_k), static_cast<accscalar_t>(0)); accscalar_t sumAll = blockReduce<Add, accscalar_t>( sdata, threadExp, Add<accscalar_t>(), static_cast<accscalar_t>(0)); Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_k, sumAll); int offset = threadIdx.x; int last = classes % (ILP * blockDim.x); for (; offset < classes - last; offset += blockDim.x * ILP) { scalar_t tmp[ILP]; #pragma unroll for (int j = 0; j < ILP; ++j) tmp[j] = input[offset + j * blockDim.x]; #pragma unroll for (int j = 0; j < ILP; ++j) output[offset + j * blockDim.x] = epilogue(tmp[j]); } for (; offset < classes; offset += blockDim.x) output[offset] = epilogue(input[offset]); } template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue> __global__ void cunn_SoftMaxBackward(scalar_t *gradInput, outscalar_t *output, outscalar_t *gradOutput, int classes) { extern __shared__ unsigned char smem[]; auto sdata = reinterpret_cast<accscalar_t*>(smem); gradInput += blockIdx.x * classes; output += blockIdx.x * classes; gradOutput += blockIdx.x * classes; accscalar_t threadSum = ilpReduce<AddFloat, 4, outscalar_t, accscalar_t>( gradOutput, classes, AddFloat<outscalar_t, accscalar_t>(), accscalar_t(0)); accscalar_t sum_k = blockReduce<Add, accscalar_t>( sdata, threadSum, Add<accscalar_t>(), accscalar_t(0)); Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum_k); int offset = threadIdx.x; int last = classes % (ILP * blockDim.x); for (; offset < classes - last; offset += blockDim.x * ILP) { outscalar_t tmpGradOutput[ILP]; outscalar_t tmpOutput[ILP]; #pragma unroll for (int j = 0; j < ILP; ++j) { tmpGradOutput[j] = gradOutput[offset + j * blockDim.x]; tmpOutput[j] = output[offset + j * blockDim.x]; } #pragma unroll for (int j = 0; j < ILP; ++j) gradInput[offset + j * blockDim.x] = epilogue(tmpGradOutput[j], tmpOutput[j]); } for (; offset < classes; offset += blockDim.x) gradInput[offset] = epilogue(gradOutput[offset], output[offset]); } template<template<typename, typename, typename> class Epilogue, bool is_log_softmax> Tensor host_softmax(const Tensor & input_, const int64_t dim_, const bool half_to_float){ if (half_to_float) AT_ASSERTM(input_.scalar_type() == ScalarType::Half,"conversion is supported for Half type only"); auto input = input_.contiguous(); Tensor output = 
half_to_float ? at::empty_like(input, input.options().dtype(ScalarType::Float)) : at::empty_like(input); static_assert(std::is_same<acc_type<at::Half, true>, float>::value, "accscalar_t for half should be float"); if (input.dim() == 0) input = input.view(1); int64_t dim = maybe_wrap_dim(dim_, input.dim()); TORCH_CHECK(dim >=0 && dim < input.dim(), "dim must be non-negative and less than input dimensions"); int64_t outer_size = 1; int64_t dim_size = input.size(dim); if (input.numel() > 0) { int64_t inner_size = 1; hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); for (int64_t i = 0; i < dim; ++i) outer_size *= input.size(i); for (int64_t i = dim + 1; i < input.dim(); ++i) inner_size *= input.size(i); // This kernel spawns a block per each element in the batch. // XXX: it assumes that inner_size == 1 if (inner_size == 1) { const int ILP = 2; dim3 grid(outer_size); dim3 block = SoftMax_getBlockSize(ILP, dim_size); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "host_softmax", [&] { using accscalar_t = acc_type<scalar_t, true>; if (!half_to_float) { if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) { dispatch_softmax_forward<scalar_t, scalar_t, accscalar_t, is_log_softmax>( output.data<scalar_t>(), input.data<scalar_t>(), dim_size, dim_size, outer_size); } else { hipLaunchKernelGGL(( cunn_SoftMaxForward<ILP, scalar_t, accscalar_t, scalar_t, Epilogue>) , dim3(grid), dim3(block), block.x * sizeof(accscalar_t), stream, output.data<scalar_t>(), input.data<scalar_t>(), dim_size ); } } else { if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) { dispatch_softmax_forward<scalar_t, accscalar_t, accscalar_t, is_log_softmax>( output.data<accscalar_t>(), input.data<scalar_t>(), dim_size, dim_size, outer_size); } else { hipLaunchKernelGGL(( cunn_SoftMaxForward<ILP, scalar_t, accscalar_t, accscalar_t, Epilogue>) , dim3(grid), dim3(block), block.x * sizeof(accscalar_t), stream, output.data<accscalar_t>(), input.data<scalar_t>(), dim_size ); } } }); // This kernel runs in a 2D grid, where each application along y dimension has a fixed // outer_size, and runs in parallel over inner_size. Dimension x is parallel over outer_size. // Reductions over dim are done in a single-threaded manner. 
} else { uint32_t smem_size; dim3 grid, block; AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "host_softmax", [&] { using accscalar_t = acc_type<scalar_t, true>; if (!half_to_float) { SpatialSoftMax_getLaunchSizes<accscalar_t>( &cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, scalar_t, Epilogue>, outer_size, dim_size, inner_size, grid, block, smem_size); hipLaunchKernelGGL(( cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, scalar_t, Epilogue>) , dim3(grid), dim3(block), smem_size, stream, output.data<scalar_t>(), input.data<scalar_t>(), outer_size, dim_size, inner_size ); } else { SpatialSoftMax_getLaunchSizes<accscalar_t>( &cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, accscalar_t, Epilogue>, outer_size, dim_size, inner_size, grid, block, smem_size); hipLaunchKernelGGL(( cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, accscalar_t, Epilogue>) , dim3(grid), dim3(block), smem_size, stream, output.data<accscalar_t>(), input.data<scalar_t>(), outer_size, dim_size, inner_size ); } }); } THCudaCheck(hipGetLastError()); } return output; } template<template<typename, typename, typename> class Epilogue, bool is_log_softmax> Tensor host_softmax_backward(const Tensor &grad_, const Tensor &output_, int64_t dim_, bool half_to_float){ int64_t dim = maybe_wrap_dim(dim_, grad_.dim()); Tensor gI = half_to_float ? at::empty_like(grad_, grad_.options().dtype(ScalarType::Half)) : at::empty_like(grad_); if (grad_.numel() == 0) { return gI; } auto grad = grad_.contiguous(); static_assert(std::is_same<acc_type<at::Half, true>, float>::value, "accscalar_t for half should be float"); if (grad.dim() == 0) grad = grad.view(1); TORCH_CHECK(dim >=0 && dim < grad.dim(), "dim must be non-negative and less than input dimensions"); auto output = output_.contiguous(); if (output.dim() == 0) output = output.view(1); int64_t outer_size = 1; int64_t dim_size = output.size(dim); int64_t inner_size = 1; for (int64_t i = 0; i < dim; ++i) outer_size *= output.size(i); for (int64_t i = dim + 1; i < output.dim(); ++i) inner_size *= output.size(i); // See descriptions of kernels above. 
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); if (inner_size == 1) { const int ILP = 2; dim3 grid(outer_size); dim3 block = SoftMax_getBlockSize(ILP, dim_size); AT_DISPATCH_FLOATING_TYPES_AND_HALF(gI.scalar_type(), "host_softmax_backward", [&] { using accscalar_t = acc_type<scalar_t, true>; if (!half_to_float) { if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) { dispatch_softmax_backward<scalar_t, scalar_t, accscalar_t, is_log_softmax>( gI.data<scalar_t>(), grad.data<scalar_t>(), output.data<scalar_t>(), dim_size, dim_size, outer_size); } else { hipLaunchKernelGGL(( cunn_SoftMaxBackward<ILP, scalar_t, accscalar_t, scalar_t, Epilogue>) , dim3(grid), dim3(block), block.x * sizeof(accscalar_t), stream, gI.data<scalar_t>(), output.data<scalar_t>(), grad.data<scalar_t>(), dim_size ); } } else { if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) { dispatch_softmax_backward<accscalar_t, scalar_t, accscalar_t, is_log_softmax>( gI.data<scalar_t>(), grad.data<accscalar_t>(), output.data<accscalar_t>(), dim_size, dim_size, outer_size); } else { hipLaunchKernelGGL(( cunn_SoftMaxBackward<ILP, scalar_t, accscalar_t, accscalar_t, Epilogue>) , dim3(grid), dim3(block), block.x * sizeof(accscalar_t), stream, gI.data<scalar_t>(), output.data<accscalar_t>(), grad.data<accscalar_t>(), dim_size ); } } }); } else { uint32_t smem_size; dim3 grid, block; AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.scalar_type(), "host_softmax_backward", [&] { using accscalar_t = acc_type<scalar_t, true>; if (!half_to_float) { SpatialSoftMax_getLaunchSizes<accscalar_t>( &cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, scalar_t, Epilogue>, outer_size, dim_size, inner_size, grid, block, smem_size); hipLaunchKernelGGL(( cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, scalar_t, Epilogue>) , dim3(grid), dim3(block), smem_size, stream, gI.data<scalar_t>(), output.data<scalar_t>(), grad.data<scalar_t>(), outer_size, dim_size, inner_size ); } else { SpatialSoftMax_getLaunchSizes<accscalar_t>( &cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, accscalar_t, Epilogue>, outer_size, dim_size, inner_size, grid, block, smem_size); hipLaunchKernelGGL(( cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, accscalar_t, Epilogue>) , dim3(grid), dim3(block), smem_size, stream, gI.data<scalar_t>(), output.data<accscalar_t>(), grad.data<accscalar_t>(), outer_size, dim_size, inner_size ); } }); } THCudaCheck(hipGetLastError()); return gI; } } Tensor log_softmax_cuda(const Tensor &input, const int64_t dim, const bool half_to_float){ return host_softmax<LogSoftMaxForwardEpilogue,true>(input, dim, half_to_float); } Tensor log_softmax_backward_cuda(const Tensor &grad, const Tensor &output, int64_t dim, const Tensor &input){ bool half_to_float = grad.scalar_type() != input.scalar_type(); if (half_to_float) { AT_ASSERTM((grad.scalar_type() == ScalarType::Float && input.scalar_type() == ScalarType::Half), "expected input and grad types to match, or input to be at::Half and grad to be at::Float"); } return host_softmax_backward<LogSoftMaxBackwardEpilogue,true>(grad, output, dim, half_to_float); } Tensor softmax_cuda(const Tensor &input, const int64_t dim, const bool half_to_float){ return host_softmax<SoftMaxForwardEpilogue,false>(input, dim, half_to_float); } Tensor softmax_backward_cuda(const Tensor &grad, const Tensor &output, int64_t dim, const Tensor &input){ bool half_to_float = grad.scalar_type() != input.scalar_type(); if (half_to_float) { AT_ASSERTM((grad.scalar_type() == ScalarType::Float && 
input.scalar_type() == ScalarType::Half), "expected input and grad types to match, or input to be at::Half and grad to be at::Float"); } Tensor tmp = grad * output; return host_softmax_backward<SoftMaxBackwardEpilogue,false>(tmp, output, dim, half_to_float); } } }
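// A host-side sketch, not part of the original file, of the two-pass
// numerically stable scheme the epilogues above encode: reduce the max,
// reduce the sum of exp(x - max), then apply either
//   softmax(x_i)     = exp(x_i - max) / sum
//   log_softmax(x_i) = x_i - (max + log(sum))
// The accumulation-type handling and half->float promotion done by
// host_softmax are ignored here; the helper name is an assumption.
#include <algorithm>
#include <cmath>
#include <vector>

static void log_softmax_reference(const std::vector<float>& x,
                                  std::vector<float>& out) {
  const float max_k = *std::max_element(x.begin(), x.end());
  float sum = 0.0f;
  for (float v : x) sum += std::exp(v - max_k);
  const float logsum = max_k + std::log(sum);  // LogSoftMaxForwardEpilogue's logsum
  out.resize(x.size());
  for (size_t i = 0; i < x.size(); ++i) out[i] = x[i] - logsum;
}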
6263ecfb208dfd3dcb1428261bf6474b3bc6bc19.cu
#include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/TensorUtils.h> #include <ATen/NativeFunctions.h> #include <ATen/WrapDimUtils.h> #include <THC/THCTensorMathReduce.cuh> #include <THC/THCTensorSort.cuh> #include <THC/THCThrustAllocator.cuh> #include <ATen/AccumulateType.h> #include <ATen/cuda/NumericLimits.cuh> #include <type_traits> #include <ATen/native/cuda/PersistentSoftmax.cuh> namespace at { namespace native { namespace { template<typename T, typename AccumT, typename OutT> struct LogSoftMaxForwardEpilogue { __device__ __forceinline__ LogSoftMaxForwardEpilogue(AccumT max_input, AccumT sum) : logsum(max_input + std::log(sum)) {} __device__ __forceinline__ OutT operator()(T input) const { return static_cast<OutT>(input - logsum); } const AccumT logsum; }; template<typename T, typename AccumT, typename OutT> struct LogSoftMaxBackwardEpilogue { __device__ __forceinline__ LogSoftMaxBackwardEpilogue(AccumT sum) : sum(sum) {} __device__ __forceinline__ T operator()(OutT gradOutput, OutT output) const { return static_cast<T>(gradOutput - std::exp(static_cast<AccumT>(output)) * sum); } const AccumT sum; }; template<typename T, typename AccumT, typename OutT> struct SoftMaxForwardEpilogue { __device__ __forceinline__ SoftMaxForwardEpilogue(AccumT max_input, AccumT sum) : max_input(max_input) , sum(sum) {} __device__ __forceinline__ OutT operator()(T input) const { return static_cast<OutT>(std::exp(input - max_input) / sum); } const AccumT max_input; const AccumT sum; }; template<typename T, typename AccumT, typename OutT> struct SoftMaxBackwardEpilogue { __device__ __forceinline__ SoftMaxBackwardEpilogue(AccumT sum) : sum(sum) {} // XXX: gradOutput that we get here is really gradOutput * output // Look for cmul in SoftMax_updateGradInput __device__ __forceinline__ T operator()(OutT gradOutput, OutT output) const { return static_cast<T>(gradOutput - output * sum); } const AccumT sum; }; //////////////////////////////////////////////////////////////////////////////// // Spatial kernel (fast with large inner_size and small dim_size) //////////////////////////////////////////////////////////////////////////////// // Let's assume that our input has been flattened to have only three dimension: // outer x dim x inner // The spatial algorithm tries to paralellize along all of them. // Within a 2d block threadIdx.y paralellizes over dim slices, and threads that // share it will speed up reductions over dim (along axis x). // The 2d grid is used to paralellize inner dimension over y axis and outer over x. 
inline dim3 SpatialSoftMax_getGridSize( dim3 block, uint32_t max_active_blocks, uint64_t outer_size, uint64_t dim_size, uint64_t inner_size) { // First, tile as many blocks as we can over the y axis uint32_t inner_blocks = (inner_size + block.y - 1) / block.y; if (inner_blocks > max_active_blocks) inner_blocks = max_active_blocks; // Fill the x axis with as many blocks as we can fit (a little more is ok too) uint32_t outer_blocks = (max_active_blocks + inner_blocks - 1) / inner_blocks; if (outer_blocks > outer_size) outer_blocks = outer_size; return dim3(outer_blocks, inner_blocks); } const int max_threads = 1024; inline dim3 SpatialSoftMax_getBlockSize( uint64_t outer_size, uint64_t dim_size, uint64_t inner_size) { uint32_t inner_threads = inner_size; inner_threads = std::min(inner_threads, static_cast<uint32_t>(max_threads)); uint32_t dim_threads = 1; if (inner_threads <= 64 && dim_size >= 64) { while (inner_threads * dim_threads <= max_threads && dim_threads <= dim_size) dim_threads *= 2; dim_threads /= 2; } return dim3(dim_threads, inner_threads); } template<typename accscalar_t, typename Kernel> void SpatialSoftMax_getLaunchSizes( Kernel k, uint64_t outer_size, uint64_t dim_size, uint64_t inner_size, dim3& grid, dim3& block, uint32_t& smem_size) { block = SpatialSoftMax_getBlockSize(outer_size, dim_size, inner_size); uint32_t block_threads = block.x * block.y; smem_size = block.x == 1 ? 0 : block_threads * sizeof(accscalar_t); int max_active_blocks; #ifdef __HIP_PLATFORM_HCC__ max_active_blocks = 16; #else cudaOccupancyMaxActiveBlocksPerMultiprocessor(&max_active_blocks, k, block_threads, smem_size); #endif max_active_blocks *= at::cuda::getCurrentDeviceProperties()->multiProcessorCount; grid = SpatialSoftMax_getGridSize(block, max_active_blocks, outer_size, dim_size, inner_size); } inline dim3 SoftMax_getBlockSize(int ILP, uint64_t dim_size) { uint64_t block_size = 1; uint64_t max_block_size = std::min(dim_size / ILP, static_cast<uint64_t>(max_threads)); while (block_size < max_block_size) block_size *= 2; // Launch at least a single warp - the kernel assumes that. block_size = std::max(block_size, static_cast<uint64_t>(32)); return dim3(block_size); } template<typename T> struct Add { __device__ __forceinline__ T operator()(T a, T b) const { return a + b; } }; template<typename T> struct Max { __device__ __forceinline__ T operator()(T a, T b) const { return a < b ? b : a; } }; // Note that it's not a complete block-wide reduction. // Only threads that share threadIdx.y reduce values. 
template<typename T, template<typename> class ReduceOp> __forceinline__ __device__ T spatialBlockReduceX(T *shared, T val) { ReduceOp<T> r; shared += threadIdx.y * blockDim.x; __syncthreads(); shared[threadIdx.x] = val; // NOTE: loop starts with __syncthreads() int offset = blockDim.x / 2; while (offset > 0) { __syncthreads(); if (threadIdx.x < offset) shared[threadIdx.x] = r(shared[threadIdx.x], shared[threadIdx.x + offset]); offset /= 2; } __syncthreads(); return shared[0]; } template <typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue> __global__ void cunn_SpatialSoftMaxForward( outscalar_t *output, scalar_t *input, uint32_t outer_size, uint32_t dim_size, uint32_t inner_size) { extern __shared__ unsigned char smem[]; auto sdata = reinterpret_cast<accscalar_t*>(smem); const uint32_t outer_stride = inner_size * dim_size; const uint32_t dim_stride = inner_size; for (uint32_t outer_index = blockIdx.x; outer_index < outer_size; outer_index += gridDim.x) { const uint32_t outer_offset = outer_index * outer_stride; for (uint32_t inner_index = blockIdx.y * blockDim.y + threadIdx.y; inner_index < inner_size; inner_index += blockDim.y * gridDim.y) { const uint32_t data_offset = outer_offset + inner_index; //////////////////////////////////////////////////////////// // These two blocks are really eqivalent, but specializing on // blockDim.x == 1 makes the kernel faster when it's unused. // I didn't want to thread an extra template parameter, and nvcc // seems to be smart enough to hoist the if outside of the loops. //////////////////////////////////////////////////////////// if (blockDim.x > 1) { accscalar_t max_input = at::numeric_limits<accscalar_t>::lowest(); for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) { const accscalar_t value = static_cast<accscalar_t>(input[data_offset + d * dim_stride]); max_input = Max<accscalar_t>()(max_input, value); } max_input = spatialBlockReduceX<accscalar_t, Max>(sdata,max_input); accscalar_t sum = 0; for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) sum += std::exp(static_cast<accscalar_t>(input[data_offset + d * dim_stride]) - max_input); sum = spatialBlockReduceX<accscalar_t, Add>(sdata, sum); Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_input, sum); for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) output[data_offset + d * dim_stride] = epilogue(input[data_offset + d * dim_stride]); } else { accscalar_t max_input = at::numeric_limits<accscalar_t>::lowest(); for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) { const accscalar_t value = static_cast<accscalar_t>(input[data_offset + d * dim_stride]); max_input = Max<accscalar_t>()(max_input, value); } accscalar_t sum = 0; for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) sum += std::exp(static_cast<accscalar_t>(input[data_offset + d * dim_stride]) - max_input); Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_input, sum); for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) output[data_offset + d * dim_stride] = epilogue(input[data_offset + d * dim_stride]); } } } } template <typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue> __global__ void cunn_SpatialSoftMaxBackward( scalar_t *gradInput, outscalar_t *output, outscalar_t *gradOutput, uint32_t outer_size, uint32_t dim_size, uint32_t inner_size) { extern __shared__ unsigned char smem[]; auto sdata = reinterpret_cast<accscalar_t*>(smem); const 
uint32_t outer_stride = inner_size * dim_size; const uint32_t dim_stride = inner_size; for (uint32_t outer_index = blockIdx.x; outer_index < outer_size; outer_index += gridDim.x) { const uint32_t outer_offset = outer_index * outer_stride; for (uint32_t inner_index = blockIdx.y * blockDim.y + threadIdx.y; inner_index < inner_size; inner_index += blockDim.y * gridDim.y) { const uint32_t data_offset = outer_offset + inner_index; // See the comment in forward kernel if (blockDim.x > 1) { accscalar_t sum = 0; for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) sum += gradOutput[data_offset + d * dim_stride]; sum = spatialBlockReduceX<accscalar_t, Add>(sdata, sum); Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum); for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) { gradInput[data_offset + d * dim_stride] = epilogue(gradOutput[data_offset + d * dim_stride], output[data_offset + d * dim_stride]); } } else { accscalar_t sum = 0; for (uint32_t d = 0; d < dim_size; d++) sum += gradOutput[data_offset + d * dim_stride]; Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum); for (uint32_t d = 0; d < dim_size; d++) { gradInput[data_offset + d * dim_stride] = epilogue(gradOutput[data_offset + d * dim_stride], output[data_offset + d * dim_stride]); } } } } } //////////////////////////////////////////////////////////////////////////////// // Regular kernel (fast when dim_size is large; requires inner_size == 1) //////////////////////////////////////////////////////////////////////////////// template <typename T, typename AccumT> struct MaxFloat { __device__ __forceinline__ AccumT operator()(AccumT max, T v) const { return ::max(max, (AccumT)v); } }; template<typename T, typename AccumT> struct AddFloat { __device__ __forceinline__ AccumT operator()(AccumT sum, T v) const { return sum + v; } }; template<typename T, typename AccumT> struct SumExpFloat { __device__ __forceinline__ SumExpFloat(AccumT v) : max_k(v) {} __device__ __forceinline__ AccumT operator()(AccumT sum, T v) const { return sum + std::exp(v - max_k); } const AccumT max_k; }; template <template<typename> class Reduction, typename AccumT> __device__ __forceinline__ AccumT blockReduce(AccumT* smem, AccumT val, const Reduction<AccumT>& r, AccumT defaultVal) { // To avoid RaW races from chaining blockReduce calls together, we need a sync here __syncthreads(); smem[threadIdx.x] = val; __syncthreads(); AccumT warpVal = defaultVal; // First warp will perform per-warp reductions for the remaining warps uint32_t mask = (((uint64_t)1) << (blockDim.x / 32)) - 1; if (threadIdx.x < 32) { int lane = threadIdx.x % 32; if (lane < blockDim.x / 32) { #pragma unroll for (int i = 0; i < 32; ++i) { warpVal = r(warpVal, smem[lane * 32 + i]); } #if CUDA_VERSION >= 9000 __syncwarp(mask); #endif smem[lane] = warpVal; } } __syncthreads(); // First thread will perform a reduction of the above per-warp reductions AccumT blockVal = defaultVal; if (threadIdx.x == 0) { for (int i = 0; i < blockDim.x / 32; ++i) { blockVal = r(blockVal, smem[i]); } smem[0] = blockVal; } // Sync and broadcast __syncthreads(); return smem[0]; } template <template<typename, typename> class Reduction, int ILP, typename T, typename AccumT> __device__ __forceinline__ AccumT ilpReduce(T* data, int size, const Reduction<T, AccumT>& r, AccumT defaultVal) { AccumT threadVal = defaultVal; int offset = threadIdx.x; int last = size % (ILP * blockDim.x); // Body (unroll by ILP times) for (; offset < size - last; offset += blockDim.x * ILP) { T tmp[ILP]; #pragma unroll 
for (int j = 0; j < ILP; ++j) tmp[j] = data[offset + j * blockDim.x]; #pragma unroll for (int j = 0; j < ILP; ++j) threadVal = r(threadVal, tmp[j]); } // Epilogue for (; offset < size; offset += blockDim.x) threadVal = r(threadVal, data[offset]); return threadVal; } template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template <typename, typename, typename> class Epilogue> __global__ void cunn_SoftMaxForward(outscalar_t *output, scalar_t *input, int classes) { extern __shared__ unsigned char smem[]; auto sdata = reinterpret_cast<accscalar_t*>(smem); // forward pointers to batch[blockIdx.x] // each block handles a sample in the mini-batch input += blockIdx.x * classes; output += blockIdx.x * classes; // find the max accscalar_t threadMax = ilpReduce<MaxFloat, ILP, scalar_t, accscalar_t>( input, classes, MaxFloat<scalar_t, accscalar_t>(), -at::numeric_limits<accscalar_t>::max()); accscalar_t max_k = blockReduce<Max, accscalar_t>( sdata, threadMax, Max<accscalar_t>(), -at::numeric_limits<accscalar_t>::max()); // reduce all values accscalar_t threadExp = ilpReduce<SumExpFloat, ILP, scalar_t, accscalar_t>( input, classes, SumExpFloat<scalar_t, accscalar_t>(max_k), static_cast<accscalar_t>(0)); accscalar_t sumAll = blockReduce<Add, accscalar_t>( sdata, threadExp, Add<accscalar_t>(), static_cast<accscalar_t>(0)); Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_k, sumAll); int offset = threadIdx.x; int last = classes % (ILP * blockDim.x); for (; offset < classes - last; offset += blockDim.x * ILP) { scalar_t tmp[ILP]; #pragma unroll for (int j = 0; j < ILP; ++j) tmp[j] = input[offset + j * blockDim.x]; #pragma unroll for (int j = 0; j < ILP; ++j) output[offset + j * blockDim.x] = epilogue(tmp[j]); } for (; offset < classes; offset += blockDim.x) output[offset] = epilogue(input[offset]); } template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue> __global__ void cunn_SoftMaxBackward(scalar_t *gradInput, outscalar_t *output, outscalar_t *gradOutput, int classes) { extern __shared__ unsigned char smem[]; auto sdata = reinterpret_cast<accscalar_t*>(smem); gradInput += blockIdx.x * classes; output += blockIdx.x * classes; gradOutput += blockIdx.x * classes; accscalar_t threadSum = ilpReduce<AddFloat, 4, outscalar_t, accscalar_t>( gradOutput, classes, AddFloat<outscalar_t, accscalar_t>(), accscalar_t(0)); accscalar_t sum_k = blockReduce<Add, accscalar_t>( sdata, threadSum, Add<accscalar_t>(), accscalar_t(0)); Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum_k); int offset = threadIdx.x; int last = classes % (ILP * blockDim.x); for (; offset < classes - last; offset += blockDim.x * ILP) { outscalar_t tmpGradOutput[ILP]; outscalar_t tmpOutput[ILP]; #pragma unroll for (int j = 0; j < ILP; ++j) { tmpGradOutput[j] = gradOutput[offset + j * blockDim.x]; tmpOutput[j] = output[offset + j * blockDim.x]; } #pragma unroll for (int j = 0; j < ILP; ++j) gradInput[offset + j * blockDim.x] = epilogue(tmpGradOutput[j], tmpOutput[j]); } for (; offset < classes; offset += blockDim.x) gradInput[offset] = epilogue(gradOutput[offset], output[offset]); } template<template<typename, typename, typename> class Epilogue, bool is_log_softmax> Tensor host_softmax(const Tensor & input_, const int64_t dim_, const bool half_to_float){ if (half_to_float) AT_ASSERTM(input_.scalar_type() == ScalarType::Half,"conversion is supported for Half type only"); auto input = input_.contiguous(); Tensor output = 
half_to_float ? at::empty_like(input, input.options().dtype(ScalarType::Float)) : at::empty_like(input); static_assert(std::is_same<acc_type<at::Half, true>, float>::value, "accscalar_t for half should be float"); if (input.dim() == 0) input = input.view(1); int64_t dim = maybe_wrap_dim(dim_, input.dim()); TORCH_CHECK(dim >=0 && dim < input.dim(), "dim must be non-negative and less than input dimensions"); int64_t outer_size = 1; int64_t dim_size = input.size(dim); if (input.numel() > 0) { int64_t inner_size = 1; cudaStream_t stream = at::cuda::getCurrentCUDAStream(); for (int64_t i = 0; i < dim; ++i) outer_size *= input.size(i); for (int64_t i = dim + 1; i < input.dim(); ++i) inner_size *= input.size(i); // This kernel spawns a block per each element in the batch. // XXX: it assumes that inner_size == 1 if (inner_size == 1) { const int ILP = 2; dim3 grid(outer_size); dim3 block = SoftMax_getBlockSize(ILP, dim_size); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "host_softmax", [&] { using accscalar_t = acc_type<scalar_t, true>; if (!half_to_float) { if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) { dispatch_softmax_forward<scalar_t, scalar_t, accscalar_t, is_log_softmax>( output.data<scalar_t>(), input.data<scalar_t>(), dim_size, dim_size, outer_size); } else { cunn_SoftMaxForward<ILP, scalar_t, accscalar_t, scalar_t, Epilogue> <<<grid, block, block.x * sizeof(accscalar_t), stream>>>( output.data<scalar_t>(), input.data<scalar_t>(), dim_size ); } } else { if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) { dispatch_softmax_forward<scalar_t, accscalar_t, accscalar_t, is_log_softmax>( output.data<accscalar_t>(), input.data<scalar_t>(), dim_size, dim_size, outer_size); } else { cunn_SoftMaxForward<ILP, scalar_t, accscalar_t, accscalar_t, Epilogue> <<<grid, block, block.x * sizeof(accscalar_t), stream>>>( output.data<accscalar_t>(), input.data<scalar_t>(), dim_size ); } } }); // This kernel runs in a 2D grid, where each application along y dimension has a fixed // outer_size, and runs in parallel over inner_size. Dimension x is parallel over outer_size. // Reductions over dim are done in a single-threaded manner. } else { uint32_t smem_size; dim3 grid, block; AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "host_softmax", [&] { using accscalar_t = acc_type<scalar_t, true>; if (!half_to_float) { SpatialSoftMax_getLaunchSizes<accscalar_t>( &cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, scalar_t, Epilogue>, outer_size, dim_size, inner_size, grid, block, smem_size); cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, scalar_t, Epilogue> <<<grid, block, smem_size, stream>>>( output.data<scalar_t>(), input.data<scalar_t>(), outer_size, dim_size, inner_size ); } else { SpatialSoftMax_getLaunchSizes<accscalar_t>( &cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, accscalar_t, Epilogue>, outer_size, dim_size, inner_size, grid, block, smem_size); cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, accscalar_t, Epilogue> <<<grid, block, smem_size, stream>>>( output.data<accscalar_t>(), input.data<scalar_t>(), outer_size, dim_size, inner_size ); } }); } THCudaCheck(cudaGetLastError()); } return output; } template<template<typename, typename, typename> class Epilogue, bool is_log_softmax> Tensor host_softmax_backward(const Tensor &grad_, const Tensor &output_, int64_t dim_, bool half_to_float){ int64_t dim = maybe_wrap_dim(dim_, grad_.dim()); Tensor gI = half_to_float ? 
at::empty_like(grad_, grad_.options().dtype(ScalarType::Half)) : at::empty_like(grad_); if (grad_.numel() == 0) { return gI; } auto grad = grad_.contiguous(); static_assert(std::is_same<acc_type<at::Half, true>, float>::value, "accscalar_t for half should be float"); if (grad.dim() == 0) grad = grad.view(1); TORCH_CHECK(dim >=0 && dim < grad.dim(), "dim must be non-negative and less than input dimensions"); auto output = output_.contiguous(); if (output.dim() == 0) output = output.view(1); int64_t outer_size = 1; int64_t dim_size = output.size(dim); int64_t inner_size = 1; for (int64_t i = 0; i < dim; ++i) outer_size *= output.size(i); for (int64_t i = dim + 1; i < output.dim(); ++i) inner_size *= output.size(i); // See descriptions of kernels above. cudaStream_t stream = at::cuda::getCurrentCUDAStream(); if (inner_size == 1) { const int ILP = 2; dim3 grid(outer_size); dim3 block = SoftMax_getBlockSize(ILP, dim_size); AT_DISPATCH_FLOATING_TYPES_AND_HALF(gI.scalar_type(), "host_softmax_backward", [&] { using accscalar_t = acc_type<scalar_t, true>; if (!half_to_float) { if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) { dispatch_softmax_backward<scalar_t, scalar_t, accscalar_t, is_log_softmax>( gI.data<scalar_t>(), grad.data<scalar_t>(), output.data<scalar_t>(), dim_size, dim_size, outer_size); } else { cunn_SoftMaxBackward<ILP, scalar_t, accscalar_t, scalar_t, Epilogue> <<<grid, block, block.x * sizeof(accscalar_t), stream>>>( gI.data<scalar_t>(), output.data<scalar_t>(), grad.data<scalar_t>(), dim_size ); } } else { if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) { dispatch_softmax_backward<accscalar_t, scalar_t, accscalar_t, is_log_softmax>( gI.data<scalar_t>(), grad.data<accscalar_t>(), output.data<accscalar_t>(), dim_size, dim_size, outer_size); } else { cunn_SoftMaxBackward<ILP, scalar_t, accscalar_t, accscalar_t, Epilogue> <<<grid, block, block.x * sizeof(accscalar_t), stream>>>( gI.data<scalar_t>(), output.data<accscalar_t>(), grad.data<accscalar_t>(), dim_size ); } } }); } else { uint32_t smem_size; dim3 grid, block; AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.scalar_type(), "host_softmax_backward", [&] { using accscalar_t = acc_type<scalar_t, true>; if (!half_to_float) { SpatialSoftMax_getLaunchSizes<accscalar_t>( &cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, scalar_t, Epilogue>, outer_size, dim_size, inner_size, grid, block, smem_size); cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, scalar_t, Epilogue> <<<grid, block, smem_size, stream>>>( gI.data<scalar_t>(), output.data<scalar_t>(), grad.data<scalar_t>(), outer_size, dim_size, inner_size ); } else { SpatialSoftMax_getLaunchSizes<accscalar_t>( &cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, accscalar_t, Epilogue>, outer_size, dim_size, inner_size, grid, block, smem_size); cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, accscalar_t, Epilogue> <<<grid, block, smem_size, stream>>>( gI.data<scalar_t>(), output.data<accscalar_t>(), grad.data<accscalar_t>(), outer_size, dim_size, inner_size ); } }); } THCudaCheck(cudaGetLastError()); return gI; } } Tensor log_softmax_cuda(const Tensor &input, const int64_t dim, const bool half_to_float){ return host_softmax<LogSoftMaxForwardEpilogue,true>(input, dim, half_to_float); } Tensor log_softmax_backward_cuda(const Tensor &grad, const Tensor &output, int64_t dim, const Tensor &input){ bool half_to_float = grad.scalar_type() != input.scalar_type(); if (half_to_float) { AT_ASSERTM((grad.scalar_type() == ScalarType::Float && input.scalar_type() == 
ScalarType::Half), "expected input and grad types to match, or input to be at::Half and grad to be at::Float"); } return host_softmax_backward<LogSoftMaxBackwardEpilogue,true>(grad, output, dim, half_to_float); } Tensor softmax_cuda(const Tensor &input, const int64_t dim, const bool half_to_float){ return host_softmax<SoftMaxForwardEpilogue,false>(input, dim, half_to_float); } Tensor softmax_backward_cuda(const Tensor &grad, const Tensor &output, int64_t dim, const Tensor &input){ bool half_to_float = grad.scalar_type() != input.scalar_type(); if (half_to_float) { AT_ASSERTM((grad.scalar_type() == ScalarType::Float && input.scalar_type() == ScalarType::Half), "expected input and grad types to match, or input to be at::Half and grad to be at::Float"); } Tensor tmp = grad * output; return host_softmax_backward<SoftMaxBackwardEpilogue,false>(tmp, output, dim, half_to_float); } } }
ef6f9cbef82006e8eca7beaf43e9967843c58a6f.hip
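// Illustrative sketch (not part of the generated file below): hipify rewrites
// CUDA triple-chevron launches into hipLaunchKernelGGL calls, which is the form
// used throughout this translation unit (see the prepareShapeBuffer launch further
// down). The hypothetical kernel and wrapper here (dummyFill, launchDummyFill) are
// invented for the example and only show the general shape of that mapping.
#include <hip/hip_runtime.h>

__global__ void dummyFill(float *buf, float value, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n)
    buf[i] = value;   // trivial body, just to have something to launch
}

static void launchDummyFill(float *devBuf, float value, int n, hipStream_t stream) {
  dim3 block(256);
  dim3 grid((n + block.x - 1) / block.x);
  // CUDA form before hipify:  dummyFill<<<grid, block, 0, stream>>>(devBuf, value, n);
  // HIP form after hipify (kernel, grid, block, sharedMemBytes, stream, args...):
  hipLaunchKernelGGL(dummyFill, grid, block, 0, stream, devBuf, value, n);
}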
// !!! This is a file automatically generated by hipify!!! /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ #include "../NativeOps.h" #include <hip/hip_runtime.h> #include <cuda_launch_config.h> #include <buffer.h> #include <helpers/shape.h> #include "../Environment.h" #include <helpers/TAD.h> #include <ops/specials.h> #include <loops/reduce3.h> #include <loops/indexreduce.h> #include <loops/summarystatsreduce.h> #include <loops/random.h> #include <loops/broadcasting.h> #include <loops/broadcasting_bool.h> #include <loops/scalar.h> #include <loops/scalar_bool.h> #include <loops/pairwise_transform.h> #include <loops/pairwise_bool.h> #include <loops/transform_same.h> #include <loops/transform_float.h> #include <loops/transform_strict.h> #include <loops/transform_bool.h> #include <loops/transform_any.h> #include <loops/reduce_float.h> #include <loops/reduce_same.h> #include <loops/reduce_bool.h> #include <loops/reduce_long.h> //#include <thread> #include <map> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include <cuda_device_runtime_api.h> #include <pointercast.h> #include <stdio.h> #include <stdlib.h> #include <loops/type_conversions.h> #include <op_boilerplate.h> #include <loops/aggregates.h> #include <helpers/threshold.h> #include <ShapeList.h> #include <Context.h> #include <ops/specials_cuda.h> #include <graph/exceptions/datatype_exception.h> #include <helpers/CudaLaunchHelper.h> // FIXME: we need cuda-specific implementations #include <helpers/logger.h> #include <NDArray.h> #include <GraphExecutioner.h> #include <graph/GraphHolder.h> #include <graph/VariablesSet.h> #include <ops/declarable/OpRegistrator.h> #include <ops/declarable/CustomOperations.h> //#include <sys/time.h> // b40c only available for gcc :( #ifdef __clang__ // do nothing #elif __GNUC__ #include <b40c/util/error_utils.cuh> #include <b40c/util/multiple_buffering.cuh> #include <b40c/radix_sort/enactor.cuh> #endif #include <hiprand/hiprand.h> #include <Status.h> #include <helpers/DebugHelper.h> using namespace nd4j; #include <loops/special_kernels.h> hipDeviceProp_t *deviceProperties; hipFuncAttributes *funcAttributes = new hipFuncAttributes[64]; int blockLimit = 128; int maxThreads = 512; bool allowedP2P = false; bool supportedP2P = false; #ifdef __EXPERIMENTAL__ bool experimentalSupport = true; #else bool experimentalSupport = false; #endif int minThreads = 32; __constant__ char deviceConstantMemory[49152]; typedef struct { long streamId; long callId; } __syncInfo; typedef __syncInfo SyncInfo; /** * This is utility kernel, that updates given special buffer with proper values in device memory */ extern "C" __global__ void prepareShapeBuffer(int *dimension, int *maxDimension, Nd4jLong *specialPointer, int rows, nd4j::DataType dataType) { Nd4jLong tid = blockIdx.x * blockDim.x + threadIdx.x; if 
(tid > 0) return; dimension[0] = 0; maxDimension[0] = 1; specialPointer[0] = 2; specialPointer[1] = rows; specialPointer[2] = 1; specialPointer[3] = 1; specialPointer[4] = 1; specialPointer[5] = 0; specialPointer[6] = 1; specialPointer[7] = 99; ArrayOptions::setDataType(specialPointer, dataType); //printf("special[0]: [%lld]\n", (long long) specialPointer[0]); //shape::printShapeInfoLinear("prepareShapeBuffer", specialPointer); } // this method isn't used, left here for legacy and caution purposes // TLDR: don't use this way, it sucks void CUDART_CB syncCallback(hipStream_t stream, hipError_t status, void *data){ SyncInfo *sync = reinterpret_cast<SyncInfo *>(data); //printf("Finished stream: [%i], kernel call: [%i]\n", sync->streamId, sync->callId); } // this method just does type conversion in fancy way int getDeviceId(Nd4jPointer ptrToDeviceId) { return (int)(Nd4jLong)ptrToDeviceId; } template <typename T> dim3 getOptimalDimensions(Nd4jLong n,hipFuncAttributes attributes, hipDeviceProp_t properties) { // we can combine the two to compute a block size int num_threads = block_size_with_maximum_potential_occupancy(attributes, properties); // no real sense launching more threads, then number of elements we have if (num_threads > n) num_threads = n; if (maxThreads > 0 && num_threads > maxThreads) num_threads = maxThreads; // compute the number of blocks of size num_threads to launch int num_blocks = n / num_threads; // check for partial block at the end if (num_blocks > blockLimit) num_blocks = blockLimit; if (num_blocks < 4 && n > 128) { num_blocks = 4; num_threads = n / num_blocks; } if (num_threads >= 768) { num_blocks = num_blocks * 2; num_threads = num_threads / 2; } if(n % num_threads && num_blocks < blockLimit) ++num_blocks; //(num_threads * sizeof(T)) + attributes.sharedSizeBytes); return dim3(num_blocks,num_threads, 3000); } int getBaseMemorySize(int xRank, hipFuncAttributes funcAttr) { int memory_limit = 256; //funcAttr.sharedSizeBytes; // TODO: remove this later memory_limit += sizeof(UnifiedSharedMemory) + 32; // sizeof(shape::TAD) + (xRank * 4 * 4) /* if (xRank == 0) xRank = 2; memory_limit += (xRank * 2 + 4) * 3 * 4; // we reserve memory for xShape + T1/T2 shapes memory_limit += yRank == 0 ? 0 : (yRank * 2 + 4) * 4; memory_limit += zRank == 0 ? 
0 : (zRank * 2 + 4) * 4; memory_limit += (xRank * 4) * 6; memory_limit += MAX_RANK * 4; // special case, needed roughtly in one pase */ return memory_limit; } /* * Basic CUDA constants here: number of blocks per MP */ int getDeviceBlockThreshold(int deviceId) { int ccMinor = deviceProperties[deviceId].minor; int ccMajor = deviceProperties[deviceId].major; int blockThreshold = 8; if (ccMajor >= 5) blockThreshold = 32; else if (ccMajor == 3) blockThreshold = 16; else if (ccMajor < 3) blockThreshold = 8; return blockThreshold; } dim3 getBasicLaunchParams(int deviceId, long problemLength, int sharedMemoryPerThread, hipFuncAttributes funcAttr) { int countMP = deviceProperties[deviceId].multiProcessorCount; int blockThreshold = getDeviceBlockThreshold(deviceId); int num_threads = problemLength / (countMP * blockThreshold); num_threads = nd4j::math::nd4j_min<int>(num_threads, maxThreads); num_threads = nd4j::math::nd4j_max<int>(num_threads, 64); num_threads = nd4j::math::nd4j_max<int>(num_threads, minThreads); int num_blocks = nd4j::math::nd4j_max<int>(problemLength / num_threads, 1); num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit); int memory_limit = (sharedMemoryPerThread * num_threads) + getBaseMemorySize(1, funcAttr); dim3 launchDims = dim3(num_blocks, num_threads, memory_limit); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Preliminary basic launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i]\n", num_blocks, num_threads, memory_limit); return launchDims; } /* * This message returns shared memory threshold value. default overflow ratio is 0.3 */ int getDeviceSharedThreshold(int deviceId) { int ccMinor = deviceProperties[deviceId].minor; int ccMajor = deviceProperties[deviceId].major; // please note threshold isn't multiple of 32, and that's NOT a mistake int shmemThreshold; if (ccMajor == 6 && ccMinor == 0) shmemThreshold = 65536; else if (ccMajor == 6 && ccMinor == 1) shmemThreshold = 49152; else if (ccMajor == 5 && ccMinor == 2) shmemThreshold = 98304; else if (ccMajor == 5) shmemThreshold = 65536; else if (ccMajor == 3 && ccMinor == 7) shmemThreshold = 114688; else shmemThreshold = 49152; return shmemThreshold / 0.3; } dim3 getBetterDimensions(int deviceId, int numTads, int tadLength, int xRank, hipFuncAttributes funcAttr, int dimensionLength, int elementSize, int reduction) { int num_threads = nd4j::math::nd4j_min<int>(tadLength, maxThreads); int countMP = deviceProperties[deviceId].multiProcessorCount; int regPerBlock = deviceProperties[deviceId].regsPerBlock; int warpSize = deviceProperties[deviceId].warpSize; int blockThreshold = getDeviceBlockThreshold(deviceId); int shmemThreshold = getDeviceSharedThreshold(deviceId); // round num_threads to nearest warpSize num_threads -= num_threads % warpSize; num_threads = nd4j::math::nd4j_max<int>(1, num_threads); if (num_threads < warpSize && tadLength < warpSize) num_threads = tadLength; // since we use shared memory as fast memory for some cases - we need to count that in int memory_limit = getBaseMemorySize(xRank, funcAttr); int memory_floor = memory_limit; int effective_block_limit = countMP * blockThreshold; int num_blocks = numTads; //nd4j::math::nd4j_min<int>(numTads, effective_block_limit); int desiredShared = shmemThreshold / nd4j::math::nd4j_max<int>((num_blocks / countMP), 1); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Launch context: numBlocks: [%i], numThreads: [%i], countMap: [%i], shmemThreshold: [%i], desiredShared: [%i], elementSize: [%i]\n", num_blocks, 
num_threads, countMP, shmemThreshold, desiredShared, elementSize); // at this moment we've stored all required information for things. time to count in reduction multipliers int reduction_per_block = 0; bool found = false; if (reduction > 0) while (!found) { reduction_per_block = (num_threads * elementSize * reduction); if (memory_limit + reduction_per_block < desiredShared) { memory_limit += reduction_per_block; found = true; } else { if (num_threads > minThreads) { num_threads -= 32; } else { memory_limit += reduction_per_block; found = true; } } } // at this moment we know total memory used per block, and we also know per-mp limit. int max_active_blocks = shmemThreshold / nd4j::math::nd4j_max<int>(memory_limit, 1); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("MAB: [%i], memory_floor: [%i], memory_limit: [%i], reductionPerBlock: [%i]\n", max_active_blocks, memory_floor, memory_limit, reduction_per_block); // we don't want to spawn more blocks, that gpu can actually handle without queue //num_blocks = nd4j::math::nd4j_min<int>(num_blocks, max_active_blocks); num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit); // if (num_blocks > countMP) // num_blocks = num_blocks - (num_blocks % countMP); num_blocks = nd4j::math::nd4j_max<int>(num_blocks, 1); int targetBlocksPerMP = num_blocks / countMP; // now we know desired number of blocks wrt to shared memory. So, now we should take in account number of threads per SM if (targetBlocksPerMP * num_threads > 2048) { while (targetBlocksPerMP * num_threads > 2048) { if (num_threads <= minThreads) break; num_threads -= 32; } reduction_per_block = (num_threads * elementSize * reduction); memory_limit = memory_floor + reduction_per_block; } if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Preliminary reduce launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i], reduction_per_block: [%i], blocksPerMP: [%i]\n", num_blocks, num_threads, memory_limit, reduction_per_block, targetBlocksPerMP); return dim3(num_blocks,num_threads, memory_limit); } /* * This method returns kernel launch param for linear memory access */ dim3 getFlatLaunchParams(int deviceId, Nd4jLong *dXShapeInfo, Nd4jLong *dYShapeInfo, hipFuncAttributes funcAttr) { auto xRank = shape::rank(dXShapeInfo); auto yRank = dYShapeInfo == nullptr ? 0 : shape::rank(dYShapeInfo); auto zRank = 0; int memory_limit = getBaseMemorySize(xRank, funcAttr); int countMP = deviceProperties[deviceId].multiProcessorCount; int regPerBlock = deviceProperties[deviceId].regsPerBlock; int blockThreshold = getDeviceBlockThreshold(deviceId); int shmemThreshold = getDeviceSharedThreshold(deviceId); auto xLength = shape::length(dXShapeInfo); int effective_block_limit = countMP * blockThreshold; // for flat calls we just want as much concurrent blocks, as possible, and we're not tied to TAD here int num_threads = xLength / effective_block_limit; if (num_threads < minThreads) num_threads = minThreads; num_threads = num_threads - (num_threads % 32); int memory_floor = memory_limit; int num_blocks = xLength / num_threads; num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit); // num_blocks = nd4j::math::nd4j_min<int>(num_blocks, effective_block_limit); num_blocks = nd4j::math::nd4j_max<int>(num_blocks, 1); int targetBlocksPerMP = num_blocks / countMP; // now we know desired number of blocks wrt to shared memory. 
So, now we should take in account number of threads per SM if (targetBlocksPerMP * num_threads > 2048 && num_threads >= 128) { while (targetBlocksPerMP * num_threads > 2048) { if (num_threads <= minThreads) break; num_threads -= 32; } } if (xLength / num_threads > blockLimit) num_blocks *= 2; dim3 launchDims = dim3(num_blocks, num_threads, memory_limit); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Preliminary scalar launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i], blocksPerMP: [%i], problemLength: [%i], effectiveBlockLimit: [%i]\n", num_blocks, num_threads, memory_limit, targetBlocksPerMP, xLength, effective_block_limit); return launchDims; } /** * This method returns kernel launch params with TAD-based memory access * * @param deviceId * @param dXShapeInfo * @param tadShapeInfo * @param funcAttr * @param dimensionLength * @param elementSize * @param reductionSize * @return */ dim3 getReduceLaunchParams(int deviceId, Nd4jLong *dXShapeInfo, Nd4jLong *tadShapeInfo, hipFuncAttributes funcAttr, int dimensionLength, int elementSize, int reductionSize) { Nd4jLong tadLength = 0; Nd4jLong numTads = 0; if (tadShapeInfo != nullptr) { tadLength = shape::length(tadShapeInfo); numTads = shape::length(dXShapeInfo) / tadLength; if (tadLength == 1) { if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("A xLength: [%i], zLength: [%i]\n", shape::length(dXShapeInfo), shape::length(tadShapeInfo)); } } else{ // we have special case - reduction along all dimensions tadLength = nd4j::math::nd4j_min<int>(shape::length(dXShapeInfo), 768); numTads = shape::length(dXShapeInfo) / tadLength; } auto xRank = shape::rank(dXShapeInfo); int zRank = tadShapeInfo == nullptr ? 0 : shape::rank(tadShapeInfo); dim3 launchDims = getBetterDimensions(deviceId, numTads, tadLength, xRank, funcAttr, dimensionLength, elementSize, reductionSize); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) { //|| launchDims.dX == 1 printf("Reduce LaunchParams: xLength: [%i], numTads: [%i], tadLength: [%i], launchDims.dX: [%i], launchDims.dY: [%i], launchDims.dZ: [%i]\n", shape::length(dXShapeInfo), numTads, tadLength, launchDims.x, launchDims.y, launchDims.z); } return launchDims; } /** * Returns optimal launch parameters * given the extra pointers passed in. * The extra pointer should be * the host pointer for the shape information * associated with the data. * From there it is used to obtain the length * from which we can derive the optimal launch parameters. 
* */ template <typename T> dim3 getOptimalLaunchParameters(const Nd4jLong *hXShapeInfo, hipFuncAttributes attributes, hipDeviceProp_t properties) { auto n = shape::length(hXShapeInfo); dim3 launchDims = getOptimalDimensions<T>(n,attributes, properties); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Params: gridSize: [%i], blockSize: [%i], shMem: [%i], problemLength: [%i], totalThreads:[%i]\n", launchDims.x, launchDims.y, launchDims.z, n, (launchDims.x * launchDims.y)); return launchDims; } nd4j::buffer::Buffer<Nd4jLong> * createScalarBuffer(hipStream_t stream) { Nd4jLong *scalarShapeInfo = shape::createScalarShapeInfo(); nd4j::buffer::Buffer<Nd4jLong> *buff = nd4j::buffer::createBuffer(scalarShapeInfo,shape::shapeInfoLength(2), stream); nd4j::buffer::copyDataToGpu(&buff, stream); return buff; } class ScalarShapeInformation { private: nd4j::buffer::Buffer<Nd4jLong> *scalarDimension; nd4j::buffer::Buffer<Nd4jLong> *scalarShapeInfo; // std::thread::id threadId; public: ScalarShapeInformation(hipStream_t stream) { auto scalarDimensionBuff = reinterpret_cast<Nd4jLong *>(malloc(sizeof(Nd4jLong))); CHECK_ALLOC(scalarDimensionBuff, "Failed to allocate ShapeInfoBuffer"); scalarDimensionBuff[0] = MAX_DIMENSION; scalarDimension = nd4j::buffer::createBuffer(scalarDimensionBuff,1, stream); scalarShapeInfo = createScalarBuffer(stream); // threadId = std::this_thread::get_id(); } ~ScalarShapeInformation() { nd4j::buffer::freeBuffer(&scalarShapeInfo); nd4j::buffer::freeBuffer(&scalarDimension); } Nd4jLong *getShapeInfoHostPointer() { return scalarShapeInfo->data; } Nd4jLong * getShapeInfoGpuPointer() { return scalarShapeInfo->gData; } Nd4jLong * getDimensionHostPointer() { return scalarDimension->data; } Nd4jLong * getDimensionGpuPointer() { return scalarDimension->gData; } }; template <typename T> class ScalarInfo { nd4j::buffer::Buffer<T> *scalarData; ScalarShapeInformation *shapeInfo; T finalResult; hipStream_t streamRef; public: ScalarInfo(hipStream_t stream) { T *scalarResult = reinterpret_cast<T*>(malloc(sizeof(T))); CHECK_ALLOC(scalarResult, "Failed to allocate new scalar buffer"); shapeInfo = new ScalarShapeInformation(stream); scalarData = nd4j::buffer::createBuffer(scalarResult,1, stream); streamRef = stream; nd4j::buffer::copyDataToGpu(&scalarData, stream); } T getFinalResultFromDevice() { nd4j::buffer::copyDataFromGpu(&scalarData, streamRef); return scalarData->data[0]; } /** * Get the device shape information * representing a scalar */ Nd4jLong *getDeviceShapeInfo() { return shapeInfo->getShapeInfoGpuPointer(); } /** * Get the dZ pointers */ T *getDevicePointer() { return scalarData->gData; } /** * Get the infinite dimension device pointer */ Nd4jLong *getDimensionDevicePointer() { return shapeInfo->getDimensionGpuPointer(); } ~ScalarInfo() { nd4j::buffer::freeBuffer(&scalarData); delete shapeInfo; } }; void NativeOps::execPairwiseTransform( Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraParams) { auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); dim3 launchDims(256, 1024, 8192); if (xType != zType && yType != zType) throw std::runtime_error("NativeOps::execPairwiseTransform requires Z operand 
to have either X or Y type"); #ifndef __ND4J_EXPERIMENTAL__ BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::pairwise_transforms::PairWiseTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, dY, dYShapeInfo, hYShapeInfo, dZ, dZShapeInfo, hZShapeInfo, extraParams), LIBND4J_TYPES, LIBND4J_TYPES) #else BUILD_SINGLE_SELECTOR_THRICE(xType, functions::pairwise_transforms::PairWiseTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, dY, dYShapeInfo, hYShapeInfo, dZ, dZShapeInfo, hZShapeInfo, extraParams), LIBND4J_TYPES) #endif DEBUG_KERNEL(stream, opNum); } void NativeOps::execPairwiseTransformBool( Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraParams) { auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); if (!DataTypeUtils::isB(zType)) throw nd4j::datatype_exception::build("NativeOps::execPairwiseTransformBool wrong Z operand data type", nd4j::DataType::BOOL, zType); if (yType != xType) throw nd4j::datatype_exception::build("NativeOps::execPairwiseTransformBool both operands must have same data type", xType, yType); auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); dim3 launchDims(256, 1024, 16384); BUILD_DOUBLE_SELECTOR(xType, zType, functions::pairwise_transforms::PairWiseBoolTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraParams), LIBND4J_TYPES, BOOL_TYPES) } //////////////////////////////////////////////////////////////////////// void NativeOps::execSummaryStatsScalar(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, bool biasCorrected) { auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]); dim3 launchDims = dim3(256, 256, 32768); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); BUILD_DOUBLE_SELECTOR(xType, zType, functions::summarystats::SummaryStatsReduce, ::execSummaryStatsReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, nullptr, nullptr, biasCorrected, reductionPointer), LIBND4J_TYPES, FLOAT_TYPES); } void NativeOps::execBroadcastBool( Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, int *dimension, int dimensionLength) { auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]); auto dTADShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]); auto dTADOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]); if (!DataTypeUtils::isB(zType)) throw 
std::runtime_error("NativeOps::execBroadcastBool requires Z operand to have BOOL type"); if (yType != xType) throw std::runtime_error("NativeOps::execBroadcastBool requires both X & Y operands to have same type"); hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F3 opNum:[%i]\n", opNum); dim3 launchDims(256, 256, 16384); BUILD_DOUBLE_SELECTOR(xType, zType, functions::broadcast::BroadcastBool, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, dTADShapeInfo, dTADOffsets, dTADShapeInfoZ, dTADOffsetsZ), LIBND4J_TYPES, BOOL_TYPES) DEBUG_KERNEL(stream, opNum); } /** * * @param opNum * @param dX * @param dXShapeInfo * @param dY * @param dYShapeInfo * @param dZ * @param dZShapeInfo * @param dimension * @param dimensionLength */ void NativeOps::execBroadcast( Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, int *dimension, int dimensionLength) { /* hipEvent_t start; hipEventCreateWithFlags(&start, hipEventDisableTiming); timespec tsX; timespec tsY; clock_gettime(CLOCK_REALTIME, &tsX); */ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]); auto dTADShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]); auto dTADOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F3 opNum:[%i]\n", opNum); dim3 launchDims(256, 256, 16384); #ifndef __ND4J_EXPERIMENTAL__ BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::broadcast::Broadcast, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, dTADShapeInfo, dTADOffsets, dTADShapeInfoZ, dTADOffsetsZ), LIBND4J_TYPES, LIBND4J_TYPES); #else BUILD_SINGLE_SELECTOR_THRICE(xType, functions::broadcast::Broadcast, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, dTADShapeInfo, dTADOffsets, dTADShapeInfoZ, dTADOffsetsZ), LIBND4J_TYPES); #endif DEBUG_KERNEL(stream, opNum); } /** * * @param opNum * @param dX * @param dXShapeInfo * @param extraParams * @param dZ * @param dZShapeInfo */ void NativeOps::execReduceFloat(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo) { auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("FF7 opNum:[%i]\n", opNum); auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); if (!DataTypeUtils::isR(zType)) throw 
std::runtime_error("NativeOps::execReduceFloat requires Z operand to have floating point type"); auto xLength = shape::length(hXShapeInfo); auto blockWidth = 256; auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth); dim3 launchDims(numBlocks, blockWidth, 32768); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceFloatFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, reductionPointer, dTADShapeInfo), LIBND4J_TYPES, FLOAT_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "execReduceFloat(...) failed"); } void NativeOps::execReduceSame(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo) { auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("SF8 opNum:[%i]\n", opNum); auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); if (zType != xType) throw datatype_exception::build("NativeOps::execReduceSame requires both X & Z operands to have same type", xType, zType); auto xLength = shape::length(hXShapeInfo); auto blockWidth = 256; auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth); dim3 launchDims(numBlocks, blockWidth, 32768); BUILD_SINGLE_SELECTOR(xType, functions::reduce::ReduceSameFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, reductionPointer, dTADShapeInfo), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "execReduceSame(...) failed"); } void NativeOps::execReduceSame(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, int *dimension, int dimensionLength) { auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("SF7 opNum:[%i]\n", opNum); auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); auto xRank = shape::rank(hXShapeInfo); if (zType != xType) throw datatype_exception::build("NativeOps::execReduceSame requires both X & Z operands to have same type", xType, zType); auto numBlocks = shape::length(hZShapeInfo); dim3 launchDims(numBlocks, 256, 32768); BUILD_SINGLE_SELECTOR(xType, functions::reduce::ReduceSameFunction, ::execReduceXD(launchDims, stream, opNum, xRank, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "execReduceSame(...) 
failed"); } //////////////////////////////////////////////////////////////////////// void NativeOps::execReduceLong(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, int *dimension,int dimensionLength) { auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("LF7 opNum:[%i]\n", opNum); auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); if (zType != nd4j::DataType::INT64) throw datatype_exception::build("NativeOps::execReduceLong wrong Z data type", nd4j::DataType::INT64, zType); auto xRank = shape::rank(hXShapeInfo); auto numBlocks = shape::length(hZShapeInfo); dim3 launchDims(numBlocks, 256, 32768); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceLongFunction, ::execReduceXD(launchDims, stream, opNum, xRank, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES, LONG_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "execReduceLong(...) failed"); } //////////////////////////////////////////////////////////////////////// void NativeOps::execReduceLong(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo) { auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("LF7 opNum:[%i]\n", opNum); auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); if (zType != nd4j::DataType::INT64) throw datatype_exception::build("NativeOps::execReduceLong wrong Z data type", nd4j::DataType::INT64, zType); auto xLength = shape::length(hXShapeInfo); auto blockWidth = 256; auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth); dim3 launchDims(numBlocks, blockWidth, 32768); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceLongFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, reductionPointer, dTADShapeInfo), LIBND4J_TYPES, LONG_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "execReduceLong(...) 
failed"); } //////////////////////////////////////////////////////////////////////// void NativeOps::execReduceBool(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, int *dimension, int dimensionLength) { auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("BF7 opNum:[%i]\n", opNum); auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); if (zType != nd4j::DataType::BOOL) throw std::runtime_error("NativeOps::execReduceBool requires Z operand to have BOOL type"); auto xRank = shape::rank(hXShapeInfo); auto numBlocks = shape::length(hZShapeInfo); dim3 launchDims(numBlocks, 256, 32768); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceBoolFunction, ::execReduceXD(launchDims, stream, opNum, xRank, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES, BOOL_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "execReduceBool(...) failed"); } //////////////////////////////////////////////////////////////////////// void NativeOps::execReduceBool(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo) { auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("BF7 opNum:[%i]\n", opNum); auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); if (zType != nd4j::DataType::BOOL) throw std::runtime_error("NativeOps::execReduceBool requires Z operand to have BOOL type"); auto xLength = shape::length(hXShapeInfo); auto blockWidth = 256; auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth); dim3 launchDims(numBlocks, blockWidth, 32768); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceBoolFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, reductionPointer, dTADShapeInfo), LIBND4J_TYPES, BOOL_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "execReduceBool(...) 
failed"); } /** * * @param opNum * @param dX * @param dXShapeInfo * @param extraParams * @param dZ * @param dZShapeInfo * @param dimension * @param dimensionLength */ void NativeOps::execIndexReduce( Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, int *dimension,int dimensionLength) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); Nd4jLong *hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); Nd4jLong *dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); Nd4jLong *dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F2 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); void *reductionPointer = reinterpret_cast<void *>(extraPointers[4]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); auto numBlocks = shape::length(hZShapeInfo); dim3 launchDims(numBlocks, 256, 32768); if (zType != nd4j::DataType::INT64) throw datatype_exception::build("NativeOps::execIndexReduce requires Z operand to have INT64 type", zType); auto dz = reinterpret_cast<Nd4jLong*>(dZ); BUILD_SINGLE_SELECTOR(xType, functions::indexreduce::IndexReduce, ::executeIndexReduce(launchDims, stream, opNum, dX, dXShapeInfo, shape::rank(hXShapeInfo), extraParams, dz, dZShapeInfo, shape::rank(hZShapeInfo), dimension, dimensionLength, 1, allocationPointer, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES); } /** * * @param opNum * @param dX * @param dXShapeInfo * @param extraParams * @param dZ * @param dZShapeInfo */ void NativeOps::execReduceFloat( Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, int *dimension,int dimensionLength) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F8 opNum:[%i]\n", opNum); void *reductionPointer = reinterpret_cast<void *>(extraPointers[4]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); auto xRank = shape::rank(hXShapeInfo); auto numBlocks = shape::length(hZShapeInfo); dim3 launchDims(numBlocks, 256, 32768); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceFloatFunction, ::execReduceXD(launchDims, stream, opNum, xRank, dX,dXShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES, FLOAT_TYPES); } /** * * @param opNum * @param dX * @param dXShapeInfo * @param extraParams */ void NativeOps::execIndexReduceScalar( Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo){ if (nd4j::Environment::getInstance()->isDebug()) printf("F1 opNum:[%i]\n", opNum); hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); // void *resultPointer = reinterpret_cast<float *>(extraPointers[5]); int *allocationPointer = 
reinterpret_cast<int *>(extraPointers[3]); void *reductionPointer = reinterpret_cast<void *>(extraPointers[4]); auto xLength = shape::length(hXShapeInfo); auto blockWidth = 256; auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth); dim3 launchDims(numBlocks, blockWidth, 32768); if (nd4j::Environment::getInstance()->isDebugAndVerbose() && launchDims.x == 1) printf("AF1 opNum:[%i]\n", opNum); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); // FIXME: we want Z to be one of integer types //if (!DataTypeUtils::isZ(zType)) // throw nd4j::datatype_exception("NativeOps::execIndexReduceScalar requires Z operand to have one of integer types") if (zType != nd4j::DataType::INT64) throw nd4j::datatype_exception::build("NativeOps::execIndexReduceScalar requires Z operand to have INT64 data type", zType); auto dz = reinterpret_cast<Nd4jLong*>(dZ); BUILD_SINGLE_SELECTOR(xType, functions::indexreduce::IndexReduce, ::executeIndexReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, shape::rank(hXShapeInfo), extraParams, dz, nullptr, 0, nullptr, 0, 1, allocationPointer, reductionPointer, nullptr, nullptr), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "execIndexReduceScalar(...) failed"); } void NativeOps::execTransformSame(Nd4jPointer *extraPointers,int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraParams) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); dim3 launchDims(512, 512, 16384); auto xRank = shape::rank(hXShapeInfo); auto zRank = shape::rank(hZShapeInfo); auto xType = ArrayOptions::dataType(hXShapeInfo); auto zType = ArrayOptions::dataType(hZShapeInfo); if (xType != zType) throw std::runtime_error("NativeOps::execTransformSame requires X & Z to have same type"); //nd4j_printf("Going to execute transformSame; opNum: %i\n", opNum); BUILD_SINGLE_SELECTOR(xType, functions::transform::TransformSame, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "execTransformSame(...) 
failed"); } void NativeOps::execTransformBool(Nd4jPointer *extraPointers,int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraParams) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); dim3 launchDims(512, 512, 16384); auto xRank = shape::rank(hXShapeInfo); auto zRank = shape::rank(hZShapeInfo); auto xType = ArrayOptions::dataType(hXShapeInfo); auto zType = ArrayOptions::dataType(hZShapeInfo); if (!DataTypeUtils::isB(zType)) throw std::runtime_error("NativeOps::execTransformBool requires Z to have same boolean type"); BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformBool, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES, BOOL_TYPES); } void NativeOps::execTransformAny(Nd4jPointer *extraPointers,int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraParams) { auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); auto xRank = shape::rank(hXShapeInfo); auto zRank = shape::rank(hZShapeInfo); auto xType = ArrayOptions::dataType(hXShapeInfo); auto zType = ArrayOptions::dataType(hZShapeInfo); switch (opNum) { case transform::IsMax: { bool scalarCheat = false; if (extraParams == nullptr) { scalarCheat = true; } auto special = reinterpret_cast<double *>(extraPointers[17]); if (scalarCheat) { auto scalarShape = ShapeBuilders::createScalarShapeInfo(nd4j::DataType::INT64); /** * In case of vector-input for IsMax, it just turns into IndexReduce call + further filler call */ execIndexReduceScalar(extraPointers, indexreduce::IndexMax, nullptr, hXShapeInfo, dX, dXShapeInfo, extraParams, nullptr, scalarShape, special, nullptr); Nd4jLong maxIdx = -119; checkCudaErrors(hipStreamSynchronize(*stream)); hipMemcpyAsync(&maxIdx, special, sizeof(Nd4jLong), hipMemcpyDeviceToHost, *stream); checkCudaErrors(hipStreamSynchronize(*stream)); int targetIdx = 0; if (shape::order(hXShapeInfo) == 'c' || shape::order(hXShapeInfo) == 'f' && maxIdx * shape::stride(hXShapeInfo)[shape::rank(hXShapeInfo) - 1] >= shape::length(hXShapeInfo)) targetIdx = maxIdx; else targetIdx = maxIdx * shape::stride(hXShapeInfo)[shape::rank(hXShapeInfo) - 1]; dim3 launchDims(1, 512, 1024); BUILD_SINGLE_SELECTOR(zType, fillIsMaxGeneric, (launchDims, stream, dZ, shape::length(hZShapeInfo), targetIdx), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "Legacy IsMax(...) 
failed"); delete[] scalarShape; } else { auto hostYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]); auto hostTShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[19]); auto tadMaxShapeInfo = reinterpret_cast<Nd4jLong *> (extraPointers[10]); auto tadMaxOffsets = reinterpret_cast<Nd4jLong *> (extraPointers[11]); int *dimension = reinterpret_cast<int *> (extraPointers[15]); int dimensionLength = getDeviceId(extraPointers[18]); // we call for IMax on specified dimension execIndexReduce(extraPointers, indexreduce::IndexMax, nullptr, hXShapeInfo, dX, dXShapeInfo, extraParams, nullptr, hostTShapeInfo, special, hostYShapeInfo, dimension, dimensionLength); DEBUG_KERNEL(stream, opNum); dim3 launchDims(256, 256, 16384); // at this point, all IMax indexes are gathered, and we execute filler BUILD_SINGLE_SELECTOR(zType, fillDimensionalIsMaxGeneric, (launchDims, stream, special, dZ, dZShapeInfo, tadMaxShapeInfo, dimension, dimensionLength, tadMaxOffsets), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "Legacy IsMax(...) failed"); } } break; default: { dim3 launchDims(512, 512, 16384); BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformAny, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES, LIBND4J_TYPES); } } } void NativeOps::execTransformStrict(Nd4jPointer *extraPointers,int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraParams) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); dim3 launchDims(512, 512, 16384); auto xRank = shape::rank(hXShapeInfo); auto zRank = shape::rank(hZShapeInfo); auto xType = ArrayOptions::dataType(hXShapeInfo); auto zType = ArrayOptions::dataType(hZShapeInfo); if (xType != zType || !DataTypeUtils::isR(xType)) throw datatype_exception::build("NativeOps::execTransformStrict requires X & Z to have same floating point type", xType, zType); switch (opNum) { case transform::SoftMax: case transform::SoftMaxDerivative: case transform::LogSoftMax: { if (shape::isVector(hXShapeInfo)) { int length = shape::length(hXShapeInfo); int block = nd4j::math::nd4j_min<int>(length, 256); launchDims.x = 1; launchDims.y = block; launchDims.z += (block * sizeof(double) * 4); BUILD_SINGLE_SELECTOR(xType, functions::transform::TransformStrict, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), FLOAT_TYPES); } else { auto shape = shape::shapeOf(hXShapeInfo); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); // special pointer for special buffer for special ops auto specialPointer = reinterpret_cast<double *>(extraPointers[6]); auto dimension = reinterpret_cast<int *>(specialPointer); auto maxDimension = dimension + 1; auto maxShapeBuffer = reinterpret_cast<Nd4jLong *>(maxDimension + 1); auto special = reinterpret_cast<double *> (maxShapeBuffer + (MAX_RANK * 2 + 4)); Nd4jPointer tempPointers[16]; tempPointers[0] = extraPointers[0]; tempPointers[1] = extraPointers[1]; tempPointers[2] = extraPointers[2]; tempPointers[3] = extraPointers[3]; tempPointers[4] = extraPointers[4]; tempPointers[5] = extraPointers[5]; tempPointers[6] = extraPointers[6]; tempPointers[7] = extraPointers[7]; tempPointers[8] = extraPointers[8]; tempPointers[9] = 
extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[12]; tempPointers[13] = extraPointers[13]; tempPointers[14] = extraPointers[14]; tempPointers[15] = extraPointers[15]; Nd4jLong maxShape[2] = {shape::shapeOf(hXShapeInfo)[0], 1}; auto hostMaxShapeBuffer = shape::shapeBuffer(2, xType, maxShape); tempPointers[7] = (Nd4jPointer) hostMaxShapeBuffer; tempPointers[8] = (Nd4jPointer) hostMaxShapeBuffer; hipLaunchKernelGGL(( prepareShapeBuffer), dim3(1), dim3(1), 128, *stream, dimension, maxDimension, maxShapeBuffer, shape[0], xType); DEBUG_KERNEL(stream, opNum); //shape::printShapeInfo(maxShapeBuffer); tempPointers[9] = extraPointers[12]; tempPointers[10] = extraPointers[13]; tempPointers[11] = extraPointers[14]; // max 3 execReduceSame(tempPointers, reduce::Max, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, nullptr, hostMaxShapeBuffer, special, maxShapeBuffer, maxDimension, 1); DEBUG_KERNEL(stream, opNum); tempPointers[8] = extraPointers[8]; tempPointers[9] = extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[10]; tempPointers[13] = extraPointers[11]; // sub 1 execBroadcast(tempPointers, broadcast::Subtract, hX, hXShapeInfo, dX, dXShapeInfo, nullptr, hostMaxShapeBuffer, special, maxShapeBuffer, nullptr, hZShapeInfo, dZ, dZShapeInfo, dimension, 1); DEBUG_KERNEL(stream, opNum); // exp 3 execTransformFloat(extraPointers, transform::Exp, hZ, hZShapeInfo, dZ, dZShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, extraParams); DEBUG_KERNEL(stream, opNum); tempPointers[8] = tempPointers[7]; tempPointers[9] = extraPointers[12]; tempPointers[10] = extraPointers[13]; tempPointers[11] = extraPointers[14]; //sum 1 execReduceSame(tempPointers, reduce::Sum, hZ, hZShapeInfo, dZ, dZShapeInfo, extraParams, nullptr, hostMaxShapeBuffer, special, maxShapeBuffer, maxDimension, 1); tempPointers[8] = extraPointers[8]; tempPointers[9] = extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[10]; tempPointers[13] = extraPointers[11]; // divide 3 execBroadcast(tempPointers, broadcast::Divide, hZ, hZShapeInfo, dZ, dZShapeInfo, nullptr, hostMaxShapeBuffer, special, maxShapeBuffer, nullptr, hZShapeInfo, dZ, dZShapeInfo, dimension, 1); DEBUG_KERNEL(stream, opNum); // log 3 if (opNum == transform::LogSoftMax) execTransformFloat(extraPointers, transform::Log, nullptr, hZShapeInfo, dZ, dZShapeInfo, nullptr, hZShapeInfo, dZ, dZShapeInfo, extraParams); else if (opNum == transform::SoftMaxDerivative) execTransformStrict(extraPointers, transform::SpecialDerivative, nullptr, hZShapeInfo, dZ, dZShapeInfo, nullptr, hZShapeInfo, dZ, dZShapeInfo, extraParams); nd4j::DebugHelper::checkErrorCode(stream, "SoftMax(...) 
failed"); delete hostMaxShapeBuffer; } } break; default: { BUILD_SINGLE_SELECTOR(xType, functions::transform::TransformStrict, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), FLOAT_TYPES); } } } void NativeOps::execTransformFloat(Nd4jPointer *extraPointers,int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraParams) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]); auto xRank = shape::rank(hXShapeInfo); auto zRank = shape::rank(hZShapeInfo); auto xType = ArrayOptions::dataType(hXShapeInfo); auto zType = ArrayOptions::dataType(hZShapeInfo); if (!DataTypeUtils::isR(zType)) throw datatype_exception::build("NativeOps::execTransformFloat requires Z to have floating point type", zType); if (opNum == transform::Histogram) { dim3 launchDims(256, 256, 32768); Nd4jPointer maskedAllocPointer; auto length = shape::length(hZShapeInfo); hipMalloc(reinterpret_cast<void **>(&maskedAllocPointer), length * launchDims.x * DataTypeUtils::sizeOf(nd4j::DataType::INT64)); auto imaskedAllocPointer = reinterpret_cast<int *>(maskedAllocPointer); BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformFloat, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, imaskedAllocPointer, reductionPointer, nullptr, nullptr), LIBND4J_TYPES, FLOAT_TYPES); checkCudaErrors(hipStreamSynchronize(*stream)); hipFree(maskedAllocPointer); } else { dim3 launchDims(512, 512, 16384); BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformFloat, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES, FLOAT_TYPES); } } /** * Append an input array * to the end of a flat array * in a particular order * @param offset the offset of the array to start at * @param order the order * @param dZ the dZ array * @param dZShapeInfo the shape info for te array * @param input the input for the array * @param inputShapeInfo the shape information for that array */ void NativeOps::flatten(Nd4jPointer *extraPointers, int offset, char order, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hInput, Nd4jLong *hInputShapeInfo, void *dInput, Nd4jLong *dInputShapeInfo) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); auto hYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F22 opNum:[7]\n"); // int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hYShapeInfo), 2, funcAttributes[30]); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AF222 opNum:[7]\n"); auto type = nd4j::ArrayOptions::dataType(hInputShapeInfo); BUILD_SINGLE_SELECTOR(type, flattenKernelGeneric, (launchDims, stream, extraPointers, offset, order, dZ, dZShapeInfo, dInput, dInputShapeInfo), LIBND4J_TYPES); DEBUG_KERNEL(stream, -1); } void NativeOps::checkP2P() { int curDevice = 0; hipGetDevice(&curDevice); int devCnt = 0; hipGetDeviceCount(&devCnt); if (curDevice < 0 && curDevice > devCnt) curDevice = 0; bool tempSupport = true; if (devCnt > 1) { for (int dX = 0; dX < devCnt; 
dX++) { for (int dY = 0; dY < devCnt; dY++) { if (dX == dY) continue; int canAccess = 0; hipSetDevice(dX); hipDeviceCanAccessPeer(&canAccess, dX , dY); if (!canAccess) { tempSupport = false; break; } } } supportedP2P = tempSupport; hipSetDevice(curDevice); } else { // if we have only 1 device - we say that we support P2P, since all data will be on 1 device supportedP2P = true; } } void NativeOps::enableP2P(bool enable) { if (enable == allowedP2P) return; int curDevice = 0; hipGetDevice(&curDevice); int devCnt = 0; hipGetDeviceCount(&devCnt); if (curDevice < 0 && curDevice > devCnt) curDevice = 0; if (devCnt > 1) { for (int dX = 0; dX < devCnt; dX++) { for (int dY = 0; dY < devCnt; dY++) { if (dX == dY) continue; int canAccess = 0; hipSetDevice(dX); hipDeviceCanAccessPeer(&canAccess, dX , dY); if (canAccess) { if (enable) { hipDeviceEnablePeerAccess(dY, 0); } else { hipDeviceDisablePeerAccess(dY); } } else { if (nd4j::Environment::getInstance()->isVerbose()) printf("Peer access [%i] -> [%i] isn't possible\n", dX, dY); } } } hipSetDevice(curDevice); } allowedP2P = enable; hipSetDevice(curDevice); } bool NativeOps::isP2PAvailable() { return supportedP2P; } void NativeOps::initializeDevicesAndFunctions() { int devCnt = 0; hipGetDeviceCount(&devCnt); deviceProperties = new hipDeviceProp_t[devCnt]; for (int i = 0; i < devCnt; i++) { hipSetDevice(i); hipGetDeviceProperties(&deviceProperties[i], i); hipDeviceSetLimit(hipLimitStackSize, 4096); } hipSetDevice(0); checkP2P(); // enabling p2p gpu access if it's supported if (supportedP2P && devCnt > 1) enableP2P(allowedP2P); } void NativeOps::initializeFunctions(Nd4jPointer *functions) { nd4j::BlasHelper::getInstance()->initializeDeviceFunctions(functions); /* this->hipblasSgemv = (CublasSgemv)functions[0]; this->hipblasDgemv = (CublasDgemv)functions[1]; this->hipblasHgemm = (CublasHgemm)functions[2]; this->hipblasSgemm = (CublasSgemm)functions[3]; this->hipblasDgemm = (CublasDgemm)functions[4]; this->cublasSgemmEx = (CublasSgemmEx)functions[5]; this->hipblasHgemmBatched = (CublasHgemmBatched)functions[6]; this->hipblasSgemmBatched = (CublasSgemmBatched)functions[7]; this->hipblasDgemmBatched = (CublasDgemmBatched)functions[8]; */ } /** * This method acquires memory chunk of requested size on host side * * @param pointer pointer that'll be used for allocation * @param memorySize memory size, in bytes * @param flags optional parameter */ Nd4jPointer NativeOps::mallocHost(Nd4jLong memorySize, int flags) { Nd4jPointer pointer; // hipHostMallocMapped |hipHostMallocPortable hipError_t res = hipHostMalloc(reinterpret_cast<void **>(&pointer), memorySize, hipHostMallocDefault); if (res != 0) pointer = 0L; return pointer; } /** * This method acquires memory chunk of requested size on specified device * * @param pointer pointer that'll be used for allocation * @param memorySize memory size, in bytes * @param ptrToDeviceId pointer to deviceId. 
For cuda that's just and int, for OpenCL that's pointer to device_id, etc * @param flags optional parameter */ Nd4jPointer NativeOps::mallocDevice(Nd4jLong memorySize, Nd4jPointer ptrToDeviceId, int flags) { Nd4jPointer pointer; hipError_t res = hipMalloc(reinterpret_cast<void **>(&pointer), memorySize); if (res != 0) pointer = 0L; return pointer; } /** * This method releases previously allocated host memory space * * @param pointer pointer that'll be freed */ int NativeOps::freeHost(Nd4jPointer pointer) { hipError_t res = hipHostFree(reinterpret_cast<void *>(pointer)); if (res != 0) pointer = 0L; return 1L; } /** * This method releases previously allocated memory space on device * * @param pointer pointer that'll be freed * @param ptrToDeviceId pointer to deviceId. */ int NativeOps::freeDevice(Nd4jPointer pointer, Nd4jPointer ptrToDeviceId) { hipError_t res = hipFree(reinterpret_cast<void *>(pointer)); if (res != 0) pointer = 0L; return 1L; } Nd4jPointer NativeOps::createContext() { return 0L; } Nd4jPointer NativeOps::createStream() { Nd4jPointer nativeStream = (Nd4jPointer) malloc(sizeof(hipStream_t)); CHECK_ALLOC(nativeStream, "Failed to allocate memory for new CUDA stream"); hipError_t dZ = hipStreamCreate(reinterpret_cast<hipStream_t *>(&nativeStream)); checkCudaErrors(dZ); if (dZ != 0) throw std::runtime_error("hipStreamCreate(...) failed"); return nativeStream; } Nd4jPointer NativeOps::createEvent() { Nd4jPointer nativeEvent= (Nd4jPointer) malloc(sizeof(hipEvent_t)); CHECK_ALLOC(nativeEvent, "Failed to allocate new CUDA event buffer"); hipError_t dZ = hipEventCreateWithFlags(reinterpret_cast<hipEvent_t *>(&nativeEvent), hipEventDisableTiming); checkCudaErrors(dZ); if (dZ != 0) throw std::runtime_error("hipEventCreateWithFlags(...) failed"); return nativeEvent; } int NativeOps::registerEvent(Nd4jPointer event, Nd4jPointer stream) { hipEvent_t *pEvent = reinterpret_cast<hipEvent_t *>(&event); hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&stream); hipError_t dZ = hipEventRecord(*pEvent, *pStream); checkCudaErrors(dZ); if (dZ != 0) throw std::runtime_error("hipEventRecord(...) failed"); return 1; } int NativeOps::setDevice(Nd4jPointer ptrToDeviceId) { int deviceId = getDeviceId(ptrToDeviceId); hipError_t dZ = hipSetDevice(deviceId); checkCudaErrors(dZ); if (dZ != 0) throw std::runtime_error("hipSetDevice(...) 
failed"); return 1; } Nd4jLong NativeOps::getDeviceFreeMemory(Nd4jPointer ptrToDeviceId) { int device = getDeviceId(ptrToDeviceId); int orig = -1; hipGetDevice(&orig); if (device >= 0 && device != orig) { hipSetDevice(device); } size_t memFree = 0; size_t memTotal = 0; hipMemGetInfo(&memFree, &memTotal); if (device >= 0 && device != orig) { hipSetDevice(orig); } return (Nd4jLong) memFree; } Nd4jLong NativeOps::getDeviceTotalMemory(Nd4jPointer ptrToDeviceId) { int device = getDeviceId(ptrToDeviceId); int orig = -1; hipGetDevice(&orig); if (device >= 0 && device != orig) { hipSetDevice(device); } size_t memFree = 0; size_t memTotal = 0; hipMemGetInfo(&memFree, &memTotal); if (device >= 0 && device != orig) { hipSetDevice(orig); } return (Nd4jLong) memTotal; } int NativeOps::memcpy(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) { return memcpyAsync(dst, src, size, flags, reserved); } int NativeOps::memcpyAsync(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) { hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&reserved); hipMemcpyKind kind; DEBUG_KERNEL(pStream, 0); switch (flags) { case 0: { kind = hipMemcpyHostToHost; } break; case 1: { kind = hipMemcpyHostToDevice; } break; case 2: { kind = hipMemcpyDeviceToHost; } case 3: { kind = hipMemcpyDeviceToDevice; } break; default: { printf("UNDEFINED MEMCPY!\n"); break; } } hipError_t dZ = hipMemcpyAsync(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)), static_cast<size_t>(size), kind, *pStream); if (dZ != 0) { checkCudaErrors(dZ); printf("Failed on [%lu] -> [%lu], size: [%i], direction: [%i], dZ: [%i]\n", src, dst, size, flags, static_cast<int>(dZ)); fflush(stdout); fflush(stderr); throw std::runtime_error("hipMemcpyAsync(...) failed"); //return 0L; } return 1; } int NativeOps::memset(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) { hipError_t dZ = hipMemset(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size)); checkCudaErrors(dZ); if (dZ != 0) throw std::runtime_error("hipMemset(...) failed"); return 1; } int NativeOps::memsetAsync(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) { hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&reserved); hipError_t dZ = hipMemsetAsync(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size), *pStream); checkCudaErrors(dZ); if (dZ != 0) throw std::runtime_error("hipMemsetAsync(...) failed"); return 1; } int NativeOps::destroyEvent(Nd4jPointer event) { hipEvent_t *pEvent = reinterpret_cast<hipEvent_t *>(&event); hipError_t dZ = hipEventDestroy(*pEvent); checkCudaErrors(dZ); if (dZ != 0) throw std::runtime_error("cudaEvenDestroy(...) failed"); return 1; } int NativeOps::streamSynchronize(Nd4jPointer stream) { hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&stream); hipError_t dZ = hipStreamSynchronize(*pStream); checkCudaErrors(dZ); if (dZ != 0) throw std::runtime_error("hipStreamSynchronize(...) failed"); return 1L; } int NativeOps::eventSynchronize(Nd4jPointer event) { hipEvent_t *pEvent = reinterpret_cast<hipEvent_t *>(&event); hipError_t dZ = hipEventSynchronize(*pEvent); checkCudaErrors(dZ); if (dZ != 0) throw std::runtime_error("hipEventSynchronize(...) 
failed"); return 1L; } int NativeOps::getAvailableDevices() { int devCnt = 0; hipGetDeviceCount(&devCnt); return devCnt; } void NativeOps::enableDebugMode(bool reallyEnable) { nd4j::Environment::getInstance()->setDebug(reallyEnable); } void NativeOps::setGridLimit(int gridSize) { if (gridSize > 8192) gridSize = 8192; if (gridSize < 1) gridSize = 1; blockLimit = gridSize; } int NativeOps::ompGetMaxThreads() { return maxThreads; } int NativeOps::ompGetNumThreads() { return maxThreads; } void NativeOps::setOmpNumThreads(int threads) { if (threads > 1024) threads = 1024; if (threads < 32) threads = 32; maxThreads = threads; } void NativeOps::enableVerboseMode(bool reallyEnable) { nd4j::Environment::getInstance()->setVerbose(reallyEnable); } int NativeOps::getDeviceMajor(Nd4jPointer ptrToDeviceId) { int device = getDeviceId(ptrToDeviceId); return deviceProperties[device].major; } int NativeOps::getDeviceMinor(Nd4jPointer ptrToDeviceId) { int device = getDeviceId(ptrToDeviceId); return deviceProperties[device].minor; } const char * NativeOps::getDeviceName(Nd4jPointer ptrToDeviceId) { int device = getDeviceId(ptrToDeviceId); return deviceProperties[device].name; } /** * Concatneate multi array of the same shape together * along a particular dimension */ void NativeOps::concat( Nd4jPointer *extraPointers, int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfo, Nd4jPointer *ddata, Nd4jPointer *dinputShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); auto hXShapeInfo = hZShapeInfo; auto hShapePointers = reinterpret_cast<Nd4jLong **>(inputShapeInfo); // numArrays will be used as number of TADs, so each block process 1 input int smem = 8192; bool isVstack = false; bool isScalar = true; bool isHstack = false; for (int i = 0; i < numArrays; i++) { if (!shape::isScalar(hShapePointers[i])) { isScalar = false; break; } } if (!isScalar && dimension == 0 && shape::rank(hZShapeInfo) == 2 && shape::order(hZShapeInfo) == 'c' ) { isVstack = true; for (int i = 0; i < numArrays; i++) { if (!shape::isVector(hShapePointers[i]) || shape::elementWiseStride(hShapePointers[i]) <= 0 || shape::order(hShapePointers[i]) != 'c') { isVstack = false; break; } } } // let's try to fit N-dimensional vstack if (!isVstack && !isScalar && dimension == 0 && shape::order(hXShapeInfo) == 'c') { auto length0 = shape::length(hShapePointers[0]); isVstack = true; for (int i = 0; i < numArrays; i++) { if (shape::elementWiseStride(hShapePointers[i]) <= 0 || shape::order(hShapePointers[i]) != 'c' || length0 != shape::length(hShapePointers[i])) { isVstack = false; break; } } } if (!isScalar && !isVstack && dimension == 1 && shape::isVector(hZShapeInfo)) { isHstack = true; for (int i = 0; i < numArrays; i++) { if (!shape::isVector(hShapePointers[i]) || shape::elementWiseStride(hShapePointers[i]) <= 0) { isHstack = false; break; } } } if (isScalar) { if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Going scalar concat\n"); dim3 launchDims(128, 128, 16384); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); BUILD_SINGLE_SELECTOR(zType, concatKernelScalarGeneric, (launchDims, stream, numArrays, reinterpret_cast<Nd4jPointer *>(ddata[0]), dZ), LIBND4J_TYPES); } else if (isVstack) { if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Going VStack concat\n"); dim3 launchDims(128, 512, 16384); auto zType = 
nd4j::ArrayOptions::dataType(hZShapeInfo);
        BUILD_SINGLE_SELECTOR(zType, concatKernelVStackGeneric, (launchDims, stream, numArrays, reinterpret_cast<Nd4jPointer *>(ddata[0]), reinterpret_cast<Nd4jPointer *>(dinputShapeInfo[0]), dZ, dZShapeInfo), LIBND4J_TYPES);
    } else if (isHstack) {
        if (nd4j::Environment::getInstance()->isDebugAndVerbose())
            printf("Going HStack concat\n");
        dim3 launchDims(128, 128, 16384);
        auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
        BUILD_SINGLE_SELECTOR(zType, concatKernelHStackGeneric, (launchDims, stream, numArrays, reinterpret_cast<Nd4jPointer *>(ddata[0]), reinterpret_cast<Nd4jPointer *>(dinputShapeInfo[0]), dZ, dZShapeInfo), LIBND4J_TYPES);
    } else {
        if (nd4j::Environment::getInstance()->isDebugAndVerbose())
            printf("Going generic concat\n");

        auto devZTadShape = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
        auto devZOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
        dim3 launchDims(128, 128, 8192);
        auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
        BUILD_SINGLE_SELECTOR(zType, concatKernelGeneric, (launchDims, stream, numArrays, reinterpret_cast<Nd4jPointer *>(ddata[0]), reinterpret_cast<Nd4jPointer *>(dinputShapeInfo[0]), dZ, dZShapeInfo, reinterpret_cast<Nd4jPointer *>(tadPointers[0]), reinterpret_cast<Nd4jPointer *>(offsetPointers[0]), devZTadShape, devZOffsets), LIBND4J_TYPES);
    }

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("sharedMemory requested for concatFloat: [%i], registers: [%i]\n", smem, funcAttributes[31].numRegs);

    nd4j::DebugHelper::checkErrorCode(stream, "Legacy ConcatFloat(...) failed");
}

void NativeOps::specialConcat(
        Nd4jPointer *extraPointers,
        int dimension,
        int numArrays,
        Nd4jPointer *data,
        Nd4jPointer *inputShapeInfo,
        void *dZ,
        Nd4jLong *dZShapeInfo,
        Nd4jPointer *tadPointers,
        Nd4jPointer *offsetPointers) {
    nd4j::SpecialMethods<float>::concatCpuGeneric(
            dimension,
            numArrays,
            data,
            inputShapeInfo,
            dZ,
            dZShapeInfo);
}

/**
 * This method saves the TAD-only shape info and the TAD offsets for the given
 * dimensions into the provided target/offsets buffers
 */
void NativeOps::tadOnlyShapeInfo(Nd4jLong *dXShapeInfo, int *dimension, int dimensionLength, Nd4jLong *target, Nd4jLong *offsets) {
    shape::TAD tad;
    tad.init(dXShapeInfo, dimension, dimensionLength);
    //tad->setOutputBuffer(target);
    tad.createTadOnlyShapeInfo();
    tad.createOffsets();

    std::memcpy(reinterpret_cast<void *>(target), tad.tadOnlyShapeInfo, shape::shapeInfoByteLength(tad.tadOnlyShapeInfo));
    std::memcpy(reinterpret_cast<void *>(offsets), tad.tadOffsets, tad.numTads * sizeof(Nd4jLong));
}

int NativeOps::memcpyConstantAsync(Nd4jLong dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
    hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&reserved);

    hipMemcpyKind kind;
    DEBUG_KERNEL(pStream, -1);

    switch (flags) {
        case 0: {
            kind = hipMemcpyHostToHost;
        }
        break;
        case 1: {
            kind = hipMemcpyHostToDevice;
        }
        break;
        case 2: {
            kind = hipMemcpyDeviceToHost;
        }
        break;
        case 3: {
            kind = hipMemcpyDeviceToDevice;
        }
        break;
    }
    //hipError_t dZ = hipMemcpyAsync((void *) dst, (const void *) src, (size_t) size, kind, *pStream);
    hipError_t dZ = hipMemcpyToSymbolAsync(deviceConstantMemory, const_cast<const void *>(src), size, dst, kind, *pStream);
    checkCudaErrors(dZ);
    if (dZ != 0)
        throw std::runtime_error("hipMemcpyToSymbolAsync(...) failed");
    return 1;
}

Nd4jPointer NativeOps::getConstantSpace() {
    Nd4jPointer dConstAddr;
    hipError_t dZ = hipGetSymbolAddress(reinterpret_cast<void **>(&dConstAddr), deviceConstantMemory);

    if (dZ != 0)
        throw std::runtime_error("hipGetSymbolAddress(...)
failed"); return dConstAddr; } void NativeOps::pullRows(Nd4jPointer *extraPointers, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *z, Nd4jLong *zShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, Nd4jLong n, Nd4jLong *indexes, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *zTadShapeInfo, Nd4jLong *zTadOffsets) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); dim3 launchDims(64, 256, 1024); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); BUILD_SINGLE_SELECTOR(xType, pullRowsKernelGeneric, (launchDims, stream, dX, dZ, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets), LIBND4J_TYPES); DEBUG_KERNEL(stream, -1); } void NativeOps::average(Nd4jPointer *extras, Nd4jPointer *x, Nd4jLong *xShapeInfo, Nd4jPointer *dx, Nd4jLong *dXShapeInfo, void *z, Nd4jLong *zShapeInfo, void *dz, Nd4jLong *dzShapeInfo, int n, Nd4jLong length, bool propagate) { hipStream_t * stream = reinterpret_cast<hipStream_t *>(&extras[1]); int mode = getDeviceId(extras[3]); auto dX = reinterpret_cast<void **>(dx); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("averageFloat called\n"); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); // launching on gpu if (mode == 0) { dim3 launchDims(256, 256, 4096); // averagingKernelFloat<<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(dX, dz, n, length, propagate); BUILD_SINGLE_SELECTOR(xType, averagingKernelGeneric, (launchDims, stream, dX, dz, n, length, propagate), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "AverageFloat(...) failed"); } else { // launching on host memory BUILD_SINGLE_SELECTOR(xType, nd4j::SpecialMethods, ::averageGeneric(x, z, zShapeInfo, n, length, propagate), LIBND4J_TYPES); } } void NativeOps::accumulate(Nd4jPointer *extras, Nd4jPointer *x, Nd4jLong *xShapeInfo, Nd4jPointer *dx, Nd4jLong *dXShapeInfo, void *z, Nd4jLong *zShapeInfo, void *dz, Nd4jLong *dzShapeInfo, int n, Nd4jLong length) { auto stream = reinterpret_cast<hipStream_t *>(&extras[1]); int mode = getDeviceId(extras[3]); auto dX = reinterpret_cast<void **>(dx); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("accumulateFloat called\n"); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); // launching on gpu if (mode == 0) { dim3 launchDims(n, 256, 16384); BUILD_SINGLE_SELECTOR(xType, accumulateKernelGeneric, (launchDims, stream, dX, dz, n,length), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "AccumulateFloat(...) 
failed"); } else { // launching on host memory BUILD_SINGLE_SELECTOR(xType, nd4j::SpecialMethods, ::accumulateGeneric(x, z, zShapeInfo, n, length), LIBND4J_TYPES); } } void NativeOps::shuffle(Nd4jPointer *extras, Nd4jPointer *x, Nd4jPointer *xShapeInfo, Nd4jPointer *dx, Nd4jPointer *dXShapeInfo, Nd4jPointer *z, Nd4jPointer *zShapeInfo, Nd4jPointer *dz, Nd4jPointer *dZShapeInfo, int N, int *shuffleMap, Nd4jPointer *tadShapeInfo, Nd4jPointer *tadOffsets) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); auto dX = reinterpret_cast<void **>(dx); auto dZ = reinterpret_cast<void **>(dz); auto xShape = reinterpret_cast<Nd4jLong **>(xShapeInfo); auto dxShape = reinterpret_cast<Nd4jLong **>(dXShapeInfo); auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong **>(tadShapeInfo); auto tadOffset = reinterpret_cast<Nd4jLong **>(tadOffsets); auto xType = nd4j::ArrayOptions::dataType(xShape[0]); dim3 launchDims(N, 256, 8192); BUILD_SINGLE_SELECTOR(xType, shuffleKernelGeneric, (launchDims, stream, dX, dxShape, dZ, N, shuffleMap, tadOnlyShapeInfo, tadOffset), LIBND4J_TYPES); DEBUG_KERNEL(stream, 0); } /* void NativeOps::execMetaPredicateShape(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraA, void *extraB, double scalarA, double scalarB) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); BUILD_SINGLE_SELECTOR(xType, functions::grid::GRIDShaped, ::execMetaPredicateShaped(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraA, extraB, scalarA, scalarB), LIBND4J_TYPES); // functions::grid::GRIDShaped<float>::execMetaPredicateShaped(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dX, dXShapeInfo, dy, dYShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB); DEBUG_KERNEL(stream, opNumA); } */ bool NativeOps::isExperimentalEnabled() { return nd4j::Environment::getInstance()->isExperimentalBuild(); } void NativeOps::setOmpMinThreads(int threads) { minThreads = nd4j::math::nd4j_max<int>(32, threads); minThreads = nd4j::math::nd4j_min<int>(maxThreads, minThreads); } int NativeOps::getDevice() { int curDevice = -1; hipGetDevice(&curDevice); return curDevice; } void NativeOps::setElementThreshold(int num) { // this is no-op for CUDA } void NativeOps::setTADThreshold(int num) { // this is no-op for CUDA } void NativeOps::execSummaryStats(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, bool biasCorrected) { auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); dim3 launchDims = dim3(256, 256, 32768); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); if (!DataTypeUtils::isR(zType)) throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Z operand to have floating point data type", zType); BUILD_DOUBLE_SELECTOR(xType, zType, functions::summarystats::SummaryStatsReduce, ::execSummaryStatsReduce(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, nullptr, nullptr, biasCorrected, nullptr), LIBND4J_TYPES, FLOAT_TYPES); } void 
NativeOps::execSummaryStats(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, int *dimension, int dimensionLength, bool biasCorrected, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]); dim3 launchDims = dim3(256, 256, 32768); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); if (!DataTypeUtils::isR(zType)) throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Z operand to have floating point data type", zType); BUILD_DOUBLE_SELECTOR(xType, zType, functions::summarystats::SummaryStatsReduce, ::execSummaryStatsReduce(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, biasCorrected, reductionPointer), LIBND4J_TYPES, FLOAT_TYPES); } void NativeOps::execReduce3(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) { auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); dim3 launchDims(256, 256, 32768); if (xType != yType) throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Y operand to have X type", xType, yType); if (!DataTypeUtils::isR(zType)) throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Z operand to have floating point data type", zType); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce3::Reduce3, ::exec(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, 1, allocationPointer, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets), LIBND4J_TYPES, FLOAT_TYPES) DEBUG_KERNEL(stream, opNum); } //////////////////////////////////////////////////////////////////////// void NativeOps::execReduce3(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) { auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); auto numBlocks = shape::length(hZShapeInfo); dim3 launchDims(numBlocks, 256, 32768); if (xType != yType) throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Y operand to have X type", xType, yType); if (!DataTypeUtils::isR(zType)) throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Z operand 
to have floating point data type", zType); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce3::Reduce3, ::exec(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, 1, allocationPointer, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets), LIBND4J_TYPES, FLOAT_TYPES) } //////////////////////////////////////////////////////////////////////// void NativeOps::execReduce3Scalar(Nd4jPointer *extraPointers,int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo) { auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); auto allocationPointer = reinterpret_cast<int *>(extraPointers[3]); auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); auto xLength = shape::length(hXShapeInfo); auto blockWidth = 256; auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth); dim3 launchDims(numBlocks, blockWidth, 32768); if (xType != yType) throw nd4j::datatype_exception::build("NativeOps::execReduce3Scalar requires Y operand to have X type", xType, yType); if (!DataTypeUtils::isR(zType)) throw nd4j::datatype_exception::build("NativeOps::execReduce3Scalar requires Z operand to have floating point data type", zType); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce3::Reduce3, ::execScalar(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParams, dZ, dZShapeInfo, allocationPointer, reductionPointer, nullptr), LIBND4J_TYPES, FLOAT_TYPES); } void NativeOps::execScalarBool( Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hScalar, Nd4jLong *hScalarShapeInfo, void *dScalar, Nd4jLong *dScalarShapeInfo, void *extraParams) { auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); dim3 launchDims = dim3(256, 512, 8192); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto yType = nd4j::ArrayOptions::dataType(hScalarShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); if (xType != yType ) throw std::runtime_error("NativeOps::execScalarBool requires X & Y to have same type"); if (!DataTypeUtils::isB(zType) ) throw std::runtime_error("NativeOps::execScalarBool requires Z operand to have BOOL type"); BUILD_DOUBLE_SELECTOR(xType, zType, functions::scalar::ScalarBoolTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalar, extraParams), LIBND4J_TYPES, BOOL_TYPES); DEBUG_KERNEL(stream, opNum); } void NativeOps::execScalarBool(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hScalars, Nd4jLong *hScalarShapeInfo, void *dScalars, Nd4jLong *dScalarShapeInfo, void *extraParams, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); dim3 launchDims(256, 512, 8192); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto yType = nd4j::ArrayOptions::dataType(hScalarShapeInfo); auto 
zType = nd4j::ArrayOptions::dataType(hZShapeInfo); if (xType != yType ) throw std::runtime_error("NativeOps::execScalarBool requires X & Y to have same type"); if (!DataTypeUtils::isB(zType) ) throw std::runtime_error("NativeOps::execScalarBool requires Z operand to have BOOL type"); BUILD_DOUBLE_SELECTOR(xType, yType, functions::scalar::ScalarBoolTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES, BOOL_TYPES); DEBUG_KERNEL(stream, opNum); } void NativeOps::execScalar( Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hScalar, Nd4jLong *hScalarShapeInfo, void *dScalar, Nd4jLong *dScalarShapeInfo, void *extraParams) { auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); dim3 launchDims(256, 512, 8192); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto yType = nd4j::ArrayOptions::dataType(hScalarShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); #ifndef __ND4J_EXPERIMENTAL__ BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::scalar::ScalarTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, dZ, dZShapeInfo, hZShapeInfo, dScalar, extraParams), LIBND4J_TYPES, LIBND4J_TYPES); #else BUILD_SINGLE_SELECTOR_THRICE(xType, functions::scalar::ScalarTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, dZ, dZShapeInfo, hZShapeInfo, dScalar, extraParams), LIBND4J_TYPES); #endif DEBUG_KERNEL(stream, opNum); } void NativeOps::execScalar(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hScalars, Nd4jLong *hScalarShapeInfo, void *dScalars, Nd4jLong *dScalarShapeInfo, void *extraParams, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto yType = nd4j::ArrayOptions::dataType(hScalarShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); dim3 launchDims(256, 256, 16384); #ifndef __ND4J_EXPERIMENTAL__ BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), LIBND4J_TYPES, LIBND4J_TYPES); #else BUILD_SINGLE_SELECTOR_THRICE(xType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), LIBND4J_TYPES); #endif DEBUG_KERNEL(stream, opNum); } void NativeOps::execAggregate(Nd4jPointer *extraPointers, int opNum, void **arguments, int numArguments, Nd4jLong **shapes, int numShapes, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, void *realArguments, int numRealArguments, nd4j::DataType dtype) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int numBlocks = getDeviceId(extraPointers[2]); int numThreads = getDeviceId(extraPointers[3]); int 
shmem = getDeviceId(extraPointers[4]); dim3 launchDims = dim3(numBlocks, numThreads, shmem); BUILD_SINGLE_SELECTOR(dtype, functions::aggregate::AggregatedFunction, ::aggregateKernelGeneric(launchDims, stream, opNum, arguments, numArguments, shapes, numShapes, indexArguments, numIndexArguments, intArrays, numIntArrays, realArguments, numRealArguments), FLOAT_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "execAggregateFloat(...) failed"); } void NativeOps::execAggregateBatch(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments, nd4j::DataType dtype) { // not implemented yet hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int numBlocks = getDeviceId(extraPointers[2]); int numThreads = getDeviceId(extraPointers[3]); int shmem = getDeviceId(extraPointers[4]); dim3 launchDims = dim3(numAggregates, numThreads, shmem); BUILD_SINGLE_SELECTOR(dtype, functions::aggregate::AggregatedFunction, ::aggregateBatchKernelGeneric(launchDims, stream, opNum, numAggregates, maxArgs, maxShapes, maxIntArrays, maxIntArraySize, maxIdx, maxReals, ptrToArguments), FLOAT_TYPES); DEBUG_KERNEL(stream, opNum); } void NativeOps::execRandom(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraArguments) { auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); auto sizeOf = sizeof(nd4j::graph::RandomGenerator); Nd4jPointer stateDevice; hipError_t res = hipMalloc(reinterpret_cast<void **>(&stateDevice), sizeOf); checkCudaErrors(hipStreamSynchronize(*stream)); checkCudaErrors(hipMemcpyAsync(stateDevice, stateHost, sizeOf, hipMemcpyHostToDevice, *stream)); dim3 launchDims = dim3(512, 512, 32768); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); // functions::random::RandomFunction<float>::executeCudaSingle(launchDims, extraPointers, opNum, stateHost, dZ, dZShapeInfo, extraArguments), BUILD_SINGLE_SELECTOR(zType, functions::random::RandomFunction, ::executeCudaSingle(launchDims, extraPointers, opNum, stateDevice, dZ, dZShapeInfo, extraArguments), FLOAT_TYPES); checkCudaErrors(hipMemcpyAsync(stateHost, stateDevice, sizeOf, hipMemcpyDeviceToHost, *stream)); checkCudaErrors(hipStreamSynchronize(*stream)); hipFree(stateDevice); } void NativeOps::execRandom(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraArguments) { auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); auto sizeOf = sizeof(nd4j::graph::RandomGenerator); Nd4jPointer stateDevice; hipError_t res = hipMalloc(reinterpret_cast<void **>(&stateDevice), sizeOf); checkCudaErrors(hipStreamSynchronize(*stream)); checkCudaErrors(hipMemcpyAsync(stateDevice, stateHost, sizeOf, hipMemcpyHostToDevice, *stream)); dim3 launchDims = dim3(512, 512, 32768); auto xType = nd4j::ArrayOptions::dataType(hZShapeInfo); // functions::random::RandomFunction<float>::executeCudaDouble(launchDims, extraPointers, opNum, stateHost, dX, dXShapeInfo, dZ, dZShapeInfo, extraArguments); BUILD_SINGLE_SELECTOR(xType, functions::random::RandomFunction, ::executeCudaDouble(launchDims, extraPointers, opNum, stateDevice, dX, dXShapeInfo, dZ, dZShapeInfo, extraArguments), FLOAT_TYPES); checkCudaErrors(hipMemcpyAsync(stateHost, stateDevice, sizeOf, hipMemcpyDeviceToHost, *stream)); 
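    // The updated generator state is copied back from stateDevice to stateHost above, so a
    // subsequent execRandom() call continues the sequence from the advanced state; the
    // synchronize below also guarantees that copy has completed before stateDevice is freed.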
checkCudaErrors(hipStreamSynchronize(*stream)); hipFree(stateDevice); } void NativeOps::execRandom(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraArguments) { auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); auto sizeOf = sizeof(nd4j::graph::RandomGenerator); Nd4jPointer stateDevice; hipError_t res = hipMalloc(reinterpret_cast<void **>(&stateDevice), sizeOf); checkCudaErrors(hipStreamSynchronize(*stream)); checkCudaErrors(hipMemcpyAsync(stateDevice, stateHost, sizeOf, hipMemcpyHostToDevice, *stream)); dim3 launchDims = dim3(512, 512, 32768); auto xType = nd4j::ArrayOptions::dataType(hZShapeInfo); // functions::random::RandomFunction<float>::executeCudaTriple(launchDims, extraPointers, opNum, stateHost, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraArguments); BUILD_SINGLE_SELECTOR(xType, functions::random::RandomFunction, ::executeCudaTriple(launchDims, extraPointers, opNum, stateDevice, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraArguments), FLOAT_TYPES); checkCudaErrors(hipMemcpyAsync(stateHost, stateDevice, sizeOf, hipMemcpyDeviceToHost, *stream)); checkCudaErrors(hipStreamSynchronize(*stream)); hipFree(stateDevice); } Nd4jPointer NativeOps::initRandom(Nd4jPointer *extraPointers, long seed, long bufferSize, Nd4jPointer ptrToBuffer) { unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]); hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); // we don't synchronize at random initialization, it's safe to go unsync here // hipStreamSynchronize(*stream); auto ptrDev = reinterpret_cast<unsigned long long *>(ptrToBuffer); auto buffer = new nd4j::random::RandomBuffer(seed, bufferSize, reinterpret_cast<uint64_t *>(ptrHost), reinterpret_cast<uint64_t *>(ptrDev)); buffer->propagateToDevice(buffer, *stream); nd4j::DebugHelper::checkErrorCode(stream, "initRandom(...) failed A"); // we generate sequence in the host memory nd4j::random::Xoroshiro128 generator(buffer); generator.refreshBuffer(); // and copy it to gpu hipMemcpyAsync(ptrDev, ptrHost, bufferSize * 8, hipMemcpyHostToDevice, *stream); nd4j::DebugHelper::checkErrorCode(stream, "initRandom(...) 
failed B"); return buffer; } void NativeOps::destroyRandom(Nd4jPointer ptrBuffer) { nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrBuffer); // FIXME: it's bad thing, but we can't know in advance, which stream(s) where using this generator in practice hipDeviceSynchronize(); delete buffer; } void NativeOps::refreshBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) { nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrRandom); unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]); hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); hipStreamSynchronize(*stream); uint64_t *ptrDev = buffer->getDeviceBuffer(); // update rng state buffer->setSeed(seed); buffer->setOffset(0); buffer->propagateToDevice(buffer, *stream); // refresh buffer on host size nd4j::random::Xoroshiro128 generator(buffer); generator.refreshBuffer(); // copy back to gpu hipMemcpyAsync(ptrDev, ptrHost, buffer->getSize() * 8, hipMemcpyHostToDevice, *stream); } void NativeOps::reSeedBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) { nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrRandom); hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); hipStreamSynchronize(*stream); // update rng state buffer->reSeed(seed); buffer->setOffset(0); buffer->propagateToDevice(buffer, *stream); } /** * Return the length of a shape buffer * based on the pointer * @param buffer the buffer pointer to check * @return */ int NativeOps::lengthForShapeBufferPointer(Nd4jPointer buffer) { auto shapeBuffer = reinterpret_cast<Nd4jLong *>(buffer); return shape::shapeInfoLength(shape::rank(shapeBuffer)); } /** * The pointer to get the address for * * @param address the address to get the pointer * @return the pointer for the given address */ Nd4jPointer NativeOps::pointerForAddress(Nd4jLong address) { return reinterpret_cast<Nd4jPointer >(address); } void NativeOps::tear(Nd4jPointer *extras, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, Nd4jPointer *targets, Nd4jLong *zShapeInfo, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); dim3 launchDims(512, 512, 512); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); BUILD_SINGLE_SELECTOR(xType, tearKernelGeneric, (launchDims, stream, dX, dXShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "tearFloat(...) failed"); } void prescanArrayRecursive(Nd4jPointer *extras, int *dZ, int *dX, int numElements, int level) { auto stream = reinterpret_cast<hipStream_t *>(&extras[1]); auto g_scanBlockSums = reinterpret_cast<int **>(&extras[2]); int blockSize = 512; // max size of the thread blocks int numBlocks = nd4j::math::nd4j_max<int>(1, static_cast<int>(ceil(static_cast<float>(numElements) / (2.f * blockSize)))); int numThreads; if (numBlocks > 1) numThreads = blockSize; else if (nd4j::isPowerOfTwo(numElements)) numThreads = numElements / 2; else numThreads = nd4j::floorPow2(numElements); int numEltsPerBlock = numThreads * 2; // if this is a non-power-of-2 array, the last block will be non-full // compute the smallest power of 2 able to compute its scan. 
int numEltsLastBlock = numElements - (numBlocks-1) * numEltsPerBlock; int numThreadsLastBlock = nd4j::math::nd4j_max<int>(1, numEltsLastBlock / 2); int np2LastBlock = 0; int sharedMemLastBlock = 0; if (numEltsLastBlock != numEltsPerBlock) { np2LastBlock = 1; if(!isPowerOfTwo(numEltsLastBlock)) numThreadsLastBlock = floorPow2(numEltsLastBlock); unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS; sharedMemLastBlock = sizeof(int) * (2 * numThreadsLastBlock + extraSpace); } // padding space is used to avoid shared memory bank conflicts int extraSpace = numEltsPerBlock / NUM_BANKS; int sharedMemSize = sizeof(int) * (numEltsPerBlock + extraSpace); // setup execution parameters // if NP2, we process the last block separately dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1); dim3 threads(numThreads, 1, 1); dim3 gridOnes(1, 1, 1); dim3 threadsOnes(numThreadsLastBlock, 1, 1); if (sharedMemSize < 2048) sharedMemSize = 2048; if (sharedMemLastBlock < 2048) sharedMemLastBlock = 2048; // execute the scan if (numBlocks > 1) { nd4j::prescanLauncher<true, false>(grid, threads, sharedMemSize, stream, dZ, dX, g_scanBlockSums[level], numThreads * 2, 0, 0); if (np2LastBlock) { nd4j::prescanLauncher<true, true>(gridOnes, threadsOnes, sharedMemLastBlock, stream, dZ, dX, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock); } // After scanning all the sub-blocks, we are mostly done. But now we // need to take all of the last values of the sub-blocks and scan those. // This will give us a new value that must be sdded to each block to // get the final results. // recursive (CPU) call prescanArrayRecursive(extras, g_scanBlockSums[level], g_scanBlockSums[level], numBlocks, level+1); hipLaunchKernelGGL(( nd4j::uniformAdd), dim3(grid), dim3(threads), 1024, *stream, dZ, g_scanBlockSums[level], numElements - numEltsLastBlock, 0, 0); if (np2LastBlock) { hipLaunchKernelGGL(( nd4j::uniformAdd), dim3(1), dim3(numThreadsLastBlock), 1024, *stream, dZ, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock); } } else if (isPowerOfTwo(numElements)) { nd4j::prescanLauncher<false, false>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numThreads * 2, 0, 0); } else { nd4j::prescanLauncher<false, true>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numElements, 0, 0); } } void NativeOps::encodeThresholdP1(Nd4jPointer *extras, void *dx, Nd4jLong *hXShapeInfo, Nd4jLong N, int *dz, float threshold) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); int blockSize = 1024; int numBlocks = N / blockSize + (N % blockSize ? 1 : 0); dim3 launchDims(numBlocks, blockSize, 1024); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); BUILD_SINGLE_SELECTOR(xType, encoderKernelP1Generic, (launchDims, stream, dx, N, dz, threshold), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP1Float(...) failed"); } void NativeOps::encodeThresholdP2Int(Nd4jPointer *extraPointers, int *dx, Nd4jLong N, int *dz) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); //encoderKernelP2Float<<<numBlocks, blockSize , 1024 * sizeof(float), *stream>>>(dx, N, dz); prescanArrayRecursive(extraPointers, dz, dx + 1, (int) N, 0); nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP2Int(...) 
failed"); } void NativeOps::encodeThresholdP3(Nd4jPointer *extraPointers, void *dx, Nd4jLong *hXShapeInfo, int *offsets, Nd4jLong N, int *dz){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int blockSize = 1024; int numBlocks = N / blockSize + (N % blockSize ? 1 : 0); dim3 launchDims(numBlocks, blockSize, 4096); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); BUILD_SINGLE_SELECTOR(xType, encoderKernelP3Generic, (launchDims, stream, dx, offsets, N, dz), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP3Float(...) failed"); } void NativeOps::decodeThreshold(Nd4jPointer *extraPointers, void *dx, Nd4jLong N, void *dz, Nd4jLong *zShapeInfo){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); // we probably want to have smaller blocks here, memory writes are misaligned anyway int blockSize = 128; int numBlocks = N / blockSize + (N % blockSize ? 1 : 0); dim3 launchDims(numBlocks, blockSize, 1024); auto zType = nd4j::ArrayOptions::dataType(zShapeInfo); BUILD_SINGLE_SELECTOR(zType, decoderKernelGeneric, (launchDims, stream, dx, N, dz), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "decodeThresholdFloat(...) failed"); } void NativeOps::execReduce3All(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParamsVals, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, int *dimension, int dimensionLength, Nd4jLong *xTadShapeInfo, Nd4jLong *xOffsets, Nd4jLong *yTadShapeInfo, Nd4jLong *yOffsets) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D119 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); dim3 launchDims(shape::length(hZShapeInfo), 256, 32768); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AD119 opNum:[%i]\n", opNum); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); if (yType != xType) throw nd4j::datatype_exception::build("NativeOps::execReduce3All both operands must have same data type", xType, yType); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce3::Reduce3, ::execAll(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParamsVals, dZ, dZShapeInfo, dimension, dimensionLength, 1, allocationPointer, xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets), LIBND4J_TYPES, FLOAT_TYPES); DEBUG_KERNEL(stream, opNum); } void NativeOps::sort(Nd4jPointer *extraPointers, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, bool descending) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[ 1]); auto xLength = shape::length(xShapeInfo); auto xEWS = shape::elementWiseStride(xShapeInfo); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); // check if xLength is a power of 2, and use bitonic sort, if that's the case if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) { int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; dim3 launchDims(numBlocks, numThreads, 
32768); for (int k = 2; k <= xLength; k = 2*k) { for (int j = k >> 1; j > 0; j = j >> 1) { BUILD_SINGLE_SELECTOR(xType, bitonicSortStepGeneric, (launchDims, stream, dX, dXShapeInfo, j, k, xLength, descending), LIBND4J_TYPES); } } } else { int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; numBlocks = nd4j::math::nd4j_min<int>(512, numBlocks); dim3 launchDims(numBlocks, numThreads, 32768); int max = 2, dg = 0; while (max < xLength) { max <<= 1; dg++; } max <<= 1; for (int window = 2; window < max; window<<=1) { int n = window; int rev = 0; do{ int half = n >> 1; BUILD_SINGLE_SELECTOR(xType, bitonicArbitraryStepGeneric, (launchDims, stream, dX, dXShapeInfo, n, xLength, rev, descending), LIBND4J_TYPES); n>>=1; rev = 1; } while(n > 1); } } nd4j::DebugHelper::checkErrorCode(stream, "sort(...) failed"); } void NativeOps::sortTad(Nd4jPointer *extraPointers, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool descending) { // to be implemented hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); dim3 launchDims(512, 512, 32768); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); BUILD_SINGLE_SELECTOR(xType, oesTadGeneric, (launchDims, stream, dX, dXShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "sortTadFloat(...) failed"); } void NativeOps::sortCooIndices(Nd4jPointer *extraPointers, Nd4jLong *indices, void *values, Nd4jLong length, int rank) { throw std::runtime_error("sortCooIndices:: Not implemented yet"); } Nd4jLong NativeOps::encodeBitmap(Nd4jPointer *extraPointers, void *dx, Nd4jLong *hXShapeInfo, Nd4jLong N, int *dz, float threshold) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *resultPointer = reinterpret_cast<int *>(extraPointers[2]); int *reductionPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims(512, 512, 32768); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); BUILD_SINGLE_SELECTOR(xType, cudaEncodeBitmapGeneric, (launchDims, stream, dx, N, dz, resultPointer, reductionPointer, threshold), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "encodeBitmapFloat(...) failed"); Nd4jLong dZ = (Nd4jLong) resultPointer[0]; resultPointer[0] = 0; return dZ; } void NativeOps::decodeBitmap(Nd4jPointer *extraPointers, void *dx, Nd4jLong N, void *dz, Nd4jLong *zShapeInfo) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); dim3 launchDims(512, 512, 16384); auto xType = nd4j::ArrayOptions::dataType(zShapeInfo); BUILD_SINGLE_SELECTOR(xType, cudaDecodeBitmapGeneric, (launchDims, stream, dx, N, dz), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "decodeBitmapFloat(...) 
failed"); } Nd4jLong* NativeOps::mmapFile(Nd4jPointer *extraPointers, const char *fileName, Nd4jLong length) { return nullptr; } void NativeOps::munmapFile(Nd4jPointer *extraPointers, Nd4jLong* ptrMap, Nd4jLong length) { } nd4j::graph::ResultWrapper* NativeOps::executeFlatGraph(Nd4jPointer *extraPointers, Nd4jPointer flatBufferPointer) { return nullptr; } const char* NativeOps::getAllCustomOps() { return nd4j::ops::OpRegistrator::getInstance()->getAllCustomOperations(); } nd4j::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, nd4j::ops::DeclarableOp* op, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) { nd4j::graph::VariableSpace varSpace; Context block(2, &varSpace); nd4j::ShapeList inShapes; for (int e = 0; e < numIArgs; e++) block.getIArguments()->push_back(iArgs[e]); for (int e = 0; e < numTArgs; e++) block.getTArguments()->push_back(tArgs[e]); for (int e = 0; e < numInputShapes; e++) { auto shape_ = reinterpret_cast<Nd4jLong *>(inputShapes[e]); // we shouldn't copy buffer if that's empty array void *buffer_ = nd4j::ArrayOptions::arrayType(shape_) == ArrayType::EMPTY ? nullptr : inputBuffers[e]; auto array = new nd4j::NDArray(buffer_, shape_); array->triggerAllocationFlag(false, false); // block should contain references to proper variable varSpace.putVariable(1, e, array); block.pickInput(1, e); inShapes.push_back(shape_); } auto shapeList = op->calculateOutputShape(&inShapes, block); if (varSpace.workspace() != nullptr) shapeList->detach(); return shapeList; } nd4j::ShapeList* NativeOps::calculateOutputShapes(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash); return _calculateOutputShapes(extraPointers, op, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs); } nd4j::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, nd4j::ops::DeclarableOp* op, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) { Context block(1); nd4j::ShapeList inShapes; for (int e = 0; e < numIArgs; e++) block.getIArguments()->push_back(iArgs[e]); for (int e = 0; e < numTArgs; e++) block.getTArguments()->push_back(tArgs[e]); for (int e = 0; e < numInputShapes; e++) inShapes.push_back(reinterpret_cast<Nd4jLong *>(inputShapes[e])); auto shapeList = op->calculateOutputShape(&inShapes, block); return shapeList; } nd4j::ShapeList* NativeOps::calculateOutputShapes(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash); return _calculateOutputShapes(extraPointers, op, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs); } static FORCEINLINE Nd4jStatus realExec(nd4j::ops::DeclarableOp* op, Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool* bArgs, int numBArgs, bool isInplace) { if (op == nullptr) nd4j_printf("Can't find requested operation: [%lld]\n", hash); // we're using the same fake nodeId everywhere here std::vector<nd4j::NDArray*> inputs(numInputs); 
std::vector<nd4j::NDArray*> outputs(numOutputs); std::vector<double> ttArgs(numTArgs); std::vector<bool> bbArgs(0); std::vector<Nd4jLong> iiArgs(numIArgs); // filling block now with inputs for (int e = 0; e < numInputs; e++) { auto shape = reinterpret_cast<Nd4jLong *>(inputShapes[e]); void *buffer = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[e]; inputs[e] = new nd4j::NDArray(buffer, shape); } // if not inplace - transferring output arrays if (!isInplace) for (int e = 0; e < numOutputs; e++) { // we want to keep original output shape intact auto shape = shape::copyShape(reinterpret_cast<Nd4jLong *>(outputShapes[e])); void *buffer = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : outputBuffers[e]; auto array = new nd4j::NDArray(buffer, shape); outputs[e] = array; // and we want to release shape copy once we're done array->triggerAllocationFlag(false, true); } for (int e = 0; e < numIArgs; e++) iiArgs[e] = iArgs[e]; for (int e = 0; e < numTArgs; e++) ttArgs[e] = tArgs[e]; // hypothetically at this point we have everything filled auto dZ = op->execute(inputs, outputs, ttArgs, iiArgs, bbArgs, isInplace); //auto dZ = op->execute(inputs, ttArgs, iiArgs, isInplace); if (!isInplace) for (int e = 0; e < numOutputs; e++) { //shape::printShapeInfoLinear("JVM output shape", (int *) outputShapes[e]); //shape::printShapeInfoLinear("C++ output shape", (int *) outputs[e]->shapeInfo()); //outputs[e]->printIndexedBuffer("C++ raw output"); //outputs[e]->printBuffer("C++ indexed output"); if (outputs[e]->ordering() != shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e]))) outputs[e]->streamline(shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e]))); } /* if (!isInplace) { if (dZ->size() != numOutputs) { return ND4J_STATUS_BAD_OUTPUT; } for (int e = 0; e < numOutputs; e++) { auto buffer = (T *) outputBuffers[e]; auto shape = (int *) outputShapes[e]; nd4j::NDArray<T> tmp(buffer, shape); if (tmp.lengthOf() != dZ->at(e)->lengthOf()) { nd4j_printf("Provided output array for [%s] has length of %i, but actual dZ has length of %i\n", op->getOpName()->c_str(), tmp.lengthOf(), dZ->at(e)->lengthOf()); return ND4J_STATUS_BAD_OUTPUT; } tmp.assign(dZ->at(e)); } } else { // if op is inplace, our ResultSet holds pointers dZ->purge(); } delete dZ; */ for (auto v: inputs) delete v; for (auto v: outputs) delete v; return Status::OK(); } int NativeOps::execCustomOp(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool* bArgs, int numBArgs, bool isInplace) { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash); return realExec(op, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs, tArgs, numTArgs, iArgs, numIArgs, bArgs, numBArgs, isInplace); } int NativeOps::registerGraph(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer flatBufferPointer) { auto graph = nd4j::graph::GraphExecutioner::importFromFlatPointer(flatBufferPointer); nd4j::graph::GraphHolder::getInstance()->registerGraph(graphId, graph); return ND4J_STATUS_OK; } static VariablesSet* executeStoredGraphT(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) { auto graph = nd4j::graph::GraphHolder::getInstance()->pullGraph(graphId); auto varSpace = 
graph->getVariableSpace()->clone(); std::vector<nd4j::NDArray*> handles; for (int e = 0; e < numInputs; e++) { auto idx = inputIndices[e]; // we'll delete this array later, together with cloned VariableSpace auto array = new nd4j::NDArray(inputBuffers[e], reinterpret_cast<Nd4jLong *>(inputShapes[e])); handles.emplace_back(array); if (varSpace->hasVariable(idx)) { auto var = varSpace->getVariable(idx); if (var->hasNDArray()) delete var->getNDArray(); var->setNDArray(array); } else varSpace->putVariable(idx, array); } auto dZ = nd4j::graph::GraphExecutioner::execute(graph, varSpace); auto varSet = new nd4j::graph::VariablesSet(dZ); if (dZ == ND4J_STATUS_OK) { // pull back results, and provide them auto outputs = graph->fetchOutputs(); for (int e = 0; e < outputs->size(); e++) { // we're only getting variable ID/Index from original grap. values will be taken from cloned workspace std::pair<int, int> varId(outputs->at(e)->id(), outputs->at(e)->index()); auto var = varSpace->getVariable(varId); varSet->push_back(var->clone()); } delete outputs; } delete varSpace; return varSet; } VariablesSet* NativeOps::executeStoredGraph(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) { return executeStoredGraphT(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs); } int NativeOps::unregisterGraph(Nd4jPointer *extraPointers, Nd4jLong graphId) { nd4j::graph::GraphHolder::getInstance()->dropGraphAny(graphId); return ND4J_STATUS_OK; } void NativeOps::deletePointerArray(Nd4jPointer pointer) { Nd4jPointer *ptr = reinterpret_cast<Nd4jPointer *>(pointer); delete[] ptr; } void NativeOps::deleteIntArray(Nd4jPointer pointer) { auto ptr = reinterpret_cast<int *>(pointer); delete[] ptr; } void NativeOps::deleteLongArray(Nd4jPointer pointer) { auto ptr = reinterpret_cast<Nd4jLong *>(pointer); delete[] ptr; } template <typename T> static void deleteVariablesSetT(Nd4jPointer pointer) { nd4j::graph::VariablesSet* ptr = reinterpret_cast<nd4j::graph::VariablesSet*>(pointer); delete ptr; } void NativeOps::deleteVariablesSet(Nd4jPointer pointer) { deleteVariablesSetT<double>(pointer); } void NativeOps::deleteShapeList(Nd4jPointer shapeList) { nd4j::ShapeList* list = reinterpret_cast<nd4j::ShapeList*>(shapeList); list->destroy(); delete list; } const char* NativeOps::getAllOperations() { return nd4j::OpTracker::getInstance()->exportOperations(); } Nd4jPointer NativeOps::getGraphState(Nd4jLong id) { return (Nd4jPointer) new nd4j::graph::GraphState(id); } void NativeOps::deleteGraphState(Nd4jPointer state) { auto stateP = reinterpret_cast<nd4j::graph::GraphState*>(state); delete stateP; } Nd4jStatus execCustomOpWithScope(Nd4jPointer *extraPointers, nd4j::graph::GraphState *state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) { /** * That's basically exec, with VariableSpace provided in GraphState: * depending on operation (i.e. 
while of if), different logic executors could be used */ auto graph = state->graph(); auto varSpace = state->variableSpace(); // Node is dynamically created, and has nothing beyond it: only inputs and outputs // this node has id of 0, and inputs are Node node(OpType_LOGIC, opHash, 0); // mapping inputs for (int e = 0; e < numInputs; e++) { auto buffer = inputBuffers[e]; auto shapeInfo = reinterpret_cast<Nd4jLong *>(inputShapes[e]); auto array = new nd4j::NDArray(buffer, shapeInfo, varSpace->workspace()); // now we just put array to VarSpace varSpace->putVariable(0, e, array); node.pickInput(0, e); } // mapping scopes for (int e = 0; e < numScopes; e++) { // we should check scope existence in GraphState/Graph int scopeId = (int) scopes[e]; if (!state->hasScope(scopeId)) { // nd4j_printf("execCustomOpWithScope: referenced scope [%i] doesn't exist\n", scopeId); return Status::THROW(); } node.pickInput(scopeId, 0); } auto dZ = LogicExecutor::processNode(graph, &node); if (dZ != Status::OK()) return dZ; // mapping outputs for (int e = 0; e < numOutputs; e++) { auto buffer = outputBuffers[e]; auto shapeInfo = reinterpret_cast<Nd4jLong *>(outputShapes[e]); NDArray array(buffer, shapeInfo, varSpace->workspace()); // now we just put array to VarSpace to the same ID //varSpace->putVariable(0, e, array); auto t = varSpace->getVariable(0, e)->getNDArray(); array.assign(t); } // removing input variables for (int e = 0; e < numInputs; e++) { varSpace->dropVariable(0, e); } // after some bla-bla-bla we should have Graph and Node for current op return Status::OK(); } Nd4jStatus NativeOps::execCustomOpWithScope(Nd4jPointer *extraPointers, Nd4jPointer state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) { return execCustomOpWithScope(extraPointers, reinterpret_cast<nd4j::graph::GraphState*>(state), opHash, scopes, numScopes, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs); } void NativeOps::deleteResultWrapper(Nd4jPointer ptr) { // just 0 room for compiler s@!t auto p = reinterpret_cast<nd4j::graph::ResultWrapper *>(ptr); delete p; } int NativeOps::estimateThreshold(Nd4jPointer *extraPointers, Nd4jPointer dX, Nd4jLong *dXShapeInfo, int N, float threshold) { throw std::runtime_error("estimateThreshold: Not implemented yet"); } /* * TypeDef: * void convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer dX, long N, int dstType, Nd4jPointer dZ); */ void NativeOps::convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer dX, Nd4jLong N, int dstType, Nd4jPointer dZ) { auto dx = reinterpret_cast<void *>(dX); auto dz = reinterpret_cast<void *>(dZ); if (srcType == ND4J_FLOAT8) { if (dstType == ND4J_FLOAT8) { // convertKernel<double, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::int8>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::uint8>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::int16>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::uint16>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { } else if (dstType == ND4J_FLOAT32) { 
//nd4j::TypeCast::convertGenericCuda<nd4j::float8, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, double>(extras, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_INT8) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<nd4j::int8, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { //convertKernel<nd4j::int8, nd4j::int8>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<int8_t, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<int8_t, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<int8_t, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<int8_t, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { // TODO: eventually we might want to add it } else if (dstType == ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<int8_t, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { nd4j::TypeCast::convertGenericCuda<int8_t, double>(extras, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_UINT8) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<uint8_t, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<uint8_t, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<uint8_t, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<uint8_t, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<uint8_t, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<uint8_t, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { // TODO: still might want to add } else if (dstType == ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<uint8_t, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { nd4j::TypeCast::convertGenericCuda<uint8_t, double>(extras, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_FLOAT16) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<float16, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<float16, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<float16, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<float16, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<float16, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<float16, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { // TODO: .... 
^^^ } else if (dstType == ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<float16, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { nd4j::TypeCast::convertGenericCuda<float16, double>(extras, dx, N, dz); } else if (dstType == ND4J_THRESHOLD) { //nd4j::convertToThreshold<float16>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_INT16) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<int16_t, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<int16_t, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<int16_t, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<int16_t, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<int16_t, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<int16_t, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { // TODO... } else if (dstType == ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<int16_t, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { nd4j::TypeCast::convertGenericCuda<int16_t, double>(extras, dx, N, dz); } else { printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_FLOAT24) { } else if (srcType == ND4J_FLOAT32) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<float, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<float, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<float, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<float, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<float, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<float, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { } else if (dstType == ND4J_DOUBLE) { nd4j::TypeCast::convertGenericCuda<float, double>(extras, dx, N, dz); } else if (dstType == ND4J_THRESHOLD) { //nd4j::convertToThreshold<float>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_DOUBLE) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<double, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<double, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<double, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<double, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<double, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<double, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { } else if (dstType == ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<double, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { // } else if (dstType == ND4J_THRESHOLD) { //nd4j::convertToThreshold<double>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, 
dstType); } } else if (srcType == ND4J_THRESHOLD) { if (dstType == ND4J_FLOAT16) { //nd4j::convertFromThreshold<float16>(nullptr, dx, N, dz); } else if (dstType == ND4J_FLOAT32) { //nd4j::convertFromThreshold<float>(nullptr, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { //nd4j::convertFromThreshold<double>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } Nd4jPointer NativeOps::createUtf8String(Nd4jPointer *extraPointers, const char *string, int length) { auto u = new nd4j::utf8string(string, length); return reinterpret_cast<Nd4jPointer>(u); } void NativeOps::deleteUtf8String(Nd4jPointer *extraPointers, Nd4jPointer ptr) { delete(reinterpret_cast<nd4j::utf8string*>(ptr)); }
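// Illustrative sketch, not from the original sources and not referenced by them:
// the recursive prescan used by encodeThresholdP2Int above follows the classic blocked
// exclusive-scan pattern -- scan each block locally, scan the per-block totals
// (that is the recursive call), then "uniform add" each scanned total back onto
// its block. The hypothetical host-side helper below mirrors that control flow
// for a fixed block size; the helper name and the int element type are assumptions
// made only for illustration.
static void illustrateBlockedExclusiveScan(const int *in, int *out, int n, int blockSize) {
    int numBlocks = (n + blockSize - 1) / blockSize;
    int *blockSums = new int[numBlocks];
    // 1) exclusive scan inside every block, remembering each block's total
    for (int b = 0; b < numBlocks; b++) {
        int running = 0;
        for (int i = b * blockSize; i < n && i < (b + 1) * blockSize; i++) {
            out[i] = running;       // exclusive: sum accumulated before in[i]
            running += in[i];
        }
        blockSums[b] = running;     // total of block b
    }
    // 2) exclusive scan of the per-block totals (the GPU code recurses here)
    int carry = 0;
    for (int b = 0; b < numBlocks; b++) {
        int t = blockSums[b];
        blockSums[b] = carry;
        carry += t;
    }
    // 3) "uniform add": offset every element of block b by its scanned block sum
    for (int b = 0; b < numBlocks; b++)
        for (int i = b * blockSize; i < n && i < (b + 1) * blockSize; i++)
            out[i] += blockSums[b];
    delete[] blockSums;
}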
ef6f9cbef82006e8eca7beaf43e9967843c58a6f.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ #include "../NativeOps.h" #include <cuda.h> #include <cuda_launch_config.h> #include <buffer.h> #include <helpers/shape.h> #include "../Environment.h" #include <helpers/TAD.h> #include <ops/specials.h> #include <loops/reduce3.h> #include <loops/indexreduce.h> #include <loops/summarystatsreduce.h> #include <loops/random.h> #include <loops/broadcasting.h> #include <loops/broadcasting_bool.h> #include <loops/scalar.h> #include <loops/scalar_bool.h> #include <loops/pairwise_transform.h> #include <loops/pairwise_bool.h> #include <loops/transform_same.h> #include <loops/transform_float.h> #include <loops/transform_strict.h> #include <loops/transform_bool.h> #include <loops/transform_any.h> #include <loops/reduce_float.h> #include <loops/reduce_same.h> #include <loops/reduce_bool.h> #include <loops/reduce_long.h> //#include <thread> #include <map> #include <cuda.h> #include <cuda_runtime_api.h> #include <cuda_runtime.h> #include <cuda_device_runtime_api.h> #include <pointercast.h> #include <stdio.h> #include <stdlib.h> #include <loops/type_conversions.h> #include <op_boilerplate.h> #include <loops/aggregates.h> #include <helpers/threshold.h> #include <ShapeList.h> #include <Context.h> #include <ops/specials_cuda.h> #include <graph/exceptions/datatype_exception.h> #include <helpers/CudaLaunchHelper.h> // FIXME: we need cuda-specific implementations #include <helpers/logger.h> #include <NDArray.h> #include <GraphExecutioner.h> #include <graph/GraphHolder.h> #include <graph/VariablesSet.h> #include <ops/declarable/OpRegistrator.h> #include <ops/declarable/CustomOperations.h> //#include <sys/time.h> // b40c only available for gcc :( #ifdef __clang__ // do nothing #elif __GNUC__ #include <b40c/util/error_utils.cuh> #include <b40c/util/multiple_buffering.cuh> #include <b40c/radix_sort/enactor.cuh> #endif #include <curand.h> #include <Status.h> #include <helpers/DebugHelper.h> using namespace nd4j; #include <loops/special_kernels.h> cudaDeviceProp *deviceProperties; cudaFuncAttributes *funcAttributes = new cudaFuncAttributes[64]; int blockLimit = 128; int maxThreads = 512; bool allowedP2P = false; bool supportedP2P = false; #ifdef __EXPERIMENTAL__ bool experimentalSupport = true; #else bool experimentalSupport = false; #endif int minThreads = 32; __constant__ char deviceConstantMemory[49152]; typedef struct { long streamId; long callId; } __syncInfo; typedef __syncInfo SyncInfo; /** * This is utility kernel, that updates given special buffer with proper values in device memory */ extern "C" __global__ void prepareShapeBuffer(int *dimension, int *maxDimension, Nd4jLong *specialPointer, int rows, nd4j::DataType dataType) { Nd4jLong tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid > 0) return; dimension[0] = 0; maxDimension[0] = 1; specialPointer[0] = 2; 
specialPointer[1] = rows; specialPointer[2] = 1; specialPointer[3] = 1; specialPointer[4] = 1; specialPointer[5] = 0; specialPointer[6] = 1; specialPointer[7] = 99; ArrayOptions::setDataType(specialPointer, dataType); //printf("special[0]: [%lld]\n", (long long) specialPointer[0]); //shape::printShapeInfoLinear("prepareShapeBuffer", specialPointer); } // this method isn't used, left here for legacy and caution purposes // TLDR: don't use this way, it sucks void CUDART_CB syncCallback(cudaStream_t stream, cudaError_t status, void *data){ SyncInfo *sync = reinterpret_cast<SyncInfo *>(data); //printf("Finished stream: [%i], kernel call: [%i]\n", sync->streamId, sync->callId); } // this method just does type conversion in a fancy way int getDeviceId(Nd4jPointer ptrToDeviceId) { return (int)(Nd4jLong)ptrToDeviceId; } template <typename T> dim3 getOptimalDimensions(Nd4jLong n,cudaFuncAttributes attributes, cudaDeviceProp properties) { // we can combine the two to compute a block size int num_threads = block_size_with_maximum_potential_occupancy(attributes, properties); // no real sense launching more threads than the number of elements we have if (num_threads > n) num_threads = n; if (maxThreads > 0 && num_threads > maxThreads) num_threads = maxThreads; // compute the number of blocks of size num_threads to launch int num_blocks = n / num_threads; // check for partial block at the end if (num_blocks > blockLimit) num_blocks = blockLimit; if (num_blocks < 4 && n > 128) { num_blocks = 4; num_threads = n / num_blocks; } if (num_threads >= 768) { num_blocks = num_blocks * 2; num_threads = num_threads / 2; } if(n % num_threads && num_blocks < blockLimit) ++num_blocks; //(num_threads * sizeof(T)) + attributes.sharedSizeBytes); return dim3(num_blocks,num_threads, 3000); } int getBaseMemorySize(int xRank, cudaFuncAttributes funcAttr) { int memory_limit = 256; //funcAttr.sharedSizeBytes; // TODO: remove this later memory_limit += sizeof(UnifiedSharedMemory) + 32; // sizeof(shape::TAD) + (xRank * 4 * 4) /* if (xRank == 0) xRank = 2; memory_limit += (xRank * 2 + 4) * 3 * 4; // we reserve memory for xShape + T1/T2 shapes memory_limit += yRank == 0 ? 0 : (yRank * 2 + 4) * 4; memory_limit += zRank == 0 ? 
0 : (zRank * 2 + 4) * 4; memory_limit += (xRank * 4) * 6; memory_limit += MAX_RANK * 4; // special case, needed roughtly in one pase */ return memory_limit; } /* * Basic CUDA constants here: number of blocks per MP */ int getDeviceBlockThreshold(int deviceId) { int ccMinor = deviceProperties[deviceId].minor; int ccMajor = deviceProperties[deviceId].major; int blockThreshold = 8; if (ccMajor >= 5) blockThreshold = 32; else if (ccMajor == 3) blockThreshold = 16; else if (ccMajor < 3) blockThreshold = 8; return blockThreshold; } dim3 getBasicLaunchParams(int deviceId, long problemLength, int sharedMemoryPerThread, cudaFuncAttributes funcAttr) { int countMP = deviceProperties[deviceId].multiProcessorCount; int blockThreshold = getDeviceBlockThreshold(deviceId); int num_threads = problemLength / (countMP * blockThreshold); num_threads = nd4j::math::nd4j_min<int>(num_threads, maxThreads); num_threads = nd4j::math::nd4j_max<int>(num_threads, 64); num_threads = nd4j::math::nd4j_max<int>(num_threads, minThreads); int num_blocks = nd4j::math::nd4j_max<int>(problemLength / num_threads, 1); num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit); int memory_limit = (sharedMemoryPerThread * num_threads) + getBaseMemorySize(1, funcAttr); dim3 launchDims = dim3(num_blocks, num_threads, memory_limit); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Preliminary basic launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i]\n", num_blocks, num_threads, memory_limit); return launchDims; } /* * This message returns shared memory threshold value. default overflow ratio is 0.3 */ int getDeviceSharedThreshold(int deviceId) { int ccMinor = deviceProperties[deviceId].minor; int ccMajor = deviceProperties[deviceId].major; // please note threshold isn't multiple of 32, and that's NOT a mistake int shmemThreshold; if (ccMajor == 6 && ccMinor == 0) shmemThreshold = 65536; else if (ccMajor == 6 && ccMinor == 1) shmemThreshold = 49152; else if (ccMajor == 5 && ccMinor == 2) shmemThreshold = 98304; else if (ccMajor == 5) shmemThreshold = 65536; else if (ccMajor == 3 && ccMinor == 7) shmemThreshold = 114688; else shmemThreshold = 49152; return shmemThreshold / 0.3; } dim3 getBetterDimensions(int deviceId, int numTads, int tadLength, int xRank, cudaFuncAttributes funcAttr, int dimensionLength, int elementSize, int reduction) { int num_threads = nd4j::math::nd4j_min<int>(tadLength, maxThreads); int countMP = deviceProperties[deviceId].multiProcessorCount; int regPerBlock = deviceProperties[deviceId].regsPerBlock; int warpSize = deviceProperties[deviceId].warpSize; int blockThreshold = getDeviceBlockThreshold(deviceId); int shmemThreshold = getDeviceSharedThreshold(deviceId); // round num_threads to nearest warpSize num_threads -= num_threads % warpSize; num_threads = nd4j::math::nd4j_max<int>(1, num_threads); if (num_threads < warpSize && tadLength < warpSize) num_threads = tadLength; // since we use shared memory as fast memory for some cases - we need to count that in int memory_limit = getBaseMemorySize(xRank, funcAttr); int memory_floor = memory_limit; int effective_block_limit = countMP * blockThreshold; int num_blocks = numTads; //nd4j::math::nd4j_min<int>(numTads, effective_block_limit); int desiredShared = shmemThreshold / nd4j::math::nd4j_max<int>((num_blocks / countMP), 1); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Launch context: numBlocks: [%i], numThreads: [%i], countMap: [%i], shmemThreshold: [%i], desiredShared: [%i], elementSize: [%i]\n", num_blocks, 
num_threads, countMP, shmemThreshold, desiredShared, elementSize); // at this moment we've stored all required information for things. time to count in reduction multipliers int reduction_per_block = 0; bool found = false; if (reduction > 0) while (!found) { reduction_per_block = (num_threads * elementSize * reduction); if (memory_limit + reduction_per_block < desiredShared) { memory_limit += reduction_per_block; found = true; } else { if (num_threads > minThreads) { num_threads -= 32; } else { memory_limit += reduction_per_block; found = true; } } } // at this moment we know total memory used per block, and we also know per-mp limit. int max_active_blocks = shmemThreshold / nd4j::math::nd4j_max<int>(memory_limit, 1); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("MAB: [%i], memory_floor: [%i], memory_limit: [%i], reductionPerBlock: [%i]\n", max_active_blocks, memory_floor, memory_limit, reduction_per_block); // we don't want to spawn more blocks, that gpu can actually handle without queue //num_blocks = nd4j::math::nd4j_min<int>(num_blocks, max_active_blocks); num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit); // if (num_blocks > countMP) // num_blocks = num_blocks - (num_blocks % countMP); num_blocks = nd4j::math::nd4j_max<int>(num_blocks, 1); int targetBlocksPerMP = num_blocks / countMP; // now we know desired number of blocks wrt to shared memory. So, now we should take in account number of threads per SM if (targetBlocksPerMP * num_threads > 2048) { while (targetBlocksPerMP * num_threads > 2048) { if (num_threads <= minThreads) break; num_threads -= 32; } reduction_per_block = (num_threads * elementSize * reduction); memory_limit = memory_floor + reduction_per_block; } if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Preliminary reduce launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i], reduction_per_block: [%i], blocksPerMP: [%i]\n", num_blocks, num_threads, memory_limit, reduction_per_block, targetBlocksPerMP); return dim3(num_blocks,num_threads, memory_limit); } /* * This method returns kernel launch param for linear memory access */ dim3 getFlatLaunchParams(int deviceId, Nd4jLong *dXShapeInfo, Nd4jLong *dYShapeInfo, cudaFuncAttributes funcAttr) { auto xRank = shape::rank(dXShapeInfo); auto yRank = dYShapeInfo == nullptr ? 0 : shape::rank(dYShapeInfo); auto zRank = 0; int memory_limit = getBaseMemorySize(xRank, funcAttr); int countMP = deviceProperties[deviceId].multiProcessorCount; int regPerBlock = deviceProperties[deviceId].regsPerBlock; int blockThreshold = getDeviceBlockThreshold(deviceId); int shmemThreshold = getDeviceSharedThreshold(deviceId); auto xLength = shape::length(dXShapeInfo); int effective_block_limit = countMP * blockThreshold; // for flat calls we just want as much concurrent blocks, as possible, and we're not tied to TAD here int num_threads = xLength / effective_block_limit; if (num_threads < minThreads) num_threads = minThreads; num_threads = num_threads - (num_threads % 32); int memory_floor = memory_limit; int num_blocks = xLength / num_threads; num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit); // num_blocks = nd4j::math::nd4j_min<int>(num_blocks, effective_block_limit); num_blocks = nd4j::math::nd4j_max<int>(num_blocks, 1); int targetBlocksPerMP = num_blocks / countMP; // now we know desired number of blocks wrt to shared memory. 
So, now we should take in account number of threads per SM if (targetBlocksPerMP * num_threads > 2048 && num_threads >= 128) { while (targetBlocksPerMP * num_threads > 2048) { if (num_threads <= minThreads) break; num_threads -= 32; } } if (xLength / num_threads > blockLimit) num_blocks *= 2; dim3 launchDims = dim3(num_blocks, num_threads, memory_limit); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Preliminary scalar launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i], blocksPerMP: [%i], problemLength: [%i], effectiveBlockLimit: [%i]\n", num_blocks, num_threads, memory_limit, targetBlocksPerMP, xLength, effective_block_limit); return launchDims; } /** * This method returns kernel launch params with TAD-based memory access * * @param deviceId * @param dXShapeInfo * @param tadShapeInfo * @param funcAttr * @param dimensionLength * @param elementSize * @param reductionSize * @return */ dim3 getReduceLaunchParams(int deviceId, Nd4jLong *dXShapeInfo, Nd4jLong *tadShapeInfo, cudaFuncAttributes funcAttr, int dimensionLength, int elementSize, int reductionSize) { Nd4jLong tadLength = 0; Nd4jLong numTads = 0; if (tadShapeInfo != nullptr) { tadLength = shape::length(tadShapeInfo); numTads = shape::length(dXShapeInfo) / tadLength; if (tadLength == 1) { if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("A xLength: [%i], zLength: [%i]\n", shape::length(dXShapeInfo), shape::length(tadShapeInfo)); } } else{ // we have special case - reduction along all dimensions tadLength = nd4j::math::nd4j_min<int>(shape::length(dXShapeInfo), 768); numTads = shape::length(dXShapeInfo) / tadLength; } auto xRank = shape::rank(dXShapeInfo); int zRank = tadShapeInfo == nullptr ? 0 : shape::rank(tadShapeInfo); dim3 launchDims = getBetterDimensions(deviceId, numTads, tadLength, xRank, funcAttr, dimensionLength, elementSize, reductionSize); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) { //|| launchDims.dX == 1 printf("Reduce LaunchParams: xLength: [%i], numTads: [%i], tadLength: [%i], launchDims.dX: [%i], launchDims.dY: [%i], launchDims.dZ: [%i]\n", shape::length(dXShapeInfo), numTads, tadLength, launchDims.x, launchDims.y, launchDims.z); } return launchDims; } /** * Returns optimal launch parameters * given the extra pointers passed in. * The extra pointer should be * the host pointer for the shape information * associated with the data. * From there it is used to obtain the length * from which we can derive the optimal launch parameters. 
* */ template <typename T> dim3 getOptimalLaunchParameters(const Nd4jLong *hXShapeInfo, cudaFuncAttributes attributes, cudaDeviceProp properties) { auto n = shape::length(hXShapeInfo); dim3 launchDims = getOptimalDimensions<T>(n,attributes, properties); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Params: gridSize: [%i], blockSize: [%i], shMem: [%i], problemLength: [%i], totalThreads:[%i]\n", launchDims.x, launchDims.y, launchDims.z, n, (launchDims.x * launchDims.y)); return launchDims; } nd4j::buffer::Buffer<Nd4jLong> * createScalarBuffer(cudaStream_t stream) { Nd4jLong *scalarShapeInfo = shape::createScalarShapeInfo(); nd4j::buffer::Buffer<Nd4jLong> *buff = nd4j::buffer::createBuffer(scalarShapeInfo,shape::shapeInfoLength(2), stream); nd4j::buffer::copyDataToGpu(&buff, stream); return buff; } class ScalarShapeInformation { private: nd4j::buffer::Buffer<Nd4jLong> *scalarDimension; nd4j::buffer::Buffer<Nd4jLong> *scalarShapeInfo; // std::thread::id threadId; public: ScalarShapeInformation(cudaStream_t stream) { auto scalarDimensionBuff = reinterpret_cast<Nd4jLong *>(malloc(sizeof(Nd4jLong))); CHECK_ALLOC(scalarDimensionBuff, "Failed to allocate ShapeInfoBuffer"); scalarDimensionBuff[0] = MAX_DIMENSION; scalarDimension = nd4j::buffer::createBuffer(scalarDimensionBuff,1, stream); scalarShapeInfo = createScalarBuffer(stream); // threadId = std::this_thread::get_id(); } ~ScalarShapeInformation() { nd4j::buffer::freeBuffer(&scalarShapeInfo); nd4j::buffer::freeBuffer(&scalarDimension); } Nd4jLong *getShapeInfoHostPointer() { return scalarShapeInfo->data; } Nd4jLong * getShapeInfoGpuPointer() { return scalarShapeInfo->gData; } Nd4jLong * getDimensionHostPointer() { return scalarDimension->data; } Nd4jLong * getDimensionGpuPointer() { return scalarDimension->gData; } }; template <typename T> class ScalarInfo { nd4j::buffer::Buffer<T> *scalarData; ScalarShapeInformation *shapeInfo; T finalResult; cudaStream_t streamRef; public: ScalarInfo(cudaStream_t stream) { T *scalarResult = reinterpret_cast<T*>(malloc(sizeof(T))); CHECK_ALLOC(scalarResult, "Failed to allocate new scalar buffer"); shapeInfo = new ScalarShapeInformation(stream); scalarData = nd4j::buffer::createBuffer(scalarResult,1, stream); streamRef = stream; nd4j::buffer::copyDataToGpu(&scalarData, stream); } T getFinalResultFromDevice() { nd4j::buffer::copyDataFromGpu(&scalarData, streamRef); return scalarData->data[0]; } /** * Get the device shape information * representing a scalar */ Nd4jLong *getDeviceShapeInfo() { return shapeInfo->getShapeInfoGpuPointer(); } /** * Get the dZ pointers */ T *getDevicePointer() { return scalarData->gData; } /** * Get the infinite dimension device pointer */ Nd4jLong *getDimensionDevicePointer() { return shapeInfo->getDimensionGpuPointer(); } ~ScalarInfo() { nd4j::buffer::freeBuffer(&scalarData); delete shapeInfo; } }; void NativeOps::execPairwiseTransform( Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraParams) { auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); dim3 launchDims(256, 1024, 8192); if (xType != zType && yType != zType) throw std::runtime_error("NativeOps::execPairwiseTransform requires Z 
operand to have either X or Y type"); #ifndef __ND4J_EXPERIMENTAL__ BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::pairwise_transforms::PairWiseTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, dY, dYShapeInfo, hYShapeInfo, dZ, dZShapeInfo, hZShapeInfo, extraParams), LIBND4J_TYPES, LIBND4J_TYPES) #else BUILD_SINGLE_SELECTOR_THRICE(xType, functions::pairwise_transforms::PairWiseTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, dY, dYShapeInfo, hYShapeInfo, dZ, dZShapeInfo, hZShapeInfo, extraParams), LIBND4J_TYPES) #endif DEBUG_KERNEL(stream, opNum); } void NativeOps::execPairwiseTransformBool( Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraParams) { auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); if (!DataTypeUtils::isB(zType)) throw nd4j::datatype_exception::build("NativeOps::execPairwiseTransformBool wrong Z operand data type", nd4j::DataType::BOOL, zType); if (yType != xType) throw nd4j::datatype_exception::build("NativeOps::execPairwiseTransformBool both operands must have same data type", xType, yType); auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); dim3 launchDims(256, 1024, 16384); BUILD_DOUBLE_SELECTOR(xType, zType, functions::pairwise_transforms::PairWiseBoolTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraParams), LIBND4J_TYPES, BOOL_TYPES) } //////////////////////////////////////////////////////////////////////// void NativeOps::execSummaryStatsScalar(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, bool biasCorrected) { auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]); dim3 launchDims = dim3(256, 256, 32768); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); BUILD_DOUBLE_SELECTOR(xType, zType, functions::summarystats::SummaryStatsReduce, ::execSummaryStatsReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, nullptr, nullptr, biasCorrected, reductionPointer), LIBND4J_TYPES, FLOAT_TYPES); } void NativeOps::execBroadcastBool( Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, int *dimension, int dimensionLength) { auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]); auto dTADShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]); auto dTADOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]); if (!DataTypeUtils::isB(zType)) throw 
std::runtime_error("NativeOps::execBroadcastBool requires Z operand to have BOOL type"); if (yType != xType) throw std::runtime_error("NativeOps::execBroadcastBool requires both X & Y operands to have same type"); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F3 opNum:[%i]\n", opNum); dim3 launchDims(256, 256, 16384); BUILD_DOUBLE_SELECTOR(xType, zType, functions::broadcast::BroadcastBool, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, dTADShapeInfo, dTADOffsets, dTADShapeInfoZ, dTADOffsetsZ), LIBND4J_TYPES, BOOL_TYPES) DEBUG_KERNEL(stream, opNum); } /** * * @param opNum * @param dX * @param dXShapeInfo * @param dY * @param dYShapeInfo * @param dZ * @param dZShapeInfo * @param dimension * @param dimensionLength */ void NativeOps::execBroadcast( Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, int *dimension, int dimensionLength) { /* cudaEvent_t start; cudaEventCreateWithFlags(&start, cudaEventDisableTiming); timespec tsX; timespec tsY; clock_gettime(CLOCK_REALTIME, &tsX); */ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]); auto dTADShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]); auto dTADOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F3 opNum:[%i]\n", opNum); dim3 launchDims(256, 256, 16384); #ifndef __ND4J_EXPERIMENTAL__ BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::broadcast::Broadcast, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, dTADShapeInfo, dTADOffsets, dTADShapeInfoZ, dTADOffsetsZ), LIBND4J_TYPES, LIBND4J_TYPES); #else BUILD_SINGLE_SELECTOR_THRICE(xType, functions::broadcast::Broadcast, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, dTADShapeInfo, dTADOffsets, dTADShapeInfoZ, dTADOffsetsZ), LIBND4J_TYPES); #endif DEBUG_KERNEL(stream, opNum); } /** * * @param opNum * @param dX * @param dXShapeInfo * @param extraParams * @param dZ * @param dZShapeInfo */ void NativeOps::execReduceFloat(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo) { auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("FF7 opNum:[%i]\n", opNum); auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); if (!DataTypeUtils::isR(zType)) throw 
std::runtime_error("NativeOps::execReduceFloat requires Z operand to have floating point type"); auto xLength = shape::length(hXShapeInfo); auto blockWidth = 256; auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth); dim3 launchDims(numBlocks, blockWidth, 32768); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceFloatFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, reductionPointer, dTADShapeInfo), LIBND4J_TYPES, FLOAT_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "execReduceFloat(...) failed"); } void NativeOps::execReduceSame(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo) { auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("SF8 opNum:[%i]\n", opNum); auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); if (zType != xType) throw datatype_exception::build("NativeOps::execReduceSame requires both X & Z operands to have same type", xType, zType); auto xLength = shape::length(hXShapeInfo); auto blockWidth = 256; auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth); dim3 launchDims(numBlocks, blockWidth, 32768); BUILD_SINGLE_SELECTOR(xType, functions::reduce::ReduceSameFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, reductionPointer, dTADShapeInfo), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "execReduceSame(...) failed"); } void NativeOps::execReduceSame(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, int *dimension, int dimensionLength) { auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("SF7 opNum:[%i]\n", opNum); auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); auto xRank = shape::rank(hXShapeInfo); if (zType != xType) throw datatype_exception::build("NativeOps::execReduceSame requires both X & Z operands to have same type", xType, zType); auto numBlocks = shape::length(hZShapeInfo); dim3 launchDims(numBlocks, 256, 32768); BUILD_SINGLE_SELECTOR(xType, functions::reduce::ReduceSameFunction, ::execReduceXD(launchDims, stream, opNum, xRank, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "execReduceSame(...) 
failed"); } //////////////////////////////////////////////////////////////////////// void NativeOps::execReduceLong(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, int *dimension,int dimensionLength) { auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("LF7 opNum:[%i]\n", opNum); auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); if (zType != nd4j::DataType::INT64) throw datatype_exception::build("NativeOps::execReduceLong wrong Z data type", nd4j::DataType::INT64, zType); auto xRank = shape::rank(hXShapeInfo); auto numBlocks = shape::length(hZShapeInfo); dim3 launchDims(numBlocks, 256, 32768); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceLongFunction, ::execReduceXD(launchDims, stream, opNum, xRank, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES, LONG_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "execReduceLong(...) failed"); } //////////////////////////////////////////////////////////////////////// void NativeOps::execReduceLong(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo) { auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("LF7 opNum:[%i]\n", opNum); auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); if (zType != nd4j::DataType::INT64) throw datatype_exception::build("NativeOps::execReduceLong wrong Z data type", nd4j::DataType::INT64, zType); auto xLength = shape::length(hXShapeInfo); auto blockWidth = 256; auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth); dim3 launchDims(numBlocks, blockWidth, 32768); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceLongFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, reductionPointer, dTADShapeInfo), LIBND4J_TYPES, LONG_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "execReduceLong(...) 
failed"); } //////////////////////////////////////////////////////////////////////// void NativeOps::execReduceBool(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, int *dimension, int dimensionLength) { auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("BF7 opNum:[%i]\n", opNum); auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); if (zType != nd4j::DataType::BOOL) throw std::runtime_error("NativeOps::execReduceBool requires Z operand to have BOOL type"); auto xRank = shape::rank(hXShapeInfo); auto numBlocks = shape::length(hZShapeInfo); dim3 launchDims(numBlocks, 256, 32768); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceBoolFunction, ::execReduceXD(launchDims, stream, opNum, xRank, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES, BOOL_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "execReduceBool(...) failed"); } //////////////////////////////////////////////////////////////////////// void NativeOps::execReduceBool(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo) { auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("BF7 opNum:[%i]\n", opNum); auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); if (zType != nd4j::DataType::BOOL) throw std::runtime_error("NativeOps::execReduceBool requires Z operand to have BOOL type"); auto xLength = shape::length(hXShapeInfo); auto blockWidth = 256; auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth); dim3 launchDims(numBlocks, blockWidth, 32768); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceBoolFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, reductionPointer, dTADShapeInfo), LIBND4J_TYPES, BOOL_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "execReduceBool(...) 
failed"); } /** * * @param opNum * @param dX * @param dXShapeInfo * @param extraParams * @param dZ * @param dZShapeInfo * @param dimension * @param dimensionLength */ void NativeOps::execIndexReduce( Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, int *dimension,int dimensionLength) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); Nd4jLong *hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); Nd4jLong *dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); Nd4jLong *dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F2 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); void *reductionPointer = reinterpret_cast<void *>(extraPointers[4]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); auto numBlocks = shape::length(hZShapeInfo); dim3 launchDims(numBlocks, 256, 32768); if (zType != nd4j::DataType::INT64) throw datatype_exception::build("NativeOps::execIndexReduce requires Z operand to have INT64 type", zType); auto dz = reinterpret_cast<Nd4jLong*>(dZ); BUILD_SINGLE_SELECTOR(xType, functions::indexreduce::IndexReduce, ::executeIndexReduce(launchDims, stream, opNum, dX, dXShapeInfo, shape::rank(hXShapeInfo), extraParams, dz, dZShapeInfo, shape::rank(hZShapeInfo), dimension, dimensionLength, 1, allocationPointer, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES); } /** * * @param opNum * @param dX * @param dXShapeInfo * @param extraParams * @param dZ * @param dZShapeInfo */ void NativeOps::execReduceFloat( Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, int *dimension,int dimensionLength) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F8 opNum:[%i]\n", opNum); void *reductionPointer = reinterpret_cast<void *>(extraPointers[4]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); auto xRank = shape::rank(hXShapeInfo); auto numBlocks = shape::length(hZShapeInfo); dim3 launchDims(numBlocks, 256, 32768); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceFloatFunction, ::execReduceXD(launchDims, stream, opNum, xRank, dX,dXShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES, FLOAT_TYPES); } /** * * @param opNum * @param dX * @param dXShapeInfo * @param extraParams */ void NativeOps::execIndexReduceScalar( Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo){ if (nd4j::Environment::getInstance()->isDebug()) printf("F1 opNum:[%i]\n", opNum); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); // void *resultPointer = reinterpret_cast<float *>(extraPointers[5]); int 
*allocationPointer = reinterpret_cast<int *>(extraPointers[3]); void *reductionPointer = reinterpret_cast<void *>(extraPointers[4]); auto xLength = shape::length(hXShapeInfo); auto blockWidth = 256; auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth); dim3 launchDims(numBlocks, blockWidth, 32768); if (nd4j::Environment::getInstance()->isDebugAndVerbose() && launchDims.x == 1) printf("AF1 opNum:[%i]\n", opNum); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); // FIXME: we want Z to be one of integer types //if (!DataTypeUtils::isZ(zType)) // throw nd4j::datatype_exception("NativeOps::execIndexReduceScalar requires Z operand to have one of integer types") if (zType != nd4j::DataType::INT64) throw nd4j::datatype_exception::build("NativeOps::execIndexReduceScalar requires Z operand to have INT64 data type", zType); auto dz = reinterpret_cast<Nd4jLong*>(dZ); BUILD_SINGLE_SELECTOR(xType, functions::indexreduce::IndexReduce, ::executeIndexReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, shape::rank(hXShapeInfo), extraParams, dz, nullptr, 0, nullptr, 0, 1, allocationPointer, reductionPointer, nullptr, nullptr), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "execIndexReduceScalar(...) failed"); } void NativeOps::execTransformSame(Nd4jPointer *extraPointers,int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraParams) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); dim3 launchDims(512, 512, 16384); auto xRank = shape::rank(hXShapeInfo); auto zRank = shape::rank(hZShapeInfo); auto xType = ArrayOptions::dataType(hXShapeInfo); auto zType = ArrayOptions::dataType(hZShapeInfo); if (xType != zType) throw std::runtime_error("NativeOps::execTransformSame requires X & Z to have same type"); //nd4j_printf("Going to execute transformSame; opNum: %i\n", opNum); BUILD_SINGLE_SELECTOR(xType, functions::transform::TransformSame, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "execTransformSame(...) 
failed"); } void NativeOps::execTransformBool(Nd4jPointer *extraPointers,int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraParams) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); dim3 launchDims(512, 512, 16384); auto xRank = shape::rank(hXShapeInfo); auto zRank = shape::rank(hZShapeInfo); auto xType = ArrayOptions::dataType(hXShapeInfo); auto zType = ArrayOptions::dataType(hZShapeInfo); if (!DataTypeUtils::isB(zType)) throw std::runtime_error("NativeOps::execTransformBool requires Z to have same boolean type"); BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformBool, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES, BOOL_TYPES); } void NativeOps::execTransformAny(Nd4jPointer *extraPointers,int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraParams) { auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); auto xRank = shape::rank(hXShapeInfo); auto zRank = shape::rank(hZShapeInfo); auto xType = ArrayOptions::dataType(hXShapeInfo); auto zType = ArrayOptions::dataType(hZShapeInfo); switch (opNum) { case transform::IsMax: { bool scalarCheat = false; if (extraParams == nullptr) { scalarCheat = true; } auto special = reinterpret_cast<double *>(extraPointers[17]); if (scalarCheat) { auto scalarShape = ShapeBuilders::createScalarShapeInfo(nd4j::DataType::INT64); /** * In case of vector-input for IsMax, it just turns into IndexReduce call + further filler call */ execIndexReduceScalar(extraPointers, indexreduce::IndexMax, nullptr, hXShapeInfo, dX, dXShapeInfo, extraParams, nullptr, scalarShape, special, nullptr); Nd4jLong maxIdx = -119; checkCudaErrors(cudaStreamSynchronize(*stream)); cudaMemcpyAsync(&maxIdx, special, sizeof(Nd4jLong), cudaMemcpyDeviceToHost, *stream); checkCudaErrors(cudaStreamSynchronize(*stream)); int targetIdx = 0; if (shape::order(hXShapeInfo) == 'c' || shape::order(hXShapeInfo) == 'f' && maxIdx * shape::stride(hXShapeInfo)[shape::rank(hXShapeInfo) - 1] >= shape::length(hXShapeInfo)) targetIdx = maxIdx; else targetIdx = maxIdx * shape::stride(hXShapeInfo)[shape::rank(hXShapeInfo) - 1]; dim3 launchDims(1, 512, 1024); BUILD_SINGLE_SELECTOR(zType, fillIsMaxGeneric, (launchDims, stream, dZ, shape::length(hZShapeInfo), targetIdx), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "Legacy IsMax(...) 
failed"); delete[] scalarShape; } else { auto hostYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]); auto hostTShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[19]); auto tadMaxShapeInfo = reinterpret_cast<Nd4jLong *> (extraPointers[10]); auto tadMaxOffsets = reinterpret_cast<Nd4jLong *> (extraPointers[11]); int *dimension = reinterpret_cast<int *> (extraPointers[15]); int dimensionLength = getDeviceId(extraPointers[18]); // we call for IMax on specified dimension execIndexReduce(extraPointers, indexreduce::IndexMax, nullptr, hXShapeInfo, dX, dXShapeInfo, extraParams, nullptr, hostTShapeInfo, special, hostYShapeInfo, dimension, dimensionLength); DEBUG_KERNEL(stream, opNum); dim3 launchDims(256, 256, 16384); // at this point, all IMax indexes are gathered, and we execute filler BUILD_SINGLE_SELECTOR(zType, fillDimensionalIsMaxGeneric, (launchDims, stream, special, dZ, dZShapeInfo, tadMaxShapeInfo, dimension, dimensionLength, tadMaxOffsets), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "Legacy IsMax(...) failed"); } } break; default: { dim3 launchDims(512, 512, 16384); BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformAny, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES, LIBND4J_TYPES); } } } void NativeOps::execTransformStrict(Nd4jPointer *extraPointers,int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraParams) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); dim3 launchDims(512, 512, 16384); auto xRank = shape::rank(hXShapeInfo); auto zRank = shape::rank(hZShapeInfo); auto xType = ArrayOptions::dataType(hXShapeInfo); auto zType = ArrayOptions::dataType(hZShapeInfo); if (xType != zType || !DataTypeUtils::isR(xType)) throw datatype_exception::build("NativeOps::execTransformStrict requires X & Z to have same floating point type", xType, zType); switch (opNum) { case transform::SoftMax: case transform::SoftMaxDerivative: case transform::LogSoftMax: { if (shape::isVector(hXShapeInfo)) { int length = shape::length(hXShapeInfo); int block = nd4j::math::nd4j_min<int>(length, 256); launchDims.x = 1; launchDims.y = block; launchDims.z += (block * sizeof(double) * 4); BUILD_SINGLE_SELECTOR(xType, functions::transform::TransformStrict, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), FLOAT_TYPES); } else { auto shape = shape::shapeOf(hXShapeInfo); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); // special pointer for special buffer for special ops auto specialPointer = reinterpret_cast<double *>(extraPointers[6]); auto dimension = reinterpret_cast<int *>(specialPointer); auto maxDimension = dimension + 1; auto maxShapeBuffer = reinterpret_cast<Nd4jLong *>(maxDimension + 1); auto special = reinterpret_cast<double *> (maxShapeBuffer + (MAX_RANK * 2 + 4)); Nd4jPointer tempPointers[16]; tempPointers[0] = extraPointers[0]; tempPointers[1] = extraPointers[1]; tempPointers[2] = extraPointers[2]; tempPointers[3] = extraPointers[3]; tempPointers[4] = extraPointers[4]; tempPointers[5] = extraPointers[5]; tempPointers[6] = extraPointers[6]; tempPointers[7] = extraPointers[7]; tempPointers[8] = extraPointers[8]; tempPointers[9] = 
extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[12]; tempPointers[13] = extraPointers[13]; tempPointers[14] = extraPointers[14]; tempPointers[15] = extraPointers[15]; Nd4jLong maxShape[2] = {shape::shapeOf(hXShapeInfo)[0], 1}; auto hostMaxShapeBuffer = shape::shapeBuffer(2, xType, maxShape); tempPointers[7] = (Nd4jPointer) hostMaxShapeBuffer; tempPointers[8] = (Nd4jPointer) hostMaxShapeBuffer; prepareShapeBuffer<<<1, 1, 128, *stream>>>(dimension, maxDimension, maxShapeBuffer, shape[0], xType); DEBUG_KERNEL(stream, opNum); //shape::printShapeInfo(maxShapeBuffer); tempPointers[9] = extraPointers[12]; tempPointers[10] = extraPointers[13]; tempPointers[11] = extraPointers[14]; // max 3 execReduceSame(tempPointers, reduce::Max, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, nullptr, hostMaxShapeBuffer, special, maxShapeBuffer, maxDimension, 1); DEBUG_KERNEL(stream, opNum); tempPointers[8] = extraPointers[8]; tempPointers[9] = extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[10]; tempPointers[13] = extraPointers[11]; // sub 1 execBroadcast(tempPointers, broadcast::Subtract, hX, hXShapeInfo, dX, dXShapeInfo, nullptr, hostMaxShapeBuffer, special, maxShapeBuffer, nullptr, hZShapeInfo, dZ, dZShapeInfo, dimension, 1); DEBUG_KERNEL(stream, opNum); // exp 3 execTransformFloat(extraPointers, transform::Exp, hZ, hZShapeInfo, dZ, dZShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, extraParams); DEBUG_KERNEL(stream, opNum); tempPointers[8] = tempPointers[7]; tempPointers[9] = extraPointers[12]; tempPointers[10] = extraPointers[13]; tempPointers[11] = extraPointers[14]; //sum 1 execReduceSame(tempPointers, reduce::Sum, hZ, hZShapeInfo, dZ, dZShapeInfo, extraParams, nullptr, hostMaxShapeBuffer, special, maxShapeBuffer, maxDimension, 1); tempPointers[8] = extraPointers[8]; tempPointers[9] = extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[10]; tempPointers[13] = extraPointers[11]; // divide 3 execBroadcast(tempPointers, broadcast::Divide, hZ, hZShapeInfo, dZ, dZShapeInfo, nullptr, hostMaxShapeBuffer, special, maxShapeBuffer, nullptr, hZShapeInfo, dZ, dZShapeInfo, dimension, 1); DEBUG_KERNEL(stream, opNum); // log 3 if (opNum == transform::LogSoftMax) execTransformFloat(extraPointers, transform::Log, nullptr, hZShapeInfo, dZ, dZShapeInfo, nullptr, hZShapeInfo, dZ, dZShapeInfo, extraParams); else if (opNum == transform::SoftMaxDerivative) execTransformStrict(extraPointers, transform::SpecialDerivative, nullptr, hZShapeInfo, dZ, dZShapeInfo, nullptr, hZShapeInfo, dZ, dZShapeInfo, extraParams); nd4j::DebugHelper::checkErrorCode(stream, "SoftMax(...) 
failed"); delete hostMaxShapeBuffer; } } break; default: { BUILD_SINGLE_SELECTOR(xType, functions::transform::TransformStrict, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), FLOAT_TYPES); } } } void NativeOps::execTransformFloat(Nd4jPointer *extraPointers,int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraParams) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]); auto xRank = shape::rank(hXShapeInfo); auto zRank = shape::rank(hZShapeInfo); auto xType = ArrayOptions::dataType(hXShapeInfo); auto zType = ArrayOptions::dataType(hZShapeInfo); if (!DataTypeUtils::isR(zType)) throw datatype_exception::build("NativeOps::execTransformFloat requires Z to have floating point type", zType); if (opNum == transform::Histogram) { dim3 launchDims(256, 256, 32768); Nd4jPointer maskedAllocPointer; auto length = shape::length(hZShapeInfo); cudaMalloc(reinterpret_cast<void **>(&maskedAllocPointer), length * launchDims.x * DataTypeUtils::sizeOf(nd4j::DataType::INT64)); auto imaskedAllocPointer = reinterpret_cast<int *>(maskedAllocPointer); BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformFloat, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, imaskedAllocPointer, reductionPointer, nullptr, nullptr), LIBND4J_TYPES, FLOAT_TYPES); checkCudaErrors(cudaStreamSynchronize(*stream)); cudaFree(maskedAllocPointer); } else { dim3 launchDims(512, 512, 16384); BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformFloat, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES, FLOAT_TYPES); } } /** * Append an input array * to the end of a flat array * in a particular order * @param offset the offset of the array to start at * @param order the order * @param dZ the dZ array * @param dZShapeInfo the shape info for te array * @param input the input for the array * @param inputShapeInfo the shape information for that array */ void NativeOps::flatten(Nd4jPointer *extraPointers, int offset, char order, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hInput, Nd4jLong *hInputShapeInfo, void *dInput, Nd4jLong *dInputShapeInfo) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); auto hYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F22 opNum:[7]\n"); // int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hYShapeInfo), 2, funcAttributes[30]); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AF222 opNum:[7]\n"); auto type = nd4j::ArrayOptions::dataType(hInputShapeInfo); BUILD_SINGLE_SELECTOR(type, flattenKernelGeneric, (launchDims, stream, extraPointers, offset, order, dZ, dZShapeInfo, dInput, dInputShapeInfo), LIBND4J_TYPES); DEBUG_KERNEL(stream, -1); } void NativeOps::checkP2P() { int curDevice = 0; cudaGetDevice(&curDevice); int devCnt = 0; cudaGetDeviceCount(&devCnt); if (curDevice < 0 && curDevice > devCnt) curDevice = 0; bool tempSupport = true; if (devCnt > 1) { for (int dX = 0; dX 
< devCnt; dX++) {
            for (int dY = 0; dY < devCnt; dY++) {
                if (dX == dY)
                    continue;

                int canAccess = 0;
                cudaSetDevice(dX);

                cudaDeviceCanAccessPeer(&canAccess, dX , dY);

                if (!canAccess) {
                    tempSupport = false;
                    break;
                }
            }
        }

        supportedP2P = tempSupport;

        cudaSetDevice(curDevice);
    } else {
        // if we have only 1 device - we say that we support P2P, since all data will be on 1 device
        supportedP2P = true;
    }
}

void NativeOps::enableP2P(bool enable) {
    if (enable == allowedP2P)
        return;

    int curDevice = 0;
    cudaGetDevice(&curDevice);

    int devCnt = 0;
    cudaGetDeviceCount(&devCnt);

    if (curDevice < 0 || curDevice > devCnt)
        curDevice = 0;

    if (devCnt > 1) {
        for (int dX = 0; dX < devCnt; dX++) {
            for (int dY = 0; dY < devCnt; dY++) {
                if (dX == dY)
                    continue;

                int canAccess = 0;
                cudaSetDevice(dX);

                cudaDeviceCanAccessPeer(&canAccess, dX , dY);

                if (canAccess) {
                    if (enable) {
                        cudaDeviceEnablePeerAccess(dY, 0);
                    } else {
                        cudaDeviceDisablePeerAccess(dY);
                    }
                } else {
                    if (nd4j::Environment::getInstance()->isVerbose())
                        printf("Peer access [%i] -> [%i] isn't possible\n", dX, dY);
                }
            }
        }

        cudaSetDevice(curDevice);
    }

    allowedP2P = enable;

    cudaSetDevice(curDevice);
}

bool NativeOps::isP2PAvailable() {
    return supportedP2P;
}

void NativeOps::initializeDevicesAndFunctions() {
    int devCnt = 0;
    cudaGetDeviceCount(&devCnt);
    deviceProperties = new cudaDeviceProp[devCnt];
    for (int i = 0; i < devCnt; i++) {
        cudaSetDevice(i);
        cudaGetDeviceProperties(&deviceProperties[i], i);

        cudaDeviceSetLimit(cudaLimitStackSize, 4096);
    }

    cudaSetDevice(0);

    checkP2P();

    // enabling p2p gpu access if it's supported
    if (supportedP2P && devCnt > 1)
        enableP2P(allowedP2P);
}

void NativeOps::initializeFunctions(Nd4jPointer *functions) {
    nd4j::BlasHelper::getInstance()->initializeDeviceFunctions(functions);
    /*
    this->cublasSgemv = (CublasSgemv)functions[0];
    this->cublasDgemv = (CublasDgemv)functions[1];
    this->cublasHgemm = (CublasHgemm)functions[2];
    this->cublasSgemm = (CublasSgemm)functions[3];
    this->cublasDgemm = (CublasDgemm)functions[4];
    this->cublasSgemmEx = (CublasSgemmEx)functions[5];
    this->cublasHgemmBatched = (CublasHgemmBatched)functions[6];
    this->cublasSgemmBatched = (CublasSgemmBatched)functions[7];
    this->cublasDgemmBatched = (CublasDgemmBatched)functions[8];
    */
}

/**
 * This method acquires memory chunk of requested size on host side
 *
 * @param pointer pointer that'll be used for allocation
 * @param memorySize memory size, in bytes
 * @param flags optional parameter
 */
Nd4jPointer NativeOps::mallocHost(Nd4jLong memorySize, int flags) {
    Nd4jPointer pointer;
    // cudaHostAllocMapped |cudaHostAllocPortable
    cudaError_t res = cudaHostAlloc(reinterpret_cast<void **>(&pointer), memorySize, cudaHostAllocDefault);
    if (res != 0)
        pointer = 0L;
    return pointer;
}

/**
 * This method acquires memory chunk of requested size on specified device
 *
 * @param pointer pointer that'll be used for allocation
 * @param memorySize memory size, in bytes
 * @param ptrToDeviceId pointer to deviceId. 
For cuda that's just and int, for OpenCL that's pointer to device_id, etc * @param flags optional parameter */ Nd4jPointer NativeOps::mallocDevice(Nd4jLong memorySize, Nd4jPointer ptrToDeviceId, int flags) { Nd4jPointer pointer; cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&pointer), memorySize); if (res != 0) pointer = 0L; return pointer; } /** * This method releases previously allocated host memory space * * @param pointer pointer that'll be freed */ int NativeOps::freeHost(Nd4jPointer pointer) { cudaError_t res = cudaFreeHost(reinterpret_cast<void *>(pointer)); if (res != 0) pointer = 0L; return 1L; } /** * This method releases previously allocated memory space on device * * @param pointer pointer that'll be freed * @param ptrToDeviceId pointer to deviceId. */ int NativeOps::freeDevice(Nd4jPointer pointer, Nd4jPointer ptrToDeviceId) { cudaError_t res = cudaFree(reinterpret_cast<void *>(pointer)); if (res != 0) pointer = 0L; return 1L; } Nd4jPointer NativeOps::createContext() { return 0L; } Nd4jPointer NativeOps::createStream() { Nd4jPointer nativeStream = (Nd4jPointer) malloc(sizeof(cudaStream_t)); CHECK_ALLOC(nativeStream, "Failed to allocate memory for new CUDA stream"); cudaError_t dZ = cudaStreamCreate(reinterpret_cast<cudaStream_t *>(&nativeStream)); checkCudaErrors(dZ); if (dZ != 0) throw std::runtime_error("cudaStreamCreate(...) failed"); return nativeStream; } Nd4jPointer NativeOps::createEvent() { Nd4jPointer nativeEvent= (Nd4jPointer) malloc(sizeof(cudaEvent_t)); CHECK_ALLOC(nativeEvent, "Failed to allocate new CUDA event buffer"); cudaError_t dZ = cudaEventCreateWithFlags(reinterpret_cast<cudaEvent_t *>(&nativeEvent), cudaEventDisableTiming); checkCudaErrors(dZ); if (dZ != 0) throw std::runtime_error("cudaEventCreateWithFlags(...) failed"); return nativeEvent; } int NativeOps::registerEvent(Nd4jPointer event, Nd4jPointer stream) { cudaEvent_t *pEvent = reinterpret_cast<cudaEvent_t *>(&event); cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&stream); cudaError_t dZ = cudaEventRecord(*pEvent, *pStream); checkCudaErrors(dZ); if (dZ != 0) throw std::runtime_error("cudaEventRecord(...) failed"); return 1; } int NativeOps::setDevice(Nd4jPointer ptrToDeviceId) { int deviceId = getDeviceId(ptrToDeviceId); cudaError_t dZ = cudaSetDevice(deviceId); checkCudaErrors(dZ); if (dZ != 0) throw std::runtime_error("cudaSetDevice(...) 
failed"); return 1; } Nd4jLong NativeOps::getDeviceFreeMemory(Nd4jPointer ptrToDeviceId) { int device = getDeviceId(ptrToDeviceId); int orig = -1; cudaGetDevice(&orig); if (device >= 0 && device != orig) { cudaSetDevice(device); } size_t memFree = 0; size_t memTotal = 0; cudaMemGetInfo(&memFree, &memTotal); if (device >= 0 && device != orig) { cudaSetDevice(orig); } return (Nd4jLong) memFree; } Nd4jLong NativeOps::getDeviceTotalMemory(Nd4jPointer ptrToDeviceId) { int device = getDeviceId(ptrToDeviceId); int orig = -1; cudaGetDevice(&orig); if (device >= 0 && device != orig) { cudaSetDevice(device); } size_t memFree = 0; size_t memTotal = 0; cudaMemGetInfo(&memFree, &memTotal); if (device >= 0 && device != orig) { cudaSetDevice(orig); } return (Nd4jLong) memTotal; } int NativeOps::memcpy(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) { return memcpyAsync(dst, src, size, flags, reserved); } int NativeOps::memcpyAsync(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) { cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&reserved); cudaMemcpyKind kind; DEBUG_KERNEL(pStream, 0); switch (flags) { case 0: { kind = cudaMemcpyHostToHost; } break; case 1: { kind = cudaMemcpyHostToDevice; } break; case 2: { kind = cudaMemcpyDeviceToHost; } case 3: { kind = cudaMemcpyDeviceToDevice; } break; default: { printf("UNDEFINED MEMCPY!\n"); break; } } cudaError_t dZ = cudaMemcpyAsync(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)), static_cast<size_t>(size), kind, *pStream); if (dZ != 0) { checkCudaErrors(dZ); printf("Failed on [%lu] -> [%lu], size: [%i], direction: [%i], dZ: [%i]\n", src, dst, size, flags, static_cast<int>(dZ)); fflush(stdout); fflush(stderr); throw std::runtime_error("cudaMemcpyAsync(...) failed"); //return 0L; } return 1; } int NativeOps::memset(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) { cudaError_t dZ = cudaMemset(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size)); checkCudaErrors(dZ); if (dZ != 0) throw std::runtime_error("cudaMemset(...) failed"); return 1; } int NativeOps::memsetAsync(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) { cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&reserved); cudaError_t dZ = cudaMemsetAsync(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size), *pStream); checkCudaErrors(dZ); if (dZ != 0) throw std::runtime_error("cudaMemsetAsync(...) failed"); return 1; } int NativeOps::destroyEvent(Nd4jPointer event) { cudaEvent_t *pEvent = reinterpret_cast<cudaEvent_t *>(&event); cudaError_t dZ = cudaEventDestroy(*pEvent); checkCudaErrors(dZ); if (dZ != 0) throw std::runtime_error("cudaEvenDestroy(...) failed"); return 1; } int NativeOps::streamSynchronize(Nd4jPointer stream) { cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&stream); cudaError_t dZ = cudaStreamSynchronize(*pStream); checkCudaErrors(dZ); if (dZ != 0) throw std::runtime_error("cudaStreamSynchronize(...) failed"); return 1L; } int NativeOps::eventSynchronize(Nd4jPointer event) { cudaEvent_t *pEvent = reinterpret_cast<cudaEvent_t *>(&event); cudaError_t dZ = cudaEventSynchronize(*pEvent); checkCudaErrors(dZ); if (dZ != 0) throw std::runtime_error("cudaEventSynchronize(...) 
failed"); return 1L; } int NativeOps::getAvailableDevices() { int devCnt = 0; cudaGetDeviceCount(&devCnt); return devCnt; } void NativeOps::enableDebugMode(bool reallyEnable) { nd4j::Environment::getInstance()->setDebug(reallyEnable); } void NativeOps::setGridLimit(int gridSize) { if (gridSize > 8192) gridSize = 8192; if (gridSize < 1) gridSize = 1; blockLimit = gridSize; } int NativeOps::ompGetMaxThreads() { return maxThreads; } int NativeOps::ompGetNumThreads() { return maxThreads; } void NativeOps::setOmpNumThreads(int threads) { if (threads > 1024) threads = 1024; if (threads < 32) threads = 32; maxThreads = threads; } void NativeOps::enableVerboseMode(bool reallyEnable) { nd4j::Environment::getInstance()->setVerbose(reallyEnable); } int NativeOps::getDeviceMajor(Nd4jPointer ptrToDeviceId) { int device = getDeviceId(ptrToDeviceId); return deviceProperties[device].major; } int NativeOps::getDeviceMinor(Nd4jPointer ptrToDeviceId) { int device = getDeviceId(ptrToDeviceId); return deviceProperties[device].minor; } const char * NativeOps::getDeviceName(Nd4jPointer ptrToDeviceId) { int device = getDeviceId(ptrToDeviceId); return deviceProperties[device].name; } /** * Concatneate multi array of the same shape together * along a particular dimension */ void NativeOps::concat( Nd4jPointer *extraPointers, int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfo, Nd4jPointer *ddata, Nd4jPointer *dinputShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); auto hXShapeInfo = hZShapeInfo; auto hShapePointers = reinterpret_cast<Nd4jLong **>(inputShapeInfo); // numArrays will be used as number of TADs, so each block process 1 input int smem = 8192; bool isVstack = false; bool isScalar = true; bool isHstack = false; for (int i = 0; i < numArrays; i++) { if (!shape::isScalar(hShapePointers[i])) { isScalar = false; break; } } if (!isScalar && dimension == 0 && shape::rank(hZShapeInfo) == 2 && shape::order(hZShapeInfo) == 'c' ) { isVstack = true; for (int i = 0; i < numArrays; i++) { if (!shape::isVector(hShapePointers[i]) || shape::elementWiseStride(hShapePointers[i]) <= 0 || shape::order(hShapePointers[i]) != 'c') { isVstack = false; break; } } } // let's try to fit N-dimensional vstack if (!isVstack && !isScalar && dimension == 0 && shape::order(hXShapeInfo) == 'c') { auto length0 = shape::length(hShapePointers[0]); isVstack = true; for (int i = 0; i < numArrays; i++) { if (shape::elementWiseStride(hShapePointers[i]) <= 0 || shape::order(hShapePointers[i]) != 'c' || length0 != shape::length(hShapePointers[i])) { isVstack = false; break; } } } if (!isScalar && !isVstack && dimension == 1 && shape::isVector(hZShapeInfo)) { isHstack = true; for (int i = 0; i < numArrays; i++) { if (!shape::isVector(hShapePointers[i]) || shape::elementWiseStride(hShapePointers[i]) <= 0) { isHstack = false; break; } } } if (isScalar) { if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Going scalar concat\n"); dim3 launchDims(128, 128, 16384); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); BUILD_SINGLE_SELECTOR(zType, concatKernelScalarGeneric, (launchDims, stream, numArrays, reinterpret_cast<Nd4jPointer *>(ddata[0]), dZ), LIBND4J_TYPES); } else if (isVstack) { if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Going VStack concat\n"); dim3 launchDims(128, 512, 16384); auto zType = 
nd4j::ArrayOptions::dataType(hZShapeInfo);
        BUILD_SINGLE_SELECTOR(zType, concatKernelVStackGeneric, (launchDims, stream, numArrays, reinterpret_cast<Nd4jPointer *>(ddata[0]), reinterpret_cast<Nd4jPointer *>(dinputShapeInfo[0]), dZ, dZShapeInfo), LIBND4J_TYPES);
    } else if (isHstack) {
        if (nd4j::Environment::getInstance()->isDebugAndVerbose())
            printf("Going HStack concat\n");

        dim3 launchDims(128, 128, 16384);
        auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
        BUILD_SINGLE_SELECTOR(zType, concatKernelHStackGeneric, (launchDims, stream, numArrays, reinterpret_cast<Nd4jPointer *>(ddata[0]), reinterpret_cast<Nd4jPointer *>(dinputShapeInfo[0]), dZ, dZShapeInfo), LIBND4J_TYPES);
    } else {
        if (nd4j::Environment::getInstance()->isDebugAndVerbose())
            printf("Going generic concat\n");

        auto devZTadShape = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
        auto devZOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);

        dim3 launchDims(128, 128, 8192);
        auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
        BUILD_SINGLE_SELECTOR(zType, concatKernelGeneric, (launchDims, stream, numArrays, reinterpret_cast<Nd4jPointer *>(ddata[0]), reinterpret_cast<Nd4jPointer *>(dinputShapeInfo[0]), dZ, dZShapeInfo, reinterpret_cast<Nd4jPointer *>(tadPointers[0]), reinterpret_cast<Nd4jPointer *>(offsetPointers[0]), devZTadShape, devZOffsets), LIBND4J_TYPES);
    }

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("sharedMemory requested for concatFloat: [%i], registers: [%i]\n", smem, funcAttributes[31].numRegs);

    nd4j::DebugHelper::checkErrorCode(stream, "Legacy ConcatFloat(...) failed");
}

void NativeOps::specialConcat(
        Nd4jPointer *extraPointers,
        int dimension,
        int numArrays,
        Nd4jPointer *data,
        Nd4jPointer *inputShapeInfo,
        void *dZ,
        Nd4jLong *dZShapeInfo,
        Nd4jPointer *tadPointers,
        Nd4jPointer *offsetPointers) {

    nd4j::SpecialMethods<float>::concatCpuGeneric(
            dimension,
            numArrays,
            data,
            inputShapeInfo,
            dZ,
            dZShapeInfo);
}

/**
 * This method saves the TAD-only shape info and TAD offsets
 * for the given dimensions into the provided target buffers
 */
void NativeOps::tadOnlyShapeInfo(Nd4jLong *dXShapeInfo, int *dimension, int dimensionLength, Nd4jLong *target, Nd4jLong *offsets) {
    shape::TAD tad;
    tad.init(dXShapeInfo, dimension, dimensionLength);
    //tad->setOutputBuffer(target);
    tad.createTadOnlyShapeInfo();
    tad.createOffsets();

    std::memcpy(reinterpret_cast<void *>(target), tad.tadOnlyShapeInfo, shape::shapeInfoByteLength(tad.tadOnlyShapeInfo));
    std::memcpy(reinterpret_cast<void *>(offsets), tad.tadOffsets, tad.numTads * sizeof(Nd4jLong));
}

int NativeOps::memcpyConstantAsync(Nd4jLong dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
    cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&reserved);

    cudaMemcpyKind kind;

    DEBUG_KERNEL(pStream, -1);

    switch (flags) {
        case 0: {
            kind = cudaMemcpyHostToHost;
        }
            break;
        case 1: {
            kind = cudaMemcpyHostToDevice;
        }
            break;
        case 2: {
            kind = cudaMemcpyDeviceToHost;
        }
            break;
        case 3: {
            kind = cudaMemcpyDeviceToDevice;
        }
            break;
    }
    //cudaError_t dZ = cudaMemcpyAsync((void *) dst, (const void *) src, (size_t) size, kind, *pStream);
    cudaError_t dZ = cudaMemcpyToSymbolAsync(deviceConstantMemory, const_cast<const void *>(src), size, dst, kind, *pStream);
    checkCudaErrors(dZ);
    if (dZ != 0)
        throw std::runtime_error("cudaMemcpyToSymbolAsync(...) failed");

    return 1;
}

Nd4jPointer NativeOps::getConstantSpace() {
    Nd4jPointer dConstAddr;
    cudaError_t dZ = cudaGetSymbolAddress(reinterpret_cast<void **>(&dConstAddr), deviceConstantMemory);

    if (dZ != 0)
        throw std::runtime_error("cudaGetSymbolAddress(...) 
failed"); return dConstAddr; } void NativeOps::pullRows(Nd4jPointer *extraPointers, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *z, Nd4jLong *zShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, Nd4jLong n, Nd4jLong *indexes, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *zTadShapeInfo, Nd4jLong *zTadOffsets) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); dim3 launchDims(64, 256, 1024); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); BUILD_SINGLE_SELECTOR(xType, pullRowsKernelGeneric, (launchDims, stream, dX, dZ, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets), LIBND4J_TYPES); DEBUG_KERNEL(stream, -1); } void NativeOps::average(Nd4jPointer *extras, Nd4jPointer *x, Nd4jLong *xShapeInfo, Nd4jPointer *dx, Nd4jLong *dXShapeInfo, void *z, Nd4jLong *zShapeInfo, void *dz, Nd4jLong *dzShapeInfo, int n, Nd4jLong length, bool propagate) { cudaStream_t * stream = reinterpret_cast<cudaStream_t *>(&extras[1]); int mode = getDeviceId(extras[3]); auto dX = reinterpret_cast<void **>(dx); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("averageFloat called\n"); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); // launching on gpu if (mode == 0) { dim3 launchDims(256, 256, 4096); // averagingKernelFloat<<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(dX, dz, n, length, propagate); BUILD_SINGLE_SELECTOR(xType, averagingKernelGeneric, (launchDims, stream, dX, dz, n, length, propagate), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "AverageFloat(...) failed"); } else { // launching on host memory BUILD_SINGLE_SELECTOR(xType, nd4j::SpecialMethods, ::averageGeneric(x, z, zShapeInfo, n, length, propagate), LIBND4J_TYPES); } } void NativeOps::accumulate(Nd4jPointer *extras, Nd4jPointer *x, Nd4jLong *xShapeInfo, Nd4jPointer *dx, Nd4jLong *dXShapeInfo, void *z, Nd4jLong *zShapeInfo, void *dz, Nd4jLong *dzShapeInfo, int n, Nd4jLong length) { auto stream = reinterpret_cast<cudaStream_t *>(&extras[1]); int mode = getDeviceId(extras[3]); auto dX = reinterpret_cast<void **>(dx); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("accumulateFloat called\n"); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); // launching on gpu if (mode == 0) { dim3 launchDims(n, 256, 16384); BUILD_SINGLE_SELECTOR(xType, accumulateKernelGeneric, (launchDims, stream, dX, dz, n,length), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "AccumulateFloat(...) 
failed"); } else { // launching on host memory BUILD_SINGLE_SELECTOR(xType, nd4j::SpecialMethods, ::accumulateGeneric(x, z, zShapeInfo, n, length), LIBND4J_TYPES); } } void NativeOps::shuffle(Nd4jPointer *extras, Nd4jPointer *x, Nd4jPointer *xShapeInfo, Nd4jPointer *dx, Nd4jPointer *dXShapeInfo, Nd4jPointer *z, Nd4jPointer *zShapeInfo, Nd4jPointer *dz, Nd4jPointer *dZShapeInfo, int N, int *shuffleMap, Nd4jPointer *tadShapeInfo, Nd4jPointer *tadOffsets) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]); auto dX = reinterpret_cast<void **>(dx); auto dZ = reinterpret_cast<void **>(dz); auto xShape = reinterpret_cast<Nd4jLong **>(xShapeInfo); auto dxShape = reinterpret_cast<Nd4jLong **>(dXShapeInfo); auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong **>(tadShapeInfo); auto tadOffset = reinterpret_cast<Nd4jLong **>(tadOffsets); auto xType = nd4j::ArrayOptions::dataType(xShape[0]); dim3 launchDims(N, 256, 8192); BUILD_SINGLE_SELECTOR(xType, shuffleKernelGeneric, (launchDims, stream, dX, dxShape, dZ, N, shuffleMap, tadOnlyShapeInfo, tadOffset), LIBND4J_TYPES); DEBUG_KERNEL(stream, 0); } /* void NativeOps::execMetaPredicateShape(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraA, void *extraB, double scalarA, double scalarB) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); BUILD_SINGLE_SELECTOR(xType, functions::grid::GRIDShaped, ::execMetaPredicateShaped(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraA, extraB, scalarA, scalarB), LIBND4J_TYPES); // functions::grid::GRIDShaped<float>::execMetaPredicateShaped(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dX, dXShapeInfo, dy, dYShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB); DEBUG_KERNEL(stream, opNumA); } */ bool NativeOps::isExperimentalEnabled() { return nd4j::Environment::getInstance()->isExperimentalBuild(); } void NativeOps::setOmpMinThreads(int threads) { minThreads = nd4j::math::nd4j_max<int>(32, threads); minThreads = nd4j::math::nd4j_min<int>(maxThreads, minThreads); } int NativeOps::getDevice() { int curDevice = -1; cudaGetDevice(&curDevice); return curDevice; } void NativeOps::setElementThreshold(int num) { // this is no-op for CUDA } void NativeOps::setTADThreshold(int num) { // this is no-op for CUDA } void NativeOps::execSummaryStats(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, bool biasCorrected) { auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); dim3 launchDims = dim3(256, 256, 32768); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); if (!DataTypeUtils::isR(zType)) throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Z operand to have floating point data type", zType); BUILD_DOUBLE_SELECTOR(xType, zType, functions::summarystats::SummaryStatsReduce, ::execSummaryStatsReduce(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, nullptr, nullptr, biasCorrected, nullptr), LIBND4J_TYPES, FLOAT_TYPES); } void 
NativeOps::execSummaryStats(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, int *dimension, int dimensionLength, bool biasCorrected, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]); dim3 launchDims = dim3(256, 256, 32768); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); if (!DataTypeUtils::isR(zType)) throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Z operand to have floating point data type", zType); BUILD_DOUBLE_SELECTOR(xType, zType, functions::summarystats::SummaryStatsReduce, ::execSummaryStatsReduce(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, biasCorrected, reductionPointer), LIBND4J_TYPES, FLOAT_TYPES); } void NativeOps::execReduce3(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) { auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); dim3 launchDims(256, 256, 32768); if (xType != yType) throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Y operand to have X type", xType, yType); if (!DataTypeUtils::isR(zType)) throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Z operand to have floating point data type", zType); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce3::Reduce3, ::exec(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, 1, allocationPointer, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets), LIBND4J_TYPES, FLOAT_TYPES) DEBUG_KERNEL(stream, opNum); } //////////////////////////////////////////////////////////////////////// void NativeOps::execReduce3(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) { auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); auto numBlocks = shape::length(hZShapeInfo); dim3 launchDims(numBlocks, 256, 32768); if (xType != yType) throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Y operand to have X type", xType, yType); if (!DataTypeUtils::isR(zType)) throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Z 
operand to have floating point data type", zType); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce3::Reduce3, ::exec(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, 1, allocationPointer, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets), LIBND4J_TYPES, FLOAT_TYPES) } //////////////////////////////////////////////////////////////////////// void NativeOps::execReduce3Scalar(Nd4jPointer *extraPointers,int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo) { auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); auto allocationPointer = reinterpret_cast<int *>(extraPointers[3]); auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); auto xLength = shape::length(hXShapeInfo); auto blockWidth = 256; auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth); dim3 launchDims(numBlocks, blockWidth, 32768); if (xType != yType) throw nd4j::datatype_exception::build("NativeOps::execReduce3Scalar requires Y operand to have X type", xType, yType); if (!DataTypeUtils::isR(zType)) throw nd4j::datatype_exception::build("NativeOps::execReduce3Scalar requires Z operand to have floating point data type", zType); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce3::Reduce3, ::execScalar(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParams, dZ, dZShapeInfo, allocationPointer, reductionPointer, nullptr), LIBND4J_TYPES, FLOAT_TYPES); } void NativeOps::execScalarBool( Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hScalar, Nd4jLong *hScalarShapeInfo, void *dScalar, Nd4jLong *dScalarShapeInfo, void *extraParams) { auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); dim3 launchDims = dim3(256, 512, 8192); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto yType = nd4j::ArrayOptions::dataType(hScalarShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); if (xType != yType ) throw std::runtime_error("NativeOps::execScalarBool requires X & Y to have same type"); if (!DataTypeUtils::isB(zType) ) throw std::runtime_error("NativeOps::execScalarBool requires Z operand to have BOOL type"); BUILD_DOUBLE_SELECTOR(xType, zType, functions::scalar::ScalarBoolTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalar, extraParams), LIBND4J_TYPES, BOOL_TYPES); DEBUG_KERNEL(stream, opNum); } void NativeOps::execScalarBool(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hScalars, Nd4jLong *hScalarShapeInfo, void *dScalars, Nd4jLong *dScalarShapeInfo, void *extraParams, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); dim3 launchDims(256, 512, 8192); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto yType = 
nd4j::ArrayOptions::dataType(hScalarShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); if (xType != yType ) throw std::runtime_error("NativeOps::execScalarBool requires X & Y to have same type"); if (!DataTypeUtils::isB(zType) ) throw std::runtime_error("NativeOps::execScalarBool requires Z operand to have BOOL type"); BUILD_DOUBLE_SELECTOR(xType, yType, functions::scalar::ScalarBoolTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES, BOOL_TYPES); DEBUG_KERNEL(stream, opNum); } void NativeOps::execScalar( Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hScalar, Nd4jLong *hScalarShapeInfo, void *dScalar, Nd4jLong *dScalarShapeInfo, void *extraParams) { auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); dim3 launchDims(256, 512, 8192); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto yType = nd4j::ArrayOptions::dataType(hScalarShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); #ifndef __ND4J_EXPERIMENTAL__ BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::scalar::ScalarTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, dZ, dZShapeInfo, hZShapeInfo, dScalar, extraParams), LIBND4J_TYPES, LIBND4J_TYPES); #else BUILD_SINGLE_SELECTOR_THRICE(xType, functions::scalar::ScalarTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, dZ, dZShapeInfo, hZShapeInfo, dScalar, extraParams), LIBND4J_TYPES); #endif DEBUG_KERNEL(stream, opNum); } void NativeOps::execScalar(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hScalars, Nd4jLong *hScalarShapeInfo, void *dScalars, Nd4jLong *dScalarShapeInfo, void *extraParams, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto yType = nd4j::ArrayOptions::dataType(hScalarShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); dim3 launchDims(256, 256, 16384); #ifndef __ND4J_EXPERIMENTAL__ BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), LIBND4J_TYPES, LIBND4J_TYPES); #else BUILD_SINGLE_SELECTOR_THRICE(xType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), LIBND4J_TYPES); #endif DEBUG_KERNEL(stream, opNum); } void NativeOps::execAggregate(Nd4jPointer *extraPointers, int opNum, void **arguments, int numArguments, Nd4jLong **shapes, int numShapes, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, void *realArguments, int numRealArguments, nd4j::DataType dtype) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int numBlocks = 
getDeviceId(extraPointers[2]); int numThreads = getDeviceId(extraPointers[3]); int shmem = getDeviceId(extraPointers[4]); dim3 launchDims = dim3(numBlocks, numThreads, shmem); BUILD_SINGLE_SELECTOR(dtype, functions::aggregate::AggregatedFunction, ::aggregateKernelGeneric(launchDims, stream, opNum, arguments, numArguments, shapes, numShapes, indexArguments, numIndexArguments, intArrays, numIntArrays, realArguments, numRealArguments), FLOAT_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "execAggregateFloat(...) failed"); } void NativeOps::execAggregateBatch(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments, nd4j::DataType dtype) { // not implemented yet cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int numBlocks = getDeviceId(extraPointers[2]); int numThreads = getDeviceId(extraPointers[3]); int shmem = getDeviceId(extraPointers[4]); dim3 launchDims = dim3(numAggregates, numThreads, shmem); BUILD_SINGLE_SELECTOR(dtype, functions::aggregate::AggregatedFunction, ::aggregateBatchKernelGeneric(launchDims, stream, opNum, numAggregates, maxArgs, maxShapes, maxIntArrays, maxIntArraySize, maxIdx, maxReals, ptrToArguments), FLOAT_TYPES); DEBUG_KERNEL(stream, opNum); } void NativeOps::execRandom(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraArguments) { auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); auto sizeOf = sizeof(nd4j::graph::RandomGenerator); Nd4jPointer stateDevice; cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&stateDevice), sizeOf); checkCudaErrors(cudaStreamSynchronize(*stream)); checkCudaErrors(cudaMemcpyAsync(stateDevice, stateHost, sizeOf, cudaMemcpyHostToDevice, *stream)); dim3 launchDims = dim3(512, 512, 32768); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); // functions::random::RandomFunction<float>::executeCudaSingle(launchDims, extraPointers, opNum, stateHost, dZ, dZShapeInfo, extraArguments), BUILD_SINGLE_SELECTOR(zType, functions::random::RandomFunction, ::executeCudaSingle(launchDims, extraPointers, opNum, stateDevice, dZ, dZShapeInfo, extraArguments), FLOAT_TYPES); checkCudaErrors(cudaMemcpyAsync(stateHost, stateDevice, sizeOf, cudaMemcpyDeviceToHost, *stream)); checkCudaErrors(cudaStreamSynchronize(*stream)); cudaFree(stateDevice); } void NativeOps::execRandom(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraArguments) { auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); auto sizeOf = sizeof(nd4j::graph::RandomGenerator); Nd4jPointer stateDevice; cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&stateDevice), sizeOf); checkCudaErrors(cudaStreamSynchronize(*stream)); checkCudaErrors(cudaMemcpyAsync(stateDevice, stateHost, sizeOf, cudaMemcpyHostToDevice, *stream)); dim3 launchDims = dim3(512, 512, 32768); auto xType = nd4j::ArrayOptions::dataType(hZShapeInfo); // functions::random::RandomFunction<float>::executeCudaDouble(launchDims, extraPointers, opNum, stateHost, dX, dXShapeInfo, dZ, dZShapeInfo, extraArguments); BUILD_SINGLE_SELECTOR(xType, functions::random::RandomFunction, ::executeCudaDouble(launchDims, extraPointers, opNum, stateDevice, dX, dXShapeInfo, dZ, dZShapeInfo, extraArguments), FLOAT_TYPES); 
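    // copy the updated RNG state back to the host buffer and release the temporary device-side copy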
checkCudaErrors(cudaMemcpyAsync(stateHost, stateDevice, sizeOf, cudaMemcpyDeviceToHost, *stream)); checkCudaErrors(cudaStreamSynchronize(*stream)); cudaFree(stateDevice); } void NativeOps::execRandom(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraArguments) { auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); auto sizeOf = sizeof(nd4j::graph::RandomGenerator); Nd4jPointer stateDevice; cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&stateDevice), sizeOf); checkCudaErrors(cudaStreamSynchronize(*stream)); checkCudaErrors(cudaMemcpyAsync(stateDevice, stateHost, sizeOf, cudaMemcpyHostToDevice, *stream)); dim3 launchDims = dim3(512, 512, 32768); auto xType = nd4j::ArrayOptions::dataType(hZShapeInfo); // functions::random::RandomFunction<float>::executeCudaTriple(launchDims, extraPointers, opNum, stateHost, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraArguments); BUILD_SINGLE_SELECTOR(xType, functions::random::RandomFunction, ::executeCudaTriple(launchDims, extraPointers, opNum, stateDevice, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraArguments), FLOAT_TYPES); checkCudaErrors(cudaMemcpyAsync(stateHost, stateDevice, sizeOf, cudaMemcpyDeviceToHost, *stream)); checkCudaErrors(cudaStreamSynchronize(*stream)); cudaFree(stateDevice); } Nd4jPointer NativeOps::initRandom(Nd4jPointer *extraPointers, long seed, long bufferSize, Nd4jPointer ptrToBuffer) { unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); // we don't synchronize at random initialization, it's safe to go unsync here // cudaStreamSynchronize(*stream); auto ptrDev = reinterpret_cast<unsigned long long *>(ptrToBuffer); auto buffer = new nd4j::random::RandomBuffer(seed, bufferSize, reinterpret_cast<uint64_t *>(ptrHost), reinterpret_cast<uint64_t *>(ptrDev)); buffer->propagateToDevice(buffer, *stream); nd4j::DebugHelper::checkErrorCode(stream, "initRandom(...) failed A"); // we generate sequence in the host memory nd4j::random::Xoroshiro128 generator(buffer); generator.refreshBuffer(); // and copy it to gpu cudaMemcpyAsync(ptrDev, ptrHost, bufferSize * 8, cudaMemcpyHostToDevice, *stream); nd4j::DebugHelper::checkErrorCode(stream, "initRandom(...) 
failed B"); return buffer; } void NativeOps::destroyRandom(Nd4jPointer ptrBuffer) { nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrBuffer); // FIXME: it's bad thing, but we can't know in advance, which stream(s) where using this generator in practice cudaDeviceSynchronize(); delete buffer; } void NativeOps::refreshBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) { nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrRandom); unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); cudaStreamSynchronize(*stream); uint64_t *ptrDev = buffer->getDeviceBuffer(); // update rng state buffer->setSeed(seed); buffer->setOffset(0); buffer->propagateToDevice(buffer, *stream); // refresh buffer on host size nd4j::random::Xoroshiro128 generator(buffer); generator.refreshBuffer(); // copy back to gpu cudaMemcpyAsync(ptrDev, ptrHost, buffer->getSize() * 8, cudaMemcpyHostToDevice, *stream); } void NativeOps::reSeedBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) { nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrRandom); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); cudaStreamSynchronize(*stream); // update rng state buffer->reSeed(seed); buffer->setOffset(0); buffer->propagateToDevice(buffer, *stream); } /** * Return the length of a shape buffer * based on the pointer * @param buffer the buffer pointer to check * @return */ int NativeOps::lengthForShapeBufferPointer(Nd4jPointer buffer) { auto shapeBuffer = reinterpret_cast<Nd4jLong *>(buffer); return shape::shapeInfoLength(shape::rank(shapeBuffer)); } /** * The pointer to get the address for * * @param address the address to get the pointer * @return the pointer for the given address */ Nd4jPointer NativeOps::pointerForAddress(Nd4jLong address) { return reinterpret_cast<Nd4jPointer >(address); } void NativeOps::tear(Nd4jPointer *extras, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, Nd4jPointer *targets, Nd4jLong *zShapeInfo, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]); dim3 launchDims(512, 512, 512); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); BUILD_SINGLE_SELECTOR(xType, tearKernelGeneric, (launchDims, stream, dX, dXShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "tearFloat(...) failed"); } void prescanArrayRecursive(Nd4jPointer *extras, int *dZ, int *dX, int numElements, int level) { auto stream = reinterpret_cast<cudaStream_t *>(&extras[1]); auto g_scanBlockSums = reinterpret_cast<int **>(&extras[2]); int blockSize = 512; // max size of the thread blocks int numBlocks = nd4j::math::nd4j_max<int>(1, static_cast<int>(ceil(static_cast<float>(numElements) / (2.f * blockSize)))); int numThreads; if (numBlocks > 1) numThreads = blockSize; else if (nd4j::isPowerOfTwo(numElements)) numThreads = numElements / 2; else numThreads = nd4j::floorPow2(numElements); int numEltsPerBlock = numThreads * 2; // if this is a non-power-of-2 array, the last block will be non-full // compute the smallest power of 2 able to compute its scan. 
int numEltsLastBlock = numElements - (numBlocks-1) * numEltsPerBlock; int numThreadsLastBlock = nd4j::math::nd4j_max<int>(1, numEltsLastBlock / 2); int np2LastBlock = 0; int sharedMemLastBlock = 0; if (numEltsLastBlock != numEltsPerBlock) { np2LastBlock = 1; if(!isPowerOfTwo(numEltsLastBlock)) numThreadsLastBlock = floorPow2(numEltsLastBlock); unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS; sharedMemLastBlock = sizeof(int) * (2 * numThreadsLastBlock + extraSpace); } // padding space is used to avoid shared memory bank conflicts int extraSpace = numEltsPerBlock / NUM_BANKS; int sharedMemSize = sizeof(int) * (numEltsPerBlock + extraSpace); // setup execution parameters // if NP2, we process the last block separately dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1); dim3 threads(numThreads, 1, 1); dim3 gridOnes(1, 1, 1); dim3 threadsOnes(numThreadsLastBlock, 1, 1); if (sharedMemSize < 2048) sharedMemSize = 2048; if (sharedMemLastBlock < 2048) sharedMemLastBlock = 2048; // execute the scan if (numBlocks > 1) { nd4j::prescanLauncher<true, false>(grid, threads, sharedMemSize, stream, dZ, dX, g_scanBlockSums[level], numThreads * 2, 0, 0); if (np2LastBlock) { nd4j::prescanLauncher<true, true>(gridOnes, threadsOnes, sharedMemLastBlock, stream, dZ, dX, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock); } // After scanning all the sub-blocks, we are mostly done. But now we // need to take all of the last values of the sub-blocks and scan those. // This will give us a new value that must be sdded to each block to // get the final results. // recursive (CPU) call prescanArrayRecursive(extras, g_scanBlockSums[level], g_scanBlockSums[level], numBlocks, level+1); nd4j::uniformAdd<<<grid, threads, 1024, *stream>>>(dZ, g_scanBlockSums[level], numElements - numEltsLastBlock, 0, 0); if (np2LastBlock) { nd4j::uniformAdd<<<1, numThreadsLastBlock, 1024, *stream>>>(dZ, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock); } } else if (isPowerOfTwo(numElements)) { nd4j::prescanLauncher<false, false>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numThreads * 2, 0, 0); } else { nd4j::prescanLauncher<false, true>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numElements, 0, 0); } } void NativeOps::encodeThresholdP1(Nd4jPointer *extras, void *dx, Nd4jLong *hXShapeInfo, Nd4jLong N, int *dz, float threshold) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]); int blockSize = 1024; int numBlocks = N / blockSize + (N % blockSize ? 1 : 0); dim3 launchDims(numBlocks, blockSize, 1024); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); BUILD_SINGLE_SELECTOR(xType, encoderKernelP1Generic, (launchDims, stream, dx, N, dz, threshold), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP1Float(...) failed"); } void NativeOps::encodeThresholdP2Int(Nd4jPointer *extraPointers, int *dx, Nd4jLong N, int *dz) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); //encoderKernelP2Float<<<numBlocks, blockSize , 1024 * sizeof(float), *stream>>>(dx, N, dz); prescanArrayRecursive(extraPointers, dz, dx + 1, (int) N, 0); nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP2Int(...) failed"); } void NativeOps::encodeThresholdP3(Nd4jPointer *extraPointers, void *dx, Nd4jLong *hXShapeInfo, int *offsets, Nd4jLong N, int *dz){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int blockSize = 1024; int numBlocks = N / blockSize + (N % blockSize ? 
1 : 0); dim3 launchDims(numBlocks, blockSize, 4096); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); BUILD_SINGLE_SELECTOR(xType, encoderKernelP3Generic, (launchDims, stream, dx, offsets, N, dz), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP3Float(...) failed"); } void NativeOps::decodeThreshold(Nd4jPointer *extraPointers, void *dx, Nd4jLong N, void *dz, Nd4jLong *zShapeInfo){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); // we probably want to have smaller blocks here, memory writes are misaligned anyway int blockSize = 128; int numBlocks = N / blockSize + (N % blockSize ? 1 : 0); dim3 launchDims(numBlocks, blockSize, 1024); auto zType = nd4j::ArrayOptions::dataType(zShapeInfo); BUILD_SINGLE_SELECTOR(zType, decoderKernelGeneric, (launchDims, stream, dx, N, dz), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "decodeThresholdFloat(...) failed"); } void NativeOps::execReduce3All(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParamsVals, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, int *dimension, int dimensionLength, Nd4jLong *xTadShapeInfo, Nd4jLong *xOffsets, Nd4jLong *yTadShapeInfo, Nd4jLong *yOffsets) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D119 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); dim3 launchDims(shape::length(hZShapeInfo), 256, 32768); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AD119 opNum:[%i]\n", opNum); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); if (yType != xType) throw nd4j::datatype_exception::build("NativeOps::execReduce3All both operands must have same data type", xType, yType); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce3::Reduce3, ::execAll(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParamsVals, dZ, dZShapeInfo, dimension, dimensionLength, 1, allocationPointer, xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets), LIBND4J_TYPES, FLOAT_TYPES); DEBUG_KERNEL(stream, opNum); } void NativeOps::sort(Nd4jPointer *extraPointers, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, bool descending) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[ 1]); auto xLength = shape::length(xShapeInfo); auto xEWS = shape::elementWiseStride(xShapeInfo); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); // check if xLength is a power of 2, and use bitonic sort, if that's the case if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) { int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; dim3 launchDims(numBlocks, numThreads, 32768); for (int k = 2; k <= xLength; k = 2*k) { for (int j = k >> 1; j > 0; j = j >> 1) { BUILD_SINGLE_SELECTOR(xType, bitonicSortStepGeneric, (launchDims, stream, dX, dXShapeInfo, j, k, xLength, descending), LIBND4J_TYPES); } } } else { int numThreads = nd4j::math::nd4j_min<int>(512, 
xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; numBlocks = nd4j::math::nd4j_min<int>(512, numBlocks); dim3 launchDims(numBlocks, numThreads, 32768); int max = 2, dg = 0; while (max < xLength) { max <<= 1; dg++; } max <<= 1; for (int window = 2; window < max; window<<=1) { int n = window; int rev = 0; do{ int half = n >> 1; BUILD_SINGLE_SELECTOR(xType, bitonicArbitraryStepGeneric, (launchDims, stream, dX, dXShapeInfo, n, xLength, rev, descending), LIBND4J_TYPES); n>>=1; rev = 1; } while(n > 1); } } nd4j::DebugHelper::checkErrorCode(stream, "sort(...) failed"); } void NativeOps::sortTad(Nd4jPointer *extraPointers, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool descending) { // to be implemented cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); dim3 launchDims(512, 512, 32768); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); BUILD_SINGLE_SELECTOR(xType, oesTadGeneric, (launchDims, stream, dX, dXShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "sortTadFloat(...) failed"); } void NativeOps::sortCooIndices(Nd4jPointer *extraPointers, Nd4jLong *indices, void *values, Nd4jLong length, int rank) { throw std::runtime_error("sortCooIndices:: Not implemented yet"); } Nd4jLong NativeOps::encodeBitmap(Nd4jPointer *extraPointers, void *dx, Nd4jLong *hXShapeInfo, Nd4jLong N, int *dz, float threshold) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *resultPointer = reinterpret_cast<int *>(extraPointers[2]); int *reductionPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims(512, 512, 32768); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); BUILD_SINGLE_SELECTOR(xType, cudaEncodeBitmapGeneric, (launchDims, stream, dx, N, dz, resultPointer, reductionPointer, threshold), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "encodeBitmapFloat(...) failed"); Nd4jLong dZ = (Nd4jLong) resultPointer[0]; resultPointer[0] = 0; return dZ; } void NativeOps::decodeBitmap(Nd4jPointer *extraPointers, void *dx, Nd4jLong N, void *dz, Nd4jLong *zShapeInfo) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); dim3 launchDims(512, 512, 16384); auto xType = nd4j::ArrayOptions::dataType(zShapeInfo); BUILD_SINGLE_SELECTOR(xType, cudaDecodeBitmapGeneric, (launchDims, stream, dx, N, dz), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "decodeBitmapFloat(...) 
failed"); } Nd4jLong* NativeOps::mmapFile(Nd4jPointer *extraPointers, const char *fileName, Nd4jLong length) { return nullptr; } void NativeOps::munmapFile(Nd4jPointer *extraPointers, Nd4jLong* ptrMap, Nd4jLong length) { } nd4j::graph::ResultWrapper* NativeOps::executeFlatGraph(Nd4jPointer *extraPointers, Nd4jPointer flatBufferPointer) { return nullptr; } const char* NativeOps::getAllCustomOps() { return nd4j::ops::OpRegistrator::getInstance()->getAllCustomOperations(); } nd4j::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, nd4j::ops::DeclarableOp* op, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) { nd4j::graph::VariableSpace varSpace; Context block(2, &varSpace); nd4j::ShapeList inShapes; for (int e = 0; e < numIArgs; e++) block.getIArguments()->push_back(iArgs[e]); for (int e = 0; e < numTArgs; e++) block.getTArguments()->push_back(tArgs[e]); for (int e = 0; e < numInputShapes; e++) { auto shape_ = reinterpret_cast<Nd4jLong *>(inputShapes[e]); // we shouldn't copy buffer if that's empty array void *buffer_ = nd4j::ArrayOptions::arrayType(shape_) == ArrayType::EMPTY ? nullptr : inputBuffers[e]; auto array = new nd4j::NDArray(buffer_, shape_); array->triggerAllocationFlag(false, false); // block should contain references to proper variable varSpace.putVariable(1, e, array); block.pickInput(1, e); inShapes.push_back(shape_); } auto shapeList = op->calculateOutputShape(&inShapes, block); if (varSpace.workspace() != nullptr) shapeList->detach(); return shapeList; } nd4j::ShapeList* NativeOps::calculateOutputShapes(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash); return _calculateOutputShapes(extraPointers, op, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs); } nd4j::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, nd4j::ops::DeclarableOp* op, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) { Context block(1); nd4j::ShapeList inShapes; for (int e = 0; e < numIArgs; e++) block.getIArguments()->push_back(iArgs[e]); for (int e = 0; e < numTArgs; e++) block.getTArguments()->push_back(tArgs[e]); for (int e = 0; e < numInputShapes; e++) inShapes.push_back(reinterpret_cast<Nd4jLong *>(inputShapes[e])); auto shapeList = op->calculateOutputShape(&inShapes, block); return shapeList; } nd4j::ShapeList* NativeOps::calculateOutputShapes(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash); return _calculateOutputShapes(extraPointers, op, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs); } static FORCEINLINE Nd4jStatus realExec(nd4j::ops::DeclarableOp* op, Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool* bArgs, int numBArgs, bool isInplace) { if (op == nullptr) nd4j_printf("Can't find requested operation: [%lld]\n", hash); // we're using the same fake nodeId everywhere here std::vector<nd4j::NDArray*> inputs(numInputs); 
std::vector<nd4j::NDArray*> outputs(numOutputs); std::vector<double> ttArgs(numTArgs); std::vector<bool> bbArgs(0); std::vector<Nd4jLong> iiArgs(numIArgs); // filling block now with inputs for (int e = 0; e < numInputs; e++) { auto shape = reinterpret_cast<Nd4jLong *>(inputShapes[e]); void *buffer = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[e]; inputs[e] = new nd4j::NDArray(buffer, shape); } // if not inplace - transferring output arrays if (!isInplace) for (int e = 0; e < numOutputs; e++) { // we want to keep original output shape intact auto shape = shape::copyShape(reinterpret_cast<Nd4jLong *>(outputShapes[e])); void *buffer = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : outputBuffers[e]; auto array = new nd4j::NDArray(buffer, shape); outputs[e] = array; // and we want to release shape copy once we're done array->triggerAllocationFlag(false, true); } for (int e = 0; e < numIArgs; e++) iiArgs[e] = iArgs[e]; for (int e = 0; e < numTArgs; e++) ttArgs[e] = tArgs[e]; // hypothetically at this point we have everything filled auto dZ = op->execute(inputs, outputs, ttArgs, iiArgs, bbArgs, isInplace); //auto dZ = op->execute(inputs, ttArgs, iiArgs, isInplace); if (!isInplace) for (int e = 0; e < numOutputs; e++) { //shape::printShapeInfoLinear("JVM output shape", (int *) outputShapes[e]); //shape::printShapeInfoLinear("C++ output shape", (int *) outputs[e]->shapeInfo()); //outputs[e]->printIndexedBuffer("C++ raw output"); //outputs[e]->printBuffer("C++ indexed output"); if (outputs[e]->ordering() != shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e]))) outputs[e]->streamline(shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e]))); } /* if (!isInplace) { if (dZ->size() != numOutputs) { return ND4J_STATUS_BAD_OUTPUT; } for (int e = 0; e < numOutputs; e++) { auto buffer = (T *) outputBuffers[e]; auto shape = (int *) outputShapes[e]; nd4j::NDArray<T> tmp(buffer, shape); if (tmp.lengthOf() != dZ->at(e)->lengthOf()) { nd4j_printf("Provided output array for [%s] has length of %i, but actual dZ has length of %i\n", op->getOpName()->c_str(), tmp.lengthOf(), dZ->at(e)->lengthOf()); return ND4J_STATUS_BAD_OUTPUT; } tmp.assign(dZ->at(e)); } } else { // if op is inplace, our ResultSet holds pointers dZ->purge(); } delete dZ; */ for (auto v: inputs) delete v; for (auto v: outputs) delete v; return Status::OK(); } int NativeOps::execCustomOp(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool* bArgs, int numBArgs, bool isInplace) { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash); return realExec(op, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs, tArgs, numTArgs, iArgs, numIArgs, bArgs, numBArgs, isInplace); } int NativeOps::registerGraph(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer flatBufferPointer) { auto graph = nd4j::graph::GraphExecutioner::importFromFlatPointer(flatBufferPointer); nd4j::graph::GraphHolder::getInstance()->registerGraph(graphId, graph); return ND4J_STATUS_OK; } static VariablesSet* executeStoredGraphT(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) { auto graph = nd4j::graph::GraphHolder::getInstance()->pullGraph(graphId); auto varSpace = 
graph->getVariableSpace()->clone(); std::vector<nd4j::NDArray*> handles; for (int e = 0; e < numInputs; e++) { auto idx = inputIndices[e]; // we'll delete this array later, together with cloned VariableSpace auto array = new nd4j::NDArray(inputBuffers[e], reinterpret_cast<Nd4jLong *>(inputShapes[e])); handles.emplace_back(array); if (varSpace->hasVariable(idx)) { auto var = varSpace->getVariable(idx); if (var->hasNDArray()) delete var->getNDArray(); var->setNDArray(array); } else varSpace->putVariable(idx, array); } auto dZ = nd4j::graph::GraphExecutioner::execute(graph, varSpace); auto varSet = new nd4j::graph::VariablesSet(dZ); if (dZ == ND4J_STATUS_OK) { // pull back results, and provide them auto outputs = graph->fetchOutputs(); for (int e = 0; e < outputs->size(); e++) { // we're only getting variable ID/Index from original grap. values will be taken from cloned workspace std::pair<int, int> varId(outputs->at(e)->id(), outputs->at(e)->index()); auto var = varSpace->getVariable(varId); varSet->push_back(var->clone()); } delete outputs; } delete varSpace; return varSet; } VariablesSet* NativeOps::executeStoredGraph(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) { return executeStoredGraphT(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs); } int NativeOps::unregisterGraph(Nd4jPointer *extraPointers, Nd4jLong graphId) { nd4j::graph::GraphHolder::getInstance()->dropGraphAny(graphId); return ND4J_STATUS_OK; } void NativeOps::deletePointerArray(Nd4jPointer pointer) { Nd4jPointer *ptr = reinterpret_cast<Nd4jPointer *>(pointer); delete[] ptr; } void NativeOps::deleteIntArray(Nd4jPointer pointer) { auto ptr = reinterpret_cast<int *>(pointer); delete[] ptr; } void NativeOps::deleteLongArray(Nd4jPointer pointer) { auto ptr = reinterpret_cast<Nd4jLong *>(pointer); delete[] ptr; } template <typename T> static void deleteVariablesSetT(Nd4jPointer pointer) { nd4j::graph::VariablesSet* ptr = reinterpret_cast<nd4j::graph::VariablesSet*>(pointer); delete ptr; } void NativeOps::deleteVariablesSet(Nd4jPointer pointer) { deleteVariablesSetT<double>(pointer); } void NativeOps::deleteShapeList(Nd4jPointer shapeList) { nd4j::ShapeList* list = reinterpret_cast<nd4j::ShapeList*>(shapeList); list->destroy(); delete list; } const char* NativeOps::getAllOperations() { return nd4j::OpTracker::getInstance()->exportOperations(); } Nd4jPointer NativeOps::getGraphState(Nd4jLong id) { return (Nd4jPointer) new nd4j::graph::GraphState(id); } void NativeOps::deleteGraphState(Nd4jPointer state) { auto stateP = reinterpret_cast<nd4j::graph::GraphState*>(state); delete stateP; } Nd4jStatus execCustomOpWithScope(Nd4jPointer *extraPointers, nd4j::graph::GraphState *state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) { /** * That's basically exec, with VariableSpace provided in GraphState: * depending on operation (i.e. 
while of if), different logic executors could be used */ auto graph = state->graph(); auto varSpace = state->variableSpace(); // Node is dynamically created, and has nothing beyond it: only inputs and outputs // this node has id of 0, and inputs are Node node(OpType_LOGIC, opHash, 0); // mapping inputs for (int e = 0; e < numInputs; e++) { auto buffer = inputBuffers[e]; auto shapeInfo = reinterpret_cast<Nd4jLong *>(inputShapes[e]); auto array = new nd4j::NDArray(buffer, shapeInfo, varSpace->workspace()); // now we just put array to VarSpace varSpace->putVariable(0, e, array); node.pickInput(0, e); } // mapping scopes for (int e = 0; e < numScopes; e++) { // we should check scope existence in GraphState/Graph int scopeId = (int) scopes[e]; if (!state->hasScope(scopeId)) { // nd4j_printf("execCustomOpWithScope: referenced scope [%i] doesn't exist\n", scopeId); return Status::THROW(); } node.pickInput(scopeId, 0); } auto dZ = LogicExecutor::processNode(graph, &node); if (dZ != Status::OK()) return dZ; // mapping outputs for (int e = 0; e < numOutputs; e++) { auto buffer = outputBuffers[e]; auto shapeInfo = reinterpret_cast<Nd4jLong *>(outputShapes[e]); NDArray array(buffer, shapeInfo, varSpace->workspace()); // now we just put array to VarSpace to the same ID //varSpace->putVariable(0, e, array); auto t = varSpace->getVariable(0, e)->getNDArray(); array.assign(t); } // removing input variables for (int e = 0; e < numInputs; e++) { varSpace->dropVariable(0, e); } // after some bla-bla-bla we should have Graph and Node for current op return Status::OK(); } Nd4jStatus NativeOps::execCustomOpWithScope(Nd4jPointer *extraPointers, Nd4jPointer state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) { return execCustomOpWithScope(extraPointers, reinterpret_cast<nd4j::graph::GraphState*>(state), opHash, scopes, numScopes, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs); } void NativeOps::deleteResultWrapper(Nd4jPointer ptr) { // just 0 room for compiler s@!t auto p = reinterpret_cast<nd4j::graph::ResultWrapper *>(ptr); delete p; } int NativeOps::estimateThreshold(Nd4jPointer *extraPointers, Nd4jPointer dX, Nd4jLong *dXShapeInfo, int N, float threshold) { throw std::runtime_error("estimateThreshold: Not implemented yet"); } /* * TypeDef: * void convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer dX, long N, int dstType, Nd4jPointer dZ); */ void NativeOps::convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer dX, Nd4jLong N, int dstType, Nd4jPointer dZ) { auto dx = reinterpret_cast<void *>(dX); auto dz = reinterpret_cast<void *>(dZ); if (srcType == ND4J_FLOAT8) { if (dstType == ND4J_FLOAT8) { // convertKernel<double, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::int8>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::uint8>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::int16>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::uint16>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { } else if (dstType == ND4J_FLOAT32) { 
//nd4j::TypeCast::convertGenericCuda<nd4j::float8, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, double>(extras, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_INT8) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<nd4j::int8, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { //convertKernel<nd4j::int8, nd4j::int8>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<int8_t, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<int8_t, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<int8_t, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<int8_t, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { // TODO: eventually we might want to add it } else if (dstType == ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<int8_t, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { nd4j::TypeCast::convertGenericCuda<int8_t, double>(extras, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_UINT8) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<uint8_t, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<uint8_t, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<uint8_t, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<uint8_t, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<uint8_t, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<uint8_t, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { // TODO: still might want to add } else if (dstType == ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<uint8_t, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { nd4j::TypeCast::convertGenericCuda<uint8_t, double>(extras, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_FLOAT16) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<float16, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<float16, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<float16, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<float16, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<float16, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<float16, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { // TODO: .... 
^^^ } else if (dstType == ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<float16, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { nd4j::TypeCast::convertGenericCuda<float16, double>(extras, dx, N, dz); } else if (dstType == ND4J_THRESHOLD) { //nd4j::convertToThreshold<float16>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_INT16) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<int16_t, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<int16_t, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<int16_t, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<int16_t, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<int16_t, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<int16_t, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { // TODO... } else if (dstType == ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<int16_t, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { nd4j::TypeCast::convertGenericCuda<int16_t, double>(extras, dx, N, dz); } else { printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_FLOAT24) { } else if (srcType == ND4J_FLOAT32) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<float, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<float, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<float, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<float, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<float, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<float, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { } else if (dstType == ND4J_DOUBLE) { nd4j::TypeCast::convertGenericCuda<float, double>(extras, dx, N, dz); } else if (dstType == ND4J_THRESHOLD) { //nd4j::convertToThreshold<float>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_DOUBLE) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<double, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<double, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<double, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<double, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<double, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<double, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { } else if (dstType == ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<double, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { // } else if (dstType == ND4J_THRESHOLD) { //nd4j::convertToThreshold<double>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, 
dstType); } } else if (srcType == ND4J_THRESHOLD) { if (dstType == ND4J_FLOAT16) { //nd4j::convertFromThreshold<float16>(nullptr, dx, N, dz); } else if (dstType == ND4J_FLOAT32) { //nd4j::convertFromThreshold<float>(nullptr, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { //nd4j::convertFromThreshold<double>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } Nd4jPointer NativeOps::createUtf8String(Nd4jPointer *extraPointers, const char *string, int length) { auto u = new nd4j::utf8string(string, length); return reinterpret_cast<Nd4jPointer>(u); } void NativeOps::deleteUtf8String(Nd4jPointer *extraPointers, Nd4jPointer ptr) { delete(reinterpret_cast<nd4j::utf8string*>(ptr)); }
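Every supported branch of the convertTypes dispatch above ends in a call like nd4j::TypeCast::convertGenericCuda<S, T>(extras, dx, N, dz). The snippet below is only a rough, self-contained sketch of the element-wise cast such a call presumably performs; castKernelSketch, its launch shape, and the use of long long in place of Nd4jLong are illustrative assumptions, not libnd4j's actual implementation.

// Illustrative only: a grid-stride, element-wise cast between two untyped
// device buffers. Names and launch configuration are assumptions; the real
// TypeCast::convertGenericCuda in libnd4j may be organised differently.
#include <cstdint>

template <typename S, typename T>
__global__ void castKernelSketch(const void *vx, long long n, void *vz) {
    auto x = reinterpret_cast<const S *>(vx);
    auto z = reinterpret_cast<T *>(vz);
    long long i = static_cast<long long>(blockIdx.x) * blockDim.x + threadIdx.x;
    long long stride = static_cast<long long>(gridDim.x) * blockDim.x;
    for (; i < n; i += stride)
        z[i] = static_cast<T>(x[i]);   // plain numeric cast, one element per step
}

// e.g. the ND4J_INT8 -> ND4J_FLOAT32 branch would boil down to something like:
// castKernelSketch<int8_t, float><<<256, 256, 0, *stream>>>(dx, N, dz);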
0703f0d2eb4c63c32ef109c343673dc5dd5a0d48.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "bvh.h" #include "primitive.h" #include "../watch.h" #include <iostream> #include <fstream> #include <algorithm> #include <bitset> #include <thrust/sort.h> #include <thrust/execution_policy.h> #include "../Utilities.h" #include "../common.h" using namespace std; extern inline void copyFromCPUtoGPU(void** dst, void* src, int size); extern inline void copyFromGPUtoCPU(void** dst, void* src, int size); // Expands a 10-bit integer into 30 bits // by inserting 2 zeros after each bit. __device__ unsigned int d_expandBits(unsigned int v) { v = (v * 0x00010001u) & 0xFF0000FFu; v = (v * 0x00000101u) & 0x0F00F00Fu; v = (v * 0x00000011u) & 0xC30C30C3u; v = (v * 0x00000005u) & 0x49249249u; return v; } // Calculates a 30-bit Morton code for the // given 3D point located within the unit cube [0,1]. __device__ unsigned int d_morton3D(glm::vec3 p) { float x = p.x, float y = p.y, float z = p.z; x = min(max(x * 1024.0f, 0.0f), 1023.0f); y = min(max(y * 1024.0f, 0.0f), 1023.0f); z = min(max(z * 1024.0f, 0.0f), 1023.0f); unsigned int xx = d_expandBits((unsigned int)x); unsigned int yy = d_expandBits((unsigned int)y); unsigned int zz = d_expandBits((unsigned int)z); return xx * 4 + yy * 2 + zz; } __global__ void get_bb(int num, int m, Primitive* d_primitives, BBox* d_bb) { unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= num +1) return; int div = m / num; int res = m%num; if (index == num + 1) { BBox tem_bbox; for (int i = m - res; i < m; i++) { tem_bbox.expand(d_primitives[i].d_get_expand_bbox()); } d_bb[index] = tem_bbox; } else { BBox tem_bbox; for (int i = 0; i < div; i++) //use shared to replace { tem_bbox.expand(d_primitives[i*num + index].d_get_expand_bbox()); } d_bb[index].expand(tem_bbox); } } __global__ void compute_morton_bbox(int num, Primitive* d_primitives, BBox bb, MortonCode* mortons, BBox* bboxes) { unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= num) return; BBox tem_bbox = d_primitives[index].d_get_expand_bbox(); bboxes[index] = tem_bbox; mortons[index] = d_morton3D(bb.getUnitcubePosOf(tem_bbox.centroid())); } // Expands a 10-bit integer into 30 bits // by inserting 2 zeros after each bit. unsigned int BVHAccel::expandBits(unsigned int v) { v = (v * 0x00010001u) & 0xFF0000FFu; v = (v * 0x00000101u) & 0x0F00F00Fu; v = (v * 0x00000011u) & 0xC30C30C3u; v = (v * 0x00000005u) & 0x49249249u; return v; } // Calculates a 30-bit Morton code for the // given 3D point located within the unit cube [0,1]. unsigned int BVHAccel::morton3D(float x, float y, float z) { x = min(max(x * 1024.0f, 0.0f), 1023.0f); y = min(max(y * 1024.0f, 0.0f), 1023.0f); z = min(max(z * 1024.0f, 0.0f), 1023.0f); unsigned int xx = expandBits((unsigned int)x); unsigned int yy = expandBits((unsigned int)y); unsigned int zz = expandBits((unsigned int)z); return xx * 4 + yy * 2 + zz; } /** * a wrapper to calculate morton code from * the position of an object inside the * unit cube. */ unsigned int BVHAccel::morton3D(glm::vec3 pos) { return morton3D(pos.x, pos.y, pos.z); } /** * comparer used to sort primitives acoording * to their morton code. 
*/ BBox BVHAccel::computet_root_bbox(Primitive* d_tem_primitives) { const unsigned int num_threads = 128; vector<BBox> c_bb(num_threads + 1); BBox* d_bb; copyFromCPUtoGPU((void**)&d_bb, &c_bb[0], sizeof(BBox)* c_bb.size()); get_bb << <1, c_bb.size() >> > (num_threads, _primitives.size(), d_tem_primitives, d_bb); BBox* cc_bb, bb; copyFromGPUtoCPU((void**)&cc_bb, d_bb, sizeof(BBox)*c_bb.size()); for (int i = 0; i < c_bb.size(); i++) { bb.expand(cc_bb[i]); } hipFree(d_bb); return bb; } void save(vector<Primitive>& primitives, string file_name) { //ofstream outfile(file_name); //outfile << "# morton code" << endl; //for (auto pri: primitives) //{ // outfile << pri.morton_code << endl; // //} //outfile.close(); //cout << "save done!" << endl; } void BVHAccel::compute_bbox_and_morton() { Primitive* d_tem_primitives; MortonCode* d_tem_morton_codes; BBox* d_tem_bboxes; _morton_codes.resize(_primitives.size()); _bboxes.resize(_primitives.size()); copyFromCPUtoGPU((void**)&d_tem_primitives, &_primitives[0], sizeof(Primitive)*_primitives.size()); copyFromCPUtoGPU((void**)&d_tem_morton_codes, &_morton_codes[0], sizeof(MortonCode)*_morton_codes.size()); copyFromCPUtoGPU((void**)&d_tem_bboxes, &_bboxes[0], sizeof(BBox)*_bboxes.size()); BBox bb = computet_root_bbox(d_tem_primitives); unsigned int numThreads, numBlocks; unsigned int blockSize = 512; unsigned int n = _primitives.size(); numThreads = min(blockSize, n); numBlocks = (n % numThreads != 0) ? (n / numThreads + 1) : (n / numThreads); compute_morton_bbox << <numBlocks, numThreads >> > (n, d_tem_primitives, bb, d_tem_morton_codes, d_tem_bboxes); hipMemcpy(&_morton_codes[0], d_tem_morton_codes, sizeof(MortonCode)*_morton_codes.size(), hipMemcpyDeviceToHost); hipMemcpy(&_bboxes[0], d_tem_bboxes, sizeof(BBox)*_bboxes.size(), hipMemcpyDeviceToHost); hipFree(d_tem_primitives); hipFree(d_tem_morton_codes); hipFree(d_tem_bboxes); } __global__ void init_nodes(BRTreeNode* _nodes,const unsigned int num) { unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= num) return; BRTreeNode node; node.setIdx(index); node.bbox = BBox(); _nodes[index] = node; } void BVHAccel::init() { d_bvh = new D_BVH(); auto size = _sorted_primitives.size(); numInternalNode = size - 1; numLeafNode = size; //whether to set h_vertices = NULL before send to gpu? copyFromCPUtoGPU((void**)&d_bvh->d_primitives, &_sorted_primitives[0], sizeof(Primitive)*_sorted_primitives.size()); copyFromCPUtoGPU((void**)&d_sorted_morton_code, &_sorted_morton_codes[0], sizeof(MortonCode)*_sorted_morton_codes.size()); copyFromCPUtoGPU((void**)&d_bboxes, &_sorted_bboxes[0], sizeof(BBox)*_sorted_bboxes.size()); //initialize d_leaf_nodes and d_internal_nodes: with a parallel way? ????? 
hipMalloc((void**)&d_bvh->d_leaf_nodes, numLeafNode * sizeof(BRTreeNode)); hipMalloc((void**)&d_bvh->d_internal_nodes, numInternalNode * sizeof(BRTreeNode)); int threadPerBlock = DEFAULT_THREAD_PER_BLOCK; int numBlock = (numLeafNode + DEFAULT_THREAD_PER_BLOCK - 1) / threadPerBlock; init_nodes << <numBlock, threadPerBlock >> > (d_bvh->d_leaf_nodes, numLeafNode); threadPerBlock = DEFAULT_THREAD_PER_BLOCK; numBlock = (numInternalNode + DEFAULT_THREAD_PER_BLOCK - 1) / threadPerBlock; init_nodes << <numBlock, threadPerBlock >> > (d_bvh->d_internal_nodes, numInternalNode); } void BVHAccel::build() { //build the bvh int threadPerBlock = DEFAULT_THREAD_PER_BLOCK; int numBlock = (numInternalNode + DEFAULT_THREAD_PER_BLOCK - 1) / threadPerBlock; processInternalNode << <numBlock, threadPerBlock >> > (d_sorted_morton_code, numInternalNode, d_bvh->d_leaf_nodes, d_bvh->d_internal_nodes); //calculate bounding box threadPerBlock = DEFAULT_THREAD_PER_BLOCK; numBlock = (numLeafNode + DEFAULT_THREAD_PER_BLOCK - 1) / threadPerBlock; calculateBoudingBox << <numBlock, threadPerBlock >> > (d_bboxes, numLeafNode, d_bvh->d_leaf_nodes, d_bvh->d_internal_nodes); } void BVHAccel::init_primitives(Mesh& body) { //prepare primitives obj_vertices.resize(body.vertices.size()); for (int i = 0; i < body.vertices.size(); i++) { obj_vertices[i] = glm::vec3(body.vertices[i]); } safe_cuda(hipMalloc((void**)&d_obj_vertices, sizeof(glm::vec3)*obj_vertices.size())); safe_cuda(hipMemcpy(d_obj_vertices, &obj_vertices[0], sizeof(glm::vec3)*obj_vertices.size(), hipMemcpyHostToDevice)); //create primitives glm::vec3* h_obj_vertices = &obj_vertices[0]; _primitives.resize(body.vertex_indices.size() / 3); for (int i = 0; i < _primitives.size(); i++) { Primitive tem_pri(h_obj_vertices, d_obj_vertices, body.vertex_indices[i * 3 + 0], body.vertex_indices[i * 3 + 1], body.vertex_indices[i * 3 + 2]); _primitives[i] = tem_pri; } } BVHAccel::BVHAccel(Mesh& body, size_t max_leaf_size): d_bboxes(nullptr), #ifdef _DEBUG h_leaf_nodes(nullptr), h_internal_nodes(nullptr), #endif d_sorted_morton_code(nullptr) { init_primitives(body); // edge case if (_primitives.empty()) { return; } compute_bbox_and_morton(); // remove duplicates vector<unsigned int> indices; indices_sort(_morton_codes, indices); remove_redundant(_morton_codes, indices); filter(_morton_codes, indices, _sorted_morton_codes); filter(_primitives, indices, _sorted_primitives); filter(_bboxes, indices, _sorted_bboxes); // init GPU data, including d_bboxes,d_primitives, d_sorted_morton_code,d_leaf_nodes, d_internal_nodes init(); // build the brt tree build(); } BVHAccel::~BVHAccel() { hipFree(d_bboxes); hipFree(d_sorted_morton_code); hipFree(d_obj_vertices); // Free d_bvh here cause it has pointer points to gpu memory // and we need to pass the value several times and make sure the // resource not freed, so we can't free it in its own destructor. 
// \BVHAccel controls the lifetieme of \d_bvh, if the destructor // of \BVHAccel called, which means we can free all the resources in // gpu and cpu(Obviously, this violates the "new" and "free" pair priciple) d_bvh->free_memory(); } #ifdef _DEBUG BRTreeNode* BVHAccel::get_leaf_nodes() { copyFromGPUtoCPU((void**)&h_leaf_nodes, d_bvh->d_leaf_nodes, numLeafNode * sizeof(BRTreeNode)); return h_leaf_nodes; } BRTreeNode* BVHAccel::get_internal_nodes() { copyFromGPUtoCPU((void**)&h_internal_nodes, d_bvh->d_internal_nodes, numInternalNode * sizeof(BRTreeNode)); return h_internal_nodes; } BRTreeNode* BVHAccel::get_root() const { return &h_internal_nodes[0]; } BRTreeNode* BVHAccel::get_left_child(BRTreeNode* node)const { bool is_leaf = false; bool is_null = false; int child_idx = false; child_idx = node->getChildA(is_leaf, is_null); if (!is_null) { if (is_leaf) { return &h_leaf_nodes[child_idx]; } else { return &h_internal_nodes[child_idx]; } } else return nullptr; } BRTreeNode* BVHAccel::get_right_child(BRTreeNode* node)const { bool is_leaf = false; bool is_null = false; int child_idx = false; child_idx = node->getChildB(is_leaf, is_null); if (!is_null) { if (is_leaf) { return &h_leaf_nodes[child_idx]; } else { return &h_internal_nodes[child_idx]; } } else return nullptr; } bool BVHAccel::is_leaf(BRTreeNode* node)const { bool is_leaf = false; bool is_null_a = false; bool is_null_b = false; int child_idx_a = false; int child_idx_b = false; child_idx_a = node->getChildA(is_leaf, is_null_a); child_idx_b = node->getChildB(is_leaf, is_null_b); if (is_null_a && is_null_b) return true; return false; } bool BVHAccel::intersect(const glm::vec3 point, int& idx) const { // Allocate traversal stack from thread-local memory, // and push NULL to indicate that there are no postponed nodes. BRTreeNode* stack[64]; BRTreeNode** stackPtr = stack; *stackPtr++ = NULL; // push // Traverse nodes starting from the root. BRTreeNode* node = get_root(); do { // Check each child node for overlap. BRTreeNode* childA = get_left_child(node); BRTreeNode* childB = get_right_child(node); bool overlapL = check_overlap(point, childA); bool overlapR = check_overlap(point, childB); // Query overlaps a leaf node => report collision with the first collision. if (overlapL && is_leaf(childA)) { idx = childA->getIdx(); //idx = -(idx + 1); //is a leaf, and we can get it through primitive[idx] return true; } if (overlapR && is_leaf(childB)) { idx = childB->getIdx(); //idx = -(idx + 1); //is a leaf return true; } // Query overlaps an internal node => traverse. bool traverseL = (overlapL && !is_leaf(childA)); bool traverseR = (overlapR && !is_leaf(childB)); if (!traverseL && !traverseR) node = *--stackPtr; // pop else { node = (traverseL) ? 
childA : childB; if (traverseL && traverseR) *stackPtr++ = childB; // push } } while (node != NULL); return false; } bool BVHAccel::check_overlap(const glm::vec3 point, BRTreeNode* node)const { return node->bbox.intersect(point); } void BVHAccel::access(BRTreeNode* root, vector<BRTreeNode*>& bad_bode) { if (root->bbox.min.x > root->bbox.max.x) { if (is_leaf(root)) { bad_bode.push_back(root); return; } else { access(get_left_child(root), bad_bode); access(get_right_child(root), bad_bode); } } } void BVHAccel::copy_data_gpu_to_cpu() { copyFromGPUtoCPU((void**)&h_internal_nodes, d_bvh->d_internal_nodes, sizeof(BRTreeNode)*numInternalNode); copyFromGPUtoCPU((void**)&h_leaf_nodes, d_bvh->d_leaf_nodes, sizeof(BRTreeNode)*numLeafNode); } // call copy_data_gpu_to_cpu() before print void BVHAccel::print(BRTreeNode* root, int depth, const int max_depth) { depth++; if (depth > max_depth) return; bool is_null = false; cout << root->getIdx() << " " << root->getParent(is_null); root->bbox.print(); if (is_leaf(root)) { return; } else { is_null = false; cout << " left:" << get_left_child(root)->getIdx() << " " << get_left_child(root)->getParent(is_null); get_left_child(root)->bbox.print(); is_null = false; cout << " right:" << get_right_child(root)->getIdx() << " "<< get_right_child(root)->getParent(is_null); get_right_child(root)->bbox.print(); print(get_left_child(root),depth +1, max_depth); print(get_right_child(root), depth + 1, max_depth); } } // call copy_data_gpu_to_cpu() before draw void BVHAccel::draw(BRTreeNode* root) { //root->bbox.draw(); bool is_null = false; cout << root->getIdx() << " parent_id: " << root->getParent(is_null) << " "; bool is_leaf_a = false; bool is_null_a = false; bool is_null_b = false; int child_idx_a = false; int child_idx_b = false; child_idx_a = root->getChildA(is_leaf_a, is_null_a); cout << "left_id " << child_idx_a << " is_leaf_a" << is_leaf_a; child_idx_b = root->getChildB(is_leaf_a, is_null_b); cout << "right_id " << child_idx_b << " is_leaf_a" << is_leaf_a; root->bbox.print(); if (is_leaf(root)) { //cout << "is_leaf"; //bool is_leaf = false; //bool is_null_a = false; //bool is_null_b = false; //int child_idx_a = false; //int child_idx_b = false; //child_idx_a = root->getChildA(is_leaf, is_null_a); //cout << "left_id " << child_idx_a << " is_leaf" << is_leaf; //child_idx_b = root->getChildB(is_leaf, is_null_b); //cout << "right_id " << child_idx_b << " is_leaf" << is_leaf; return; } else { draw(get_left_child(root)); draw(get_right_child(root)); } } #endif
0703f0d2eb4c63c32ef109c343673dc5dd5a0d48.cu
#include "bvh.h" #include "primitive.h" #include "../watch.h" #include <iostream> #include <fstream> #include <algorithm> #include <bitset> #include <thrust/sort.h> #include <thrust/execution_policy.h> #include "../Utilities.h" #include "../common.h" using namespace std; extern inline void copyFromCPUtoGPU(void** dst, void* src, int size); extern inline void copyFromGPUtoCPU(void** dst, void* src, int size); // Expands a 10-bit integer into 30 bits // by inserting 2 zeros after each bit. __device__ unsigned int d_expandBits(unsigned int v) { v = (v * 0x00010001u) & 0xFF0000FFu; v = (v * 0x00000101u) & 0x0F00F00Fu; v = (v * 0x00000011u) & 0xC30C30C3u; v = (v * 0x00000005u) & 0x49249249u; return v; } // Calculates a 30-bit Morton code for the // given 3D point located within the unit cube [0,1]. __device__ unsigned int d_morton3D(glm::vec3 p) { float x = p.x, float y = p.y, float z = p.z; x = min(max(x * 1024.0f, 0.0f), 1023.0f); y = min(max(y * 1024.0f, 0.0f), 1023.0f); z = min(max(z * 1024.0f, 0.0f), 1023.0f); unsigned int xx = d_expandBits((unsigned int)x); unsigned int yy = d_expandBits((unsigned int)y); unsigned int zz = d_expandBits((unsigned int)z); return xx * 4 + yy * 2 + zz; } __global__ void get_bb(int num, int m, Primitive* d_primitives, BBox* d_bb) { unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= num +1) return; int div = m / num; int res = m%num; if (index == num + 1) { BBox tem_bbox; for (int i = m - res; i < m; i++) { tem_bbox.expand(d_primitives[i].d_get_expand_bbox()); } d_bb[index] = tem_bbox; } else { BBox tem_bbox; for (int i = 0; i < div; i++) //use shared to replace { tem_bbox.expand(d_primitives[i*num + index].d_get_expand_bbox()); } d_bb[index].expand(tem_bbox); } } __global__ void compute_morton_bbox(int num, Primitive* d_primitives, BBox bb, MortonCode* mortons, BBox* bboxes) { unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= num) return; BBox tem_bbox = d_primitives[index].d_get_expand_bbox(); bboxes[index] = tem_bbox; mortons[index] = d_morton3D(bb.getUnitcubePosOf(tem_bbox.centroid())); } // Expands a 10-bit integer into 30 bits // by inserting 2 zeros after each bit. unsigned int BVHAccel::expandBits(unsigned int v) { v = (v * 0x00010001u) & 0xFF0000FFu; v = (v * 0x00000101u) & 0x0F00F00Fu; v = (v * 0x00000011u) & 0xC30C30C3u; v = (v * 0x00000005u) & 0x49249249u; return v; } // Calculates a 30-bit Morton code for the // given 3D point located within the unit cube [0,1]. unsigned int BVHAccel::morton3D(float x, float y, float z) { x = min(max(x * 1024.0f, 0.0f), 1023.0f); y = min(max(y * 1024.0f, 0.0f), 1023.0f); z = min(max(z * 1024.0f, 0.0f), 1023.0f); unsigned int xx = expandBits((unsigned int)x); unsigned int yy = expandBits((unsigned int)y); unsigned int zz = expandBits((unsigned int)z); return xx * 4 + yy * 2 + zz; } /** * a wrapper to calculate morton code from * the position of an object inside the * unit cube. */ unsigned int BVHAccel::morton3D(glm::vec3 pos) { return morton3D(pos.x, pos.y, pos.z); } /** * comparer used to sort primitives acoording * to their morton code. 
*/ BBox BVHAccel::computet_root_bbox(Primitive* d_tem_primitives) { const unsigned int num_threads = 128; vector<BBox> c_bb(num_threads + 1); BBox* d_bb; copyFromCPUtoGPU((void**)&d_bb, &c_bb[0], sizeof(BBox)* c_bb.size()); get_bb << <1, c_bb.size() >> > (num_threads, _primitives.size(), d_tem_primitives, d_bb); BBox* cc_bb, bb; copyFromGPUtoCPU((void**)&cc_bb, d_bb, sizeof(BBox)*c_bb.size()); for (int i = 0; i < c_bb.size(); i++) { bb.expand(cc_bb[i]); } cudaFree(d_bb); return bb; } void save(vector<Primitive>& primitives, string file_name) { //ofstream outfile(file_name); //outfile << "# morton code" << endl; //for (auto pri: primitives) //{ // outfile << pri.morton_code << endl; //数据写入文件 //} //outfile.close(); //cout << "save done!" << endl; } void BVHAccel::compute_bbox_and_morton() { Primitive* d_tem_primitives; MortonCode* d_tem_morton_codes; BBox* d_tem_bboxes; _morton_codes.resize(_primitives.size()); _bboxes.resize(_primitives.size()); copyFromCPUtoGPU((void**)&d_tem_primitives, &_primitives[0], sizeof(Primitive)*_primitives.size()); copyFromCPUtoGPU((void**)&d_tem_morton_codes, &_morton_codes[0], sizeof(MortonCode)*_morton_codes.size()); copyFromCPUtoGPU((void**)&d_tem_bboxes, &_bboxes[0], sizeof(BBox)*_bboxes.size()); BBox bb = computet_root_bbox(d_tem_primitives); unsigned int numThreads, numBlocks; unsigned int blockSize = 512; unsigned int n = _primitives.size(); numThreads = min(blockSize, n); numBlocks = (n % numThreads != 0) ? (n / numThreads + 1) : (n / numThreads); compute_morton_bbox << <numBlocks, numThreads >> > (n, d_tem_primitives, bb, d_tem_morton_codes, d_tem_bboxes); cudaMemcpy(&_morton_codes[0], d_tem_morton_codes, sizeof(MortonCode)*_morton_codes.size(), cudaMemcpyDeviceToHost); cudaMemcpy(&_bboxes[0], d_tem_bboxes, sizeof(BBox)*_bboxes.size(), cudaMemcpyDeviceToHost); cudaFree(d_tem_primitives); cudaFree(d_tem_morton_codes); cudaFree(d_tem_bboxes); } __global__ void init_nodes(BRTreeNode* _nodes,const unsigned int num) { unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= num) return; BRTreeNode node; node.setIdx(index); node.bbox = BBox(); _nodes[index] = node; } void BVHAccel::init() { d_bvh = new D_BVH(); auto size = _sorted_primitives.size(); numInternalNode = size - 1; numLeafNode = size; //whether to set h_vertices = NULL before send to gpu? copyFromCPUtoGPU((void**)&d_bvh->d_primitives, &_sorted_primitives[0], sizeof(Primitive)*_sorted_primitives.size()); copyFromCPUtoGPU((void**)&d_sorted_morton_code, &_sorted_morton_codes[0], sizeof(MortonCode)*_sorted_morton_codes.size()); copyFromCPUtoGPU((void**)&d_bboxes, &_sorted_bboxes[0], sizeof(BBox)*_sorted_bboxes.size()); //initialize d_leaf_nodes and d_internal_nodes: with a parallel way? ????? 
cudaMalloc((void**)&d_bvh->d_leaf_nodes, numLeafNode * sizeof(BRTreeNode)); cudaMalloc((void**)&d_bvh->d_internal_nodes, numInternalNode * sizeof(BRTreeNode)); int threadPerBlock = DEFAULT_THREAD_PER_BLOCK; int numBlock = (numLeafNode + DEFAULT_THREAD_PER_BLOCK - 1) / threadPerBlock; init_nodes << <numBlock, threadPerBlock >> > (d_bvh->d_leaf_nodes, numLeafNode); threadPerBlock = DEFAULT_THREAD_PER_BLOCK; numBlock = (numInternalNode + DEFAULT_THREAD_PER_BLOCK - 1) / threadPerBlock; init_nodes << <numBlock, threadPerBlock >> > (d_bvh->d_internal_nodes, numInternalNode); } void BVHAccel::build() { //build the bvh int threadPerBlock = DEFAULT_THREAD_PER_BLOCK; int numBlock = (numInternalNode + DEFAULT_THREAD_PER_BLOCK - 1) / threadPerBlock; processInternalNode << <numBlock, threadPerBlock >> > (d_sorted_morton_code, numInternalNode, d_bvh->d_leaf_nodes, d_bvh->d_internal_nodes); //calculate bounding box threadPerBlock = DEFAULT_THREAD_PER_BLOCK; numBlock = (numLeafNode + DEFAULT_THREAD_PER_BLOCK - 1) / threadPerBlock; calculateBoudingBox << <numBlock, threadPerBlock >> > (d_bboxes, numLeafNode, d_bvh->d_leaf_nodes, d_bvh->d_internal_nodes); } void BVHAccel::init_primitives(Mesh& body) { //prepare primitives obj_vertices.resize(body.vertices.size()); for (int i = 0; i < body.vertices.size(); i++) { obj_vertices[i] = glm::vec3(body.vertices[i]); } safe_cuda(cudaMalloc((void**)&d_obj_vertices, sizeof(glm::vec3)*obj_vertices.size())); safe_cuda(cudaMemcpy(d_obj_vertices, &obj_vertices[0], sizeof(glm::vec3)*obj_vertices.size(), cudaMemcpyHostToDevice)); //create primitives glm::vec3* h_obj_vertices = &obj_vertices[0]; _primitives.resize(body.vertex_indices.size() / 3); for (int i = 0; i < _primitives.size(); i++) { Primitive tem_pri(h_obj_vertices, d_obj_vertices, body.vertex_indices[i * 3 + 0], body.vertex_indices[i * 3 + 1], body.vertex_indices[i * 3 + 2]); _primitives[i] = tem_pri; } } BVHAccel::BVHAccel(Mesh& body, size_t max_leaf_size): d_bboxes(nullptr), #ifdef _DEBUG h_leaf_nodes(nullptr), h_internal_nodes(nullptr), #endif d_sorted_morton_code(nullptr) { init_primitives(body); // edge case if (_primitives.empty()) { return; } compute_bbox_and_morton(); // remove duplicates vector<unsigned int> indices; indices_sort(_morton_codes, indices); remove_redundant(_morton_codes, indices); filter(_morton_codes, indices, _sorted_morton_codes); filter(_primitives, indices, _sorted_primitives); filter(_bboxes, indices, _sorted_bboxes); // init GPU data, including d_bboxes,d_primitives, d_sorted_morton_code,d_leaf_nodes, d_internal_nodes init(); // build the brt tree build(); } BVHAccel::~BVHAccel() { cudaFree(d_bboxes); cudaFree(d_sorted_morton_code); cudaFree(d_obj_vertices); // Free d_bvh here cause it has pointer points to gpu memory // and we need to pass the value several times and make sure the // resource not freed, so we can't free it in its own destructor. 
// \BVHAccel controls the lifetieme of \d_bvh, if the destructor // of \BVHAccel called, which means we can free all the resources in // gpu and cpu(Obviously, this violates the "new" and "free" pair priciple) d_bvh->free_memory(); } #ifdef _DEBUG BRTreeNode* BVHAccel::get_leaf_nodes() { copyFromGPUtoCPU((void**)&h_leaf_nodes, d_bvh->d_leaf_nodes, numLeafNode * sizeof(BRTreeNode)); return h_leaf_nodes; } BRTreeNode* BVHAccel::get_internal_nodes() { copyFromGPUtoCPU((void**)&h_internal_nodes, d_bvh->d_internal_nodes, numInternalNode * sizeof(BRTreeNode)); return h_internal_nodes; } BRTreeNode* BVHAccel::get_root() const { return &h_internal_nodes[0]; } BRTreeNode* BVHAccel::get_left_child(BRTreeNode* node)const { bool is_leaf = false; bool is_null = false; int child_idx = false; child_idx = node->getChildA(is_leaf, is_null); if (!is_null) { if (is_leaf) { return &h_leaf_nodes[child_idx]; } else { return &h_internal_nodes[child_idx]; } } else return nullptr; } BRTreeNode* BVHAccel::get_right_child(BRTreeNode* node)const { bool is_leaf = false; bool is_null = false; int child_idx = false; child_idx = node->getChildB(is_leaf, is_null); if (!is_null) { if (is_leaf) { return &h_leaf_nodes[child_idx]; } else { return &h_internal_nodes[child_idx]; } } else return nullptr; } bool BVHAccel::is_leaf(BRTreeNode* node)const { bool is_leaf = false; bool is_null_a = false; bool is_null_b = false; int child_idx_a = false; int child_idx_b = false; child_idx_a = node->getChildA(is_leaf, is_null_a); child_idx_b = node->getChildB(is_leaf, is_null_b); if (is_null_a && is_null_b) return true; return false; } bool BVHAccel::intersect(const glm::vec3 point, int& idx) const { // Allocate traversal stack from thread-local memory, // and push NULL to indicate that there are no postponed nodes. BRTreeNode* stack[64]; BRTreeNode** stackPtr = stack; *stackPtr++ = NULL; // push // Traverse nodes starting from the root. BRTreeNode* node = get_root(); do { // Check each child node for overlap. BRTreeNode* childA = get_left_child(node); BRTreeNode* childB = get_right_child(node); bool overlapL = check_overlap(point, childA); bool overlapR = check_overlap(point, childB); // Query overlaps a leaf node => report collision with the first collision. if (overlapL && is_leaf(childA)) { idx = childA->getIdx(); //idx = -(idx + 1); //is a leaf, and we can get it through primitive[idx] return true; } if (overlapR && is_leaf(childB)) { idx = childB->getIdx(); //idx = -(idx + 1); //is a leaf return true; } // Query overlaps an internal node => traverse. bool traverseL = (overlapL && !is_leaf(childA)); bool traverseR = (overlapR && !is_leaf(childB)); if (!traverseL && !traverseR) node = *--stackPtr; // pop else { node = (traverseL) ? 
childA : childB; if (traverseL && traverseR) *stackPtr++ = childB; // push } } while (node != NULL); return false; } bool BVHAccel::check_overlap(const glm::vec3 point, BRTreeNode* node)const { return node->bbox.intersect(point); } void BVHAccel::access(BRTreeNode* root, vector<BRTreeNode*>& bad_bode) { if (root->bbox.min.x > root->bbox.max.x) { if (is_leaf(root)) { bad_bode.push_back(root); return; } else { access(get_left_child(root), bad_bode); access(get_right_child(root), bad_bode); } } } void BVHAccel::copy_data_gpu_to_cpu() { copyFromGPUtoCPU((void**)&h_internal_nodes, d_bvh->d_internal_nodes, sizeof(BRTreeNode)*numInternalNode); copyFromGPUtoCPU((void**)&h_leaf_nodes, d_bvh->d_leaf_nodes, sizeof(BRTreeNode)*numLeafNode); } // call copy_data_gpu_to_cpu() before print void BVHAccel::print(BRTreeNode* root, int depth, const int max_depth) { depth++; if (depth > max_depth) return; bool is_null = false; cout << root->getIdx() << " " << root->getParent(is_null); root->bbox.print(); if (is_leaf(root)) { return; } else { is_null = false; cout << " left:" << get_left_child(root)->getIdx() << " " << get_left_child(root)->getParent(is_null); get_left_child(root)->bbox.print(); is_null = false; cout << " right:" << get_right_child(root)->getIdx() << " "<< get_right_child(root)->getParent(is_null); get_right_child(root)->bbox.print(); print(get_left_child(root),depth +1, max_depth); print(get_right_child(root), depth + 1, max_depth); } } // call copy_data_gpu_to_cpu() before draw void BVHAccel::draw(BRTreeNode* root) { //root->bbox.draw(); bool is_null = false; cout << root->getIdx() << " parent_id: " << root->getParent(is_null) << " "; bool is_leaf_a = false; bool is_null_a = false; bool is_null_b = false; int child_idx_a = false; int child_idx_b = false; child_idx_a = root->getChildA(is_leaf_a, is_null_a); cout << "left_id " << child_idx_a << " is_leaf_a" << is_leaf_a; child_idx_b = root->getChildB(is_leaf_a, is_null_b); cout << "right_id " << child_idx_b << " is_leaf_a" << is_leaf_a; root->bbox.print(); if (is_leaf(root)) { //cout << "is_leaf"; //bool is_leaf = false; //bool is_null_a = false; //bool is_null_b = false; //int child_idx_a = false; //int child_idx_b = false; //child_idx_a = root->getChildA(is_leaf, is_null_a); //cout << "left_id " << child_idx_a << " is_leaf" << is_leaf; //child_idx_b = root->getChildB(is_leaf, is_null_b); //cout << "right_id " << child_idx_b << " is_leaf" << is_leaf; return; } else { draw(get_left_child(root)); draw(get_right_child(root)); } } #endif
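Both copies of the bvh file above derive their 30-bit Morton codes from expandBits and morton3D. The host-side sketch below is not part of the original sources; it just replays the same bit-expansion to show that bit i of a 10-bit coordinate lands at position 3*i, so weighting the expanded x, y, z by 4, 2, 1 interleaves them with x in the most significant slot of every 3-bit group.

// Standalone host-side illustration of the Morton expansion used in bvh.
#include <cassert>
#include <cstdio>

static unsigned int expandBits(unsigned int v) {
    v = (v * 0x00010001u) & 0xFF0000FFu;
    v = (v * 0x00000101u) & 0x0F00F00Fu;
    v = (v * 0x00000011u) & 0xC30C30C3u;
    v = (v * 0x00000005u) & 0x49249249u;
    return v;
}

int main() {
    assert(expandBits(1u) == 1u);   // bit 0 stays at bit 0
    assert(expandBits(2u) == 8u);   // bit 1 moves to bit 3
    assert(expandBits(3u) == 9u);   // both of the above combined
    // cell (x=1, y=0, z=1) -> 4*expand(x) + 2*expand(y) + expand(z) = 5 (0b101)
    unsigned int code = expandBits(1u) * 4 + expandBits(0u) * 2 + expandBits(1u);
    printf("morton(1,0,1) = %u\n", code);
    return 0;
}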
b28dceffb2a5f314dd1c860f55aae09c5dea12c0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include "CNNConvLayer.h" using namespace std; // This is the CPU version, please don't modify it void convLayerCPU() { // declarations for bunch of indexing parameters int fn, sli, fmy, fmx, y, x; int ifmy, ifmx, ofmy, ofmx; int filtIdx, inNeuIdx, outNeuIdx, outIdx; int filtVol = FMDEPTH * FILTSIZE * FILTSIZE; int fmArea = FMSIZE * FMSIZE; int filtArea = FILTSIZE * FILTSIZE; int outArea = FMSIZE/3 * FMSIZE/3; int sum; // Convolution for(fn = 0; fn < FILTNUM; fn++){ //iterate through each filters for(fmy = 0; fmy < FMSIZE; fmy += STRIDE){ //Stride through for(fmx = 0; fmx < FMSIZE; fmx += STRIDE){ //Stride through sum = 0; for(sli = 0; sli < FMDEPTH; sli++){ //Iterate through depth //Convolution for(y = 0; y < FILTSIZE; y++){ for(x = 0; x < FILTSIZE; x++){ ifmy = fmy - FILTSIZE / 2 + y; ifmx = fmx - FILTSIZE / 2 + x; filtIdx = fn*filtVol + sli*filtArea + y*FILTSIZE + x; inNeuIdx = sli*fmArea + ifmy*FMSIZE + ifmx; if(ifmy >= 0 && ifmy < FMSIZE && ifmx >= 0 && ifmx < FMSIZE) sum += filt[filtIdx] * inNeu[inNeuIdx]; //"filt" is a giant array that stores all of the parameters of all the filters //size = 307200 //inNeu size = 69984 //What's tricky here is that filter weighting and input neurons are all int } } } // Activation - ReLU <- Don't pronounce it wrong outNeuIdx = fn*fmArea + fmy*FMSIZE + fmx; if(sum <= 0) outNeu[outNeuIdx] = 0; else outNeu[outNeuIdx] = sum; } } } // Max Pooling with Window Size 3x3 and stride 3 int max, tmpVal; for(sli = 0; sli < FILTNUM; sli++){ for(fmy = 0; fmy < FMSIZE/3 ; fmy += 1){ for(fmx = 0; fmx < FMSIZE/3 ; fmx += 1){ outNeuIdx = sli*fmArea + fmy*3*FMSIZE + fmx*3; max = outNeu[outNeuIdx]; for(y = 0; y < 3; y++){ for(x = 0; x < 3; x++){ ofmy = fmy*3 + y; ofmx = fmx*3 + x; outNeuIdx = sli*fmArea + ofmy*FMSIZE + ofmx; tmpVal = outNeu[outNeuIdx]; if(tmpVal > max) max = tmpVal; } } outIdx = sli*outArea + fmy*FMSIZE/3 + fmx; outCPU[outIdx] = max; } } } } /*** Implement your CUDA Kernel here ***/ __global__ void convLayerGPU(int* filt_GPU, int* inNeu_GPU, int* out_GPU_kernel) { // declarations for bunch of indexing parameters int fn, sli, fmy, fmx, y, x; int ifmy, ifmx; int filtIdx, inNeuIdx, outIdx; int filtVol = FMDEPTH * FILTSIZE * FILTSIZE; int fmArea = FMSIZE * FMSIZE; int filtArea = FILTSIZE * FILTSIZE; int outArea = FMSIZE/3 * FMSIZE/3; int sum; int i = blockIdx.x*blockDim.x + threadIdx.x; if(i < outArea* FILTNUM) out_GPU_kernel[i] = 0; sum = 0; if(i < FILTNUM*FMSIZE*FMSIZE){ fn = i/fmArea; for(sli = 0; sli < FMDEPTH; sli++){ for(y = 0; y < FILTSIZE; y++){ for(x = 0; x < FILTSIZE; x++){ fmy = (i%fmArea)/FMSIZE; fmx = i%FMSIZE; ifmy = fmy - FILTSIZE / 2 + y; ifmx = fmx - FILTSIZE / 2 + x; filtIdx = fn*filtVol + sli*filtArea + y*FILTSIZE + x; inNeuIdx = sli*fmArea + ifmy*FMSIZE + ifmx; if(ifmy >= 0 && ifmy < FMSIZE && ifmx >= 0 && ifmx < FMSIZE) sum += filt_GPU[filtIdx] * inNeu_GPU[inNeuIdx]; } } } } __syncthreads(); for (y = 0; y < 3; y++){ for (x = 0; x < 3; x++) { outIdx = fn*outArea + fmy/3*FMSIZE/3 + fmx/3; if (outIdx < outArea* FILTNUM){ if (fmy % 3 == y && fmx % 3 == x){ int tmp = out_GPU_kernel[outIdx]; if (sum > tmp) out_GPU_kernel[outIdx] = sum; } } __syncthreads(); } } __syncthreads(); } /*** Implement your CUDA Kernel here ***/ int main() { //variables setting and loading input data timespec time_begin, time_end; int convLayerCPUExecTime, convLayerGPUExecTime; init(); /******** Added ********/ int* filt_GPU; int* inNeu_GPU; int* out_GPU_kernel; 
    // int* out_Neu_kernel;

    //Convolution by CPU
    clock_gettime(CLOCK_REALTIME, &time_begin);
    convLayerCPU();
    clock_gettime(CLOCK_REALTIME, &time_end);
    convLayerCPUExecTime = timespec_diff_us(time_begin, time_end);
    cout << "CPU time for executing a typical convolutional layer = " << ((float)convLayerCPUExecTime)/1000 << "ms" << endl;

    //Convolution by GPU
    clock_gettime(CLOCK_REALTIME, &time_begin);
    hipMalloc(&inNeu_GPU, FMSIZE*FMSIZE*FMDEPTH*sizeof(int));
    hipMalloc(&filt_GPU, FILTSIZE*FILTSIZE*FMDEPTH*FILTNUM*sizeof(int));
    hipMalloc(&out_GPU_kernel, FILTNUM * FMSIZE/3 * FMSIZE/3*sizeof(int));
    hipMemcpy(filt_GPU, filt, FILTSIZE*FILTSIZE*FMDEPTH*FILTNUM*sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(inNeu_GPU, inNeu, FMSIZE*FMSIZE*FMDEPTH*sizeof(int), hipMemcpyHostToDevice);
    /******** Added ********/

    /*** Launch your CUDA Kernel here ***/
    hipLaunchKernelGGL(( convLayerGPU), dim3((FILTNUM*FMSIZE*FMSIZE+729)/729), dim3(729), 0, 0, filt_GPU, inNeu_GPU, out_GPU_kernel); // Launch the kernel
    hipDeviceSynchronize(); // Do synchronization before clock_gettime()
    hipMemcpy(outGPU, out_GPU_kernel, FILTNUM * FMSIZE/3 * FMSIZE/3*sizeof(int), hipMemcpyDeviceToHost);
    /*** Launch your CUDA Kernel here ***/

    clock_gettime(CLOCK_REALTIME, &time_end);
    convLayerGPUExecTime = timespec_diff_us(time_begin, time_end);
    cout << "GPU time for executing a typical convolutional layer = " << ((float)convLayerGPUExecTime)/1000 << "ms" << endl;

    //check the answer from CPU and from GPU
    if(checker()){
        cout << "Congratulations! You pass the check." << endl;
        cout << "Speedup: " << (float)convLayerCPUExecTime / convLayerGPUExecTime << endl;
    }
    else
        cout << "Hummm there's something wrong" << endl;

    /******** Added ********/
    hipFree(filt_GPU);
    hipFree(inNeu_GPU);
    hipFree(out_GPU_kernel);
    /******** Added ********/

    //release memory space
    ending();

    return 0;
}
b28dceffb2a5f314dd1c860f55aae09c5dea12c0.cu
#include <iostream> #include "CNNConvLayer.h" using namespace std; // This is the CPU version, please don't modify it void convLayerCPU() { // declarations for bunch of indexing parameters int fn, sli, fmy, fmx, y, x; int ifmy, ifmx, ofmy, ofmx; int filtIdx, inNeuIdx, outNeuIdx, outIdx; int filtVol = FMDEPTH * FILTSIZE * FILTSIZE; int fmArea = FMSIZE * FMSIZE; int filtArea = FILTSIZE * FILTSIZE; int outArea = FMSIZE/3 * FMSIZE/3; int sum; // Convolution for(fn = 0; fn < FILTNUM; fn++){ //iterate through each filters for(fmy = 0; fmy < FMSIZE; fmy += STRIDE){ //Stride through for(fmx = 0; fmx < FMSIZE; fmx += STRIDE){ //Stride through sum = 0; for(sli = 0; sli < FMDEPTH; sli++){ //Iterate through depth //Convolution for(y = 0; y < FILTSIZE; y++){ for(x = 0; x < FILTSIZE; x++){ ifmy = fmy - FILTSIZE / 2 + y; ifmx = fmx - FILTSIZE / 2 + x; filtIdx = fn*filtVol + sli*filtArea + y*FILTSIZE + x; inNeuIdx = sli*fmArea + ifmy*FMSIZE + ifmx; if(ifmy >= 0 && ifmy < FMSIZE && ifmx >= 0 && ifmx < FMSIZE) sum += filt[filtIdx] * inNeu[inNeuIdx]; //"filt" is a giant array that stores all of the parameters of all the filters //size = 307200 //inNeu size = 69984 //What's tricky here is that filter weighting and input neurons are all int } } } // Activation - ReLU <- Don't pronounce it wrong outNeuIdx = fn*fmArea + fmy*FMSIZE + fmx; if(sum <= 0) outNeu[outNeuIdx] = 0; else outNeu[outNeuIdx] = sum; } } } // Max Pooling with Window Size 3x3 and stride 3 int max, tmpVal; for(sli = 0; sli < FILTNUM; sli++){ for(fmy = 0; fmy < FMSIZE/3 ; fmy += 1){ for(fmx = 0; fmx < FMSIZE/3 ; fmx += 1){ outNeuIdx = sli*fmArea + fmy*3*FMSIZE + fmx*3; max = outNeu[outNeuIdx]; for(y = 0; y < 3; y++){ for(x = 0; x < 3; x++){ ofmy = fmy*3 + y; ofmx = fmx*3 + x; outNeuIdx = sli*fmArea + ofmy*FMSIZE + ofmx; tmpVal = outNeu[outNeuIdx]; if(tmpVal > max) max = tmpVal; } } outIdx = sli*outArea + fmy*FMSIZE/3 + fmx; outCPU[outIdx] = max; } } } } /*** Implement your CUDA Kernel here ***/ __global__ void convLayerGPU(int* filt_GPU, int* inNeu_GPU, int* out_GPU_kernel) { // declarations for bunch of indexing parameters int fn, sli, fmy, fmx, y, x; int ifmy, ifmx; int filtIdx, inNeuIdx, outIdx; int filtVol = FMDEPTH * FILTSIZE * FILTSIZE; int fmArea = FMSIZE * FMSIZE; int filtArea = FILTSIZE * FILTSIZE; int outArea = FMSIZE/3 * FMSIZE/3; int sum; int i = blockIdx.x*blockDim.x + threadIdx.x; if(i < outArea* FILTNUM) out_GPU_kernel[i] = 0; sum = 0; if(i < FILTNUM*FMSIZE*FMSIZE){ fn = i/fmArea; for(sli = 0; sli < FMDEPTH; sli++){ for(y = 0; y < FILTSIZE; y++){ for(x = 0; x < FILTSIZE; x++){ fmy = (i%fmArea)/FMSIZE; fmx = i%FMSIZE; ifmy = fmy - FILTSIZE / 2 + y; ifmx = fmx - FILTSIZE / 2 + x; filtIdx = fn*filtVol + sli*filtArea + y*FILTSIZE + x; inNeuIdx = sli*fmArea + ifmy*FMSIZE + ifmx; if(ifmy >= 0 && ifmy < FMSIZE && ifmx >= 0 && ifmx < FMSIZE) sum += filt_GPU[filtIdx] * inNeu_GPU[inNeuIdx]; } } } } __syncthreads(); for (y = 0; y < 3; y++){ for (x = 0; x < 3; x++) { outIdx = fn*outArea + fmy/3*FMSIZE/3 + fmx/3; if (outIdx < outArea* FILTNUM){ if (fmy % 3 == y && fmx % 3 == x){ int tmp = out_GPU_kernel[outIdx]; if (sum > tmp) out_GPU_kernel[outIdx] = sum; } } __syncthreads(); } } __syncthreads(); } /*** Implement your CUDA Kernel here ***/ int main() { //variables setting and loading input data timespec time_begin, time_end; int convLayerCPUExecTime, convLayerGPUExecTime; init(); /******** Added ********/ int* filt_GPU; int* inNeu_GPU; int* out_GPU_kernel; // int* out_Neu_kernel; //Convolution by CPU clock_gettime(CLOCK_REALTIME, &time_begin); 
    convLayerCPU();
    clock_gettime(CLOCK_REALTIME, &time_end);
    convLayerCPUExecTime = timespec_diff_us(time_begin, time_end);
    cout << "CPU time for executing a typical convolutional layer = " << ((float)convLayerCPUExecTime)/1000 << "ms" << endl;

    //Convolution by GPU
    clock_gettime(CLOCK_REALTIME, &time_begin);
    cudaMalloc(&inNeu_GPU, FMSIZE*FMSIZE*FMDEPTH*sizeof(int));
    cudaMalloc(&filt_GPU, FILTSIZE*FILTSIZE*FMDEPTH*FILTNUM*sizeof(int));
    cudaMalloc(&out_GPU_kernel, FILTNUM * FMSIZE/3 * FMSIZE/3*sizeof(int));
    cudaMemcpy(filt_GPU, filt, FILTSIZE*FILTSIZE*FMDEPTH*FILTNUM*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(inNeu_GPU, inNeu, FMSIZE*FMSIZE*FMDEPTH*sizeof(int), cudaMemcpyHostToDevice);
    /******** Added ********/

    /*** Launch your CUDA Kernel here ***/
    convLayerGPU<<<(FILTNUM*FMSIZE*FMSIZE+729)/729, 729>>>(filt_GPU, inNeu_GPU, out_GPU_kernel); // Launch the kernel
    cudaDeviceSynchronize(); // Do synchronization before clock_gettime()
    cudaMemcpy(outGPU, out_GPU_kernel, FILTNUM * FMSIZE/3 * FMSIZE/3*sizeof(int), cudaMemcpyDeviceToHost);
    /*** Launch your CUDA Kernel here ***/

    clock_gettime(CLOCK_REALTIME, &time_end);
    convLayerGPUExecTime = timespec_diff_us(time_begin, time_end);
    cout << "GPU time for executing a typical convolutional layer = " << ((float)convLayerGPUExecTime)/1000 << "ms" << endl;

    //check the answer from CPU and from GPU
    if(checker()){
        cout << "Congratulations! You pass the check." << endl;
        cout << "Speedup: " << (float)convLayerCPUExecTime / convLayerGPUExecTime << endl;
    }
    else
        cout << "Hummm there's something wrong" << endl;

    /******** Added ********/
    cudaFree(filt_GPU);
    cudaFree(inNeu_GPU);
    cudaFree(out_GPU_kernel);
    /******** Added ********/

    //release memory space
    ending();

    return 0;
}
bb639beab12dcc69e7925886d6f73a139e5ea3db.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "CombineLayer.h" #include <vector> #include <helper_functions.h> #include <helper_cuda.h> #include <math.h> #include "../common/Config.h" #include "../common/cuBase.h" /* * dim3 block = dim3(batch, inputsSkip->getLen()); * dim3 thread= dim3(min(preDelta[0]->getLen() / batch, 1024)); */ __global__ void g_CombineLayer_backpropagation( float** preDeltas, float* curDeltas, int* skip, int* cols, int* channels, int batch, int curDeltaCols); /* * dim3 block = dim3(batch, skip->getLen()); * dim3 thread= dim3(min(outputs->getLen() / batch, 1024)); */ __global__ void g_CombineLayer_feedforward( float** inputs, float* outputs, int* skip, int* cols, int* channels, int batch, int outputCols); void CombineLayer::feedforward() { /*spread multi-inputs to output*/ dim3 block = dim3(batch, inputsSkip->getLen()); dim3 thread= dim3(min(outputs->getLen() / batch, 1024)); hipLaunchKernelGGL(( g_CombineLayer_feedforward), dim3(block), dim3(thread), 0, 0, inputs.m_devPoint, outputs->getDev(), inputsSkip->getDev(), inputsCols->getDev(), inputsChannels->getDev(), batch, outputs->cols); checkCudaErrors(hipStreamSynchronize(0)); getLastCudaError("CombineLayer feedforward"); #ifdef CombineLayer_feedforward_Checking outputs->toCpu(); for(int i = 0; i < inputs.size(); i++){ inputs[i]->toCpu(); for(int j = 0; j < inputs[i]->getLen(); j++){ printf("%f ", inputs[i]->getHost()[j]); }printf("\n"); } printf("\n\noutputs\n\n"); for(int i = 0; i < outputs->getLen(); i++){ printf("%f ", outputs->getHost()[i]); }printf("\n"); #endif } void CombineLayer::backpropagation() { /*copy curDelta to multi-preDelta*/ dim3 block = dim3(batch, inputsSkip->getLen()); dim3 thread= dim3(min(preDelta[0]->getLen() / batch, 1024)); hipLaunchKernelGGL(( g_CombineLayer_backpropagation), dim3(block), dim3(thread), 0, 0, preDelta.m_devPoint, curDelta->getDev(), inputsSkip->getDev(), inputsCols->getDev(), inputsChannels->getDev(), batch, curDelta->cols); checkCudaErrors(hipStreamSynchronize(0)); getLastCudaError("combineLayer backpropagation"); #ifdef CombineLayer_backpropagation_checking curDelta->toCpu(); for(int i = 0; i < inputs.size(); i++){ preDelta[i]->toCpu(); for(int j = 0; j < preDelta[i]->getLen(); j++){ printf("%f ", preDelta[i]->getHost()[j]); }printf("\n"); } printf("\n\noutputs\n\n"); for(int i = 0; i < curDelta->getLen(); i++){ printf("%f ", curDelta->getHost()[i]); }printf("\n"); #endif } CombineLayer::CombineLayer(std::string name) { cost = NULL; m_name = name; ConfigCombineLayer* config = (ConfigCombineLayer*)Config::instance()->getLayerByName(m_name); Assert(config->m_input == std::string("NULL")); /*multi-inputs*/ /*suppose the input certainly not the BranLayers's sub-output*/ inputsSkip = new cuMatrix<int>(config->m_inputs.size(), 1, 1); inputsChannels = new cuMatrix<int>(config->m_inputs.size(), 1, 1); inputsCols = new cuMatrix<int>(config->m_inputs.size(), 1, 1); int len = 0; for(int i = 0; i < (int)config->m_inputs.size(); i++){ ConvLayerBase * preLayer = (ConvLayerBase*)Layers::instance()->get(config->m_inputs[i]); inputs.push_back(preLayer->getOutputs()); preDelta.push_back(preLayer->getCurDelta()); inputsSkip->set(i, 0, 0, len); int area = preLayer->getOutputs()->cols * preLayer->getOutputs()->channels; inputsCols->set(i, 0, 0, preLayer->getOutputs()->cols); inputsChannels->set(i, 0, 0, preLayer->getOutputs()->channels); len += area; } batch = Config::instance()->getBatchSize(); outputs = new cuMatrix<float>(batch, len, 1); 
curDelta = new cuMatrix<float>(batch, len, 1); inputs.toGpu(); preDelta.toGpu(); inputsSkip->toGpu(); inputsCols->toGpu(); inputsChannels->toGpu(); Layers::instance()->set(m_name, this); } /* * dim3 block = dim3(batch, skip->getLen()); * dim3 thread= dim3(min(outputs->getLen() / batch, 1024)); */ __global__ void g_CombineLayer_feedforward( float** inputs, float* outputs, int* skip, int* cols, int* channels, int batch, int outputCols) { int batchId = blockIdx.x; int skipId = blockIdx.y; int curcols = cols[skipId];/*current input's one image feature size*/ int curChannels = channels[skipId]; float* output = outputs + batchId * outputCols + skip[skipId]; float* input = inputs[skipId]; int cols_channels = curChannels * curcols; for(int i = 0; i < cols_channels; i += blockDim.x){ int idx = i + threadIdx.x; if(idx < cols_channels){ int channel = idx / curcols; int col = idx % curcols; int area = batch * curcols; output[idx] = input[channel * area + batchId * curcols + col]; } } } /* * dim3 block = dim3(batch, inputsSkip->getLen()); * dim3 thread= dim3(min(preDelta[0]->getLen() / batch, 1024)); */ __global__ void g_CombineLayer_backpropagation( float** preDeltas, float* curDeltas, int* skip, int* cols, int* channels, int batch, int curDeltaCols){ int batchId = blockIdx.x; int skipId = blockIdx.y; int precols = cols[skipId];/*current input's one image feature size*/ int preChannels = channels[skipId]; float* curDelta = curDeltas + batchId * curDeltaCols + skip[skipId]; float* preDelta = preDeltas[skipId]; int cols_channels= precols * preChannels; for(int i = 0; i < cols_channels; i += blockDim.x){ int idx = i + threadIdx.x; if(idx < cols_channels){ int channel = idx / precols; int col = idx % precols; int area = batch * precols; preDelta[channel * area + batchId * precols + col] = curDelta[idx]; } } }
bb639beab12dcc69e7925886d6f73a139e5ea3db.cu
#include "CombineLayer.h" #include <vector> #include <helper_functions.h> #include <helper_cuda.h> #include <math.h> #include "../common/Config.h" #include "../common/cuBase.h" /* * dim3 block = dim3(batch, inputsSkip->getLen()); * dim3 thread= dim3(min(preDelta[0]->getLen() / batch, 1024)); */ __global__ void g_CombineLayer_backpropagation( float** preDeltas, float* curDeltas, int* skip, int* cols, int* channels, int batch, int curDeltaCols); /* * dim3 block = dim3(batch, skip->getLen()); * dim3 thread= dim3(min(outputs->getLen() / batch, 1024)); */ __global__ void g_CombineLayer_feedforward( float** inputs, float* outputs, int* skip, int* cols, int* channels, int batch, int outputCols); void CombineLayer::feedforward() { /*spread multi-inputs to output*/ dim3 block = dim3(batch, inputsSkip->getLen()); dim3 thread= dim3(min(outputs->getLen() / batch, 1024)); g_CombineLayer_feedforward<<<block, thread>>>( inputs.m_devPoint, outputs->getDev(), inputsSkip->getDev(), inputsCols->getDev(), inputsChannels->getDev(), batch, outputs->cols); checkCudaErrors(cudaStreamSynchronize(0)); getLastCudaError("CombineLayer feedforward"); #ifdef CombineLayer_feedforward_Checking outputs->toCpu(); for(int i = 0; i < inputs.size(); i++){ inputs[i]->toCpu(); for(int j = 0; j < inputs[i]->getLen(); j++){ printf("%f ", inputs[i]->getHost()[j]); }printf("\n"); } printf("\n\noutputs\n\n"); for(int i = 0; i < outputs->getLen(); i++){ printf("%f ", outputs->getHost()[i]); }printf("\n"); #endif } void CombineLayer::backpropagation() { /*copy curDelta to multi-preDelta*/ dim3 block = dim3(batch, inputsSkip->getLen()); dim3 thread= dim3(min(preDelta[0]->getLen() / batch, 1024)); g_CombineLayer_backpropagation<<<block, thread>>>( preDelta.m_devPoint, curDelta->getDev(), inputsSkip->getDev(), inputsCols->getDev(), inputsChannels->getDev(), batch, curDelta->cols); checkCudaErrors(cudaStreamSynchronize(0)); getLastCudaError("combineLayer backpropagation"); #ifdef CombineLayer_backpropagation_checking curDelta->toCpu(); for(int i = 0; i < inputs.size(); i++){ preDelta[i]->toCpu(); for(int j = 0; j < preDelta[i]->getLen(); j++){ printf("%f ", preDelta[i]->getHost()[j]); }printf("\n"); } printf("\n\noutputs\n\n"); for(int i = 0; i < curDelta->getLen(); i++){ printf("%f ", curDelta->getHost()[i]); }printf("\n"); #endif } CombineLayer::CombineLayer(std::string name) { cost = NULL; m_name = name; ConfigCombineLayer* config = (ConfigCombineLayer*)Config::instance()->getLayerByName(m_name); Assert(config->m_input == std::string("NULL")); /*multi-inputs*/ /*suppose the input certainly not the BranLayers's sub-output*/ inputsSkip = new cuMatrix<int>(config->m_inputs.size(), 1, 1); inputsChannels = new cuMatrix<int>(config->m_inputs.size(), 1, 1); inputsCols = new cuMatrix<int>(config->m_inputs.size(), 1, 1); int len = 0; for(int i = 0; i < (int)config->m_inputs.size(); i++){ ConvLayerBase * preLayer = (ConvLayerBase*)Layers::instance()->get(config->m_inputs[i]); inputs.push_back(preLayer->getOutputs()); preDelta.push_back(preLayer->getCurDelta()); inputsSkip->set(i, 0, 0, len); int area = preLayer->getOutputs()->cols * preLayer->getOutputs()->channels; inputsCols->set(i, 0, 0, preLayer->getOutputs()->cols); inputsChannels->set(i, 0, 0, preLayer->getOutputs()->channels); len += area; } batch = Config::instance()->getBatchSize(); outputs = new cuMatrix<float>(batch, len, 1); curDelta = new cuMatrix<float>(batch, len, 1); inputs.toGpu(); preDelta.toGpu(); inputsSkip->toGpu(); inputsCols->toGpu(); inputsChannels->toGpu(); 
Layers::instance()->set(m_name, this); } /* * dim3 block = dim3(batch, skip->getLen()); * dim3 thread= dim3(min(outputs->getLen() / batch, 1024)); */ __global__ void g_CombineLayer_feedforward( float** inputs, float* outputs, int* skip, int* cols, int* channels, int batch, int outputCols) { int batchId = blockIdx.x; int skipId = blockIdx.y; int curcols = cols[skipId];/*current input's one image feature size*/ int curChannels = channels[skipId]; float* output = outputs + batchId * outputCols + skip[skipId]; float* input = inputs[skipId]; int cols_channels = curChannels * curcols; for(int i = 0; i < cols_channels; i += blockDim.x){ int idx = i + threadIdx.x; if(idx < cols_channels){ int channel = idx / curcols; int col = idx % curcols; int area = batch * curcols; output[idx] = input[channel * area + batchId * curcols + col]; } } } /* * dim3 block = dim3(batch, inputsSkip->getLen()); * dim3 thread= dim3(min(preDelta[0]->getLen() / batch, 1024)); */ __global__ void g_CombineLayer_backpropagation( float** preDeltas, float* curDeltas, int* skip, int* cols, int* channels, int batch, int curDeltaCols){ int batchId = blockIdx.x; int skipId = blockIdx.y; int precols = cols[skipId];/*current input's one image feature size*/ int preChannels = channels[skipId]; float* curDelta = curDeltas + batchId * curDeltaCols + skip[skipId]; float* preDelta = preDeltas[skipId]; int cols_channels= precols * preChannels; for(int i = 0; i < cols_channels; i += blockDim.x){ int idx = i + threadIdx.x; if(idx < cols_channels){ int channel = idx / precols; int col = idx % precols; int area = batch * precols; preDelta[channel * area + batchId * precols + col] = curDelta[idx]; } } }
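// A small host-side reference of the gather performed by g_CombineLayer_feedforward:
// each input i is stored channel-major as [channel][batch][cols[i]], and the kernel
// concatenates all inputs per sample into one row of `outputs`. This is only a
// readability aid for the indexing; names and the function itself are illustrative,
// not part of the layer.
#include <vector>

static void combineForwardReference(const std::vector<const float*>& inputs,
                                    const std::vector<int>& cols,
                                    const std::vector<int>& channels,
                                    const std::vector<int>& skip,
                                    int batch, int outputCols, float* outputs)
{
    for (int i = 0; i < (int)inputs.size(); ++i)
        for (int b = 0; b < batch; ++b)
            for (int ch = 0; ch < channels[i]; ++ch)
                for (int col = 0; col < cols[i]; ++col)
                    // same index mapping as output[idx] = input[channel*area + batchId*curcols + col]
                    outputs[b * outputCols + skip[i] + ch * cols[i] + col] =
                        inputs[i][ch * batch * cols[i] + b * cols[i] + col];
}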
36578304dc06c3714c758b0d27aa5707110aef9a.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <stdio.h> #include <stdlib.h> #include <iostream> #include <hip/hip_runtime.h> // Matrices are stored in row-major order: // M(row, col) = *(M.elements + row * M.width + col) typedef struct { int width; int height; float* elements; } Matrix; // Thread block size #ifndef BLOCK_SIZE #define BLOCK_SIZE 16 #endif // Forward declaration of the matrix multiplication kernel __global__ void MatMulKernel(const Matrix, const Matrix, Matrix); void print(Matrix X) { // std::cout << X.height << "\n"; std::cout << X.width << "\n"; for (int i = 0; i < X.height; i++) { for (int j = 0; j < X.width; j++) { std::cout << X.elements[i * X.width + j] << " "; } std::cout << "\n"; } } // Matrix multiplication - Host code // Matrix dimensions are assumed to be multiples of BLOCK_SIZE void MatMul(const Matrix A, const Matrix B, Matrix C) { // Load A and B to device memory Matrix d_A; d_A.width = A.width; d_A.height = A.height; size_t size = A.width * A.height * sizeof(float); hipMalloc(&d_A.elements, size); hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice); Matrix d_B; d_B.width = B.width; d_B.height = B.height; size = B.width * B.height * sizeof(float); hipMalloc(&d_B.elements, size); hipMemcpy(d_B.elements, B.elements, size, hipMemcpyHostToDevice); // Allocate C in device memory Matrix d_C; d_C.width = C.width; d_C.height = C.height; size = C.width * C.height * sizeof(float); hipMalloc(&d_C.elements, size); // Invoke kernel dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid((B.width - 1) / dimBlock.x + 1, (A.height - 1) / dimBlock.y + 1, (B.width - 1) / dimBlock.z + 1); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C); hipEventRecord(stop); hipError_t errSync = hipGetLastError(); hipError_t errAsync = hipDeviceSynchronize(); if (errSync != hipSuccess) printf("4: Sync kernel error: %s\n", hipGetErrorString(errSync)); if (errAsync != hipSuccess) printf("4: Async kernel error: %s\n", hipGetErrorString(errAsync)); // Read C from device memory hipMemcpy(C.elements, d_C.elements, size, hipMemcpyDeviceToHost); if (ELAPSED_TIME == 1) { hipEventSynchronize (stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); std::cout << milliseconds << "\n"; } else { print(C); } // Free device memory hipFree(d_A.elements); hipFree(d_B.elements); hipFree(d_C.elements); } int main() { int n, m, q; scanf("%d", &n); m = n; q = n; //printf("n=%d,m=%d,q=%d\n", n, m, q); Matrix A; Matrix B; Matrix C; int sizeA = n * m * sizeof(float); A.height = n; A.width = m; A.elements = new float[sizeA]; int sizeB = m * q * sizeof(float); B.height = m; B.width = q; B.elements = new float[sizeB]; int sizeC = n * q * sizeof(float); C.height = n; C.width = q; C.elements = new float[sizeC]; srand(time(NULL)); for (int i = 0; i < n*m; i++) scanf("%f", &A.elements[i]); for (int i = 0; i < m*q; i++) scanf("%f", &B.elements[i]); //print(A); //printf("\n"); //print(B); //printf("\n"); MatMul(A, B, C); free(A.elements); 
free(B.elements); free(C.elements); return 0; } // Matrix multiplication kernel called by MatMul() __global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) { // Each thread computes one element of C // by accumulating results into Cvalue int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; //int depth = blockIdx.z * blockDim.z + threadIdx.z; int depth = threadIdx.z; __shared__ float Cs[BLOCK_SIZE][BLOCK_SIZE][BLOCK_SIZE]; Cs[threadIdx.y][threadIdx.x][threadIdx.z] = 0; for (int e = 0; e < A.width/BLOCK_SIZE; ++e) { Cs[threadIdx.y][threadIdx.x][threadIdx.z] += A.elements[row * A.width + depth * (A.width/BLOCK_SIZE) + e] * B.elements[e * B.width + depth * (A.width/BLOCK_SIZE) * B.width + col]; } __syncthreads(); int e = BLOCK_SIZE/2; while (threadIdx.z < e) { Cs[threadIdx.y][threadIdx.x][threadIdx.z] += Cs[threadIdx.y][threadIdx.x][threadIdx.z+e]; __syncthreads(); e /= 2; } if(threadIdx.z == 0) C.elements[row * C.width + col] = Cs[threadIdx.y][threadIdx.x][0]; }
36578304dc06c3714c758b0d27aa5707110aef9a.cu
/** * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <stdio.h> #include <stdlib.h> #include <iostream> #include <cuda.h> // Matrices are stored in row-major order: // M(row, col) = *(M.elements + row * M.width + col) typedef struct { int width; int height; float* elements; } Matrix; // Thread block size #ifndef BLOCK_SIZE #define BLOCK_SIZE 16 #endif // Forward declaration of the matrix multiplication kernel __global__ void MatMulKernel(const Matrix, const Matrix, Matrix); void print(Matrix X) { // std::cout << X.height << "\n"; std::cout << X.width << "\n"; for (int i = 0; i < X.height; i++) { for (int j = 0; j < X.width; j++) { std::cout << X.elements[i * X.width + j] << " "; } std::cout << "\n"; } } // Matrix multiplication - Host code // Matrix dimensions are assumed to be multiples of BLOCK_SIZE void MatMul(const Matrix A, const Matrix B, Matrix C) { // Load A and B to device memory Matrix d_A; d_A.width = A.width; d_A.height = A.height; size_t size = A.width * A.height * sizeof(float); cudaMalloc(&d_A.elements, size); cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice); Matrix d_B; d_B.width = B.width; d_B.height = B.height; size = B.width * B.height * sizeof(float); cudaMalloc(&d_B.elements, size); cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice); // Allocate C in device memory Matrix d_C; d_C.width = C.width; d_C.height = C.height; size = C.width * C.height * sizeof(float); cudaMalloc(&d_C.elements, size); // Invoke kernel dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid((B.width - 1) / dimBlock.x + 1, (A.height - 1) / dimBlock.y + 1, (B.width - 1) / dimBlock.z + 1); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C); cudaEventRecord(stop); cudaError_t errSync = cudaGetLastError(); cudaError_t errAsync = cudaDeviceSynchronize(); if (errSync != cudaSuccess) printf("4: Sync kernel error: %s\n", cudaGetErrorString(errSync)); if (errAsync != cudaSuccess) printf("4: Async kernel error: %s\n", cudaGetErrorString(errAsync)); // Read C from device memory cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost); if (ELAPSED_TIME == 1) { cudaEventSynchronize (stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); std::cout << milliseconds << "\n"; } else { print(C); } // Free device memory cudaFree(d_A.elements); cudaFree(d_B.elements); cudaFree(d_C.elements); } int main() { int n, m, q; scanf("%d", &n); m = n; q = n; //printf("n=%d,m=%d,q=%d\n", n, m, q); Matrix A; Matrix B; Matrix C; int sizeA = n * m * sizeof(float); A.height = n; A.width = m; A.elements = new float[sizeA]; int sizeB = m * q * sizeof(float); B.height = m; B.width = q; B.elements = new float[sizeB]; int sizeC = n * q * sizeof(float); C.height = n; C.width = q; C.elements = new float[sizeC]; srand(time(NULL)); for (int i = 0; i < n*m; i++) scanf("%f", &A.elements[i]); for (int i = 0; i < m*q; i++) scanf("%f", &B.elements[i]); //print(A); //printf("\n"); //print(B); //printf("\n"); MatMul(A, B, C); free(A.elements); free(B.elements); free(C.elements); return 0; } // Matrix multiplication kernel called by 
MatMul() __global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) { // Each thread computes one element of C // by accumulating results into Cvalue int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; //int depth = blockIdx.z * blockDim.z + threadIdx.z; int depth = threadIdx.z; __shared__ float Cs[BLOCK_SIZE][BLOCK_SIZE][BLOCK_SIZE]; Cs[threadIdx.y][threadIdx.x][threadIdx.z] = 0; for (int e = 0; e < A.width/BLOCK_SIZE; ++e) { Cs[threadIdx.y][threadIdx.x][threadIdx.z] += A.elements[row * A.width + depth * (A.width/BLOCK_SIZE) + e] * B.elements[e * B.width + depth * (A.width/BLOCK_SIZE) * B.width + col]; } __syncthreads(); int e = BLOCK_SIZE/2; while (threadIdx.z < e) { Cs[threadIdx.y][threadIdx.x][threadIdx.z] += Cs[threadIdx.y][threadIdx.x][threadIdx.z+e]; __syncthreads(); e /= 2; } if(threadIdx.z == 0) C.elements[row * C.width + col] = Cs[threadIdx.y][threadIdx.x][0]; }
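// The launch above uses dim3(BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE) = 16*16*16 = 4096
// threads per block, which is above the 1024 threads-per-block limit of current
// NVIDIA GPUs, so the kernel is expected to fail to launch (the errSync/errAsync
// checks would report it); the z-reduction loop also calls __syncthreads() from a
// divergent branch. A minimal sketch of a runtime check for the block shape; the
// helper name is illustrative and only the standard CUDA runtime API is assumed.
#include <cuda_runtime.h>

static bool blockShapeFits(dim3 block, int device = 0)
{
    cudaDeviceProp prop;
    if (cudaGetDeviceProperties(&prop, device) != cudaSuccess) return false;
    size_t threads = (size_t)block.x * block.y * block.z;
    return threads <= (size_t)prop.maxThreadsPerBlock &&
           block.x <= (unsigned)prop.maxThreadsDim[0] &&
           block.y <= (unsigned)prop.maxThreadsDim[1] &&
           block.z <= (unsigned)prop.maxThreadsDim[2];
}

// Possible use in MatMul(), before the kernel launch:
//   if (!blockShapeFits(dimBlock)) { fprintf(stderr, "block shape too large\n"); return; }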
0fad5be07728c27a81b8adaeaeeb200b4e3023b2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> // Standard C/C++ Directives #include <iostream> #include <conio.h> // OpenCV directives #include <highgui\highgui.hpp> using namespace std; using namespace cv; hipError_t transposeRGBImageOnGPU(Mat *input, Mat *transpose); __global__ void transposeKernel(uchar *iChannel, uchar *tChannel, const int WIDTH, const int HEIGHT) { /*thred index along x-dimension*/ int tIdx = (blockIdx.x * blockDim.x) + threadIdx.x; /*thread index along y dimension*/ int tIdy = (blockIdx.y * blockDim.y) + threadIdx.y; int i_input, i_transpose; i_transpose = tIdx + ( HEIGHT * tIdy); i_input = tIdy + ( WIDTH * tIdx); int SIZE = WIDTH * HEIGHT; /*check that the index does not exceed the bound of the image in the memory*/ if( i_transpose < SIZE && i_input < SIZE) { tChannel[i_transpose] = iChannel[i_input];//4; } } void windowSetting( char* title, int width, int height); int main() { //Windows Creation and Display Size settings /*namedWindow("InputImage", WINDOW_NORMAL); resizeWindow("InputImage", 300, 200); namedWindow("TransposedImage", WINDOW_NORMAL); resizeWindow("TransposedImage", 300, 200)*/; windowSetting( "InputImage", 300, 200); // ## 1. Memory Allocation and Initilization on HOST Mat h_inputImage = imread("D:\\input1.png",CV_LOAD_IMAGE_ANYCOLOR); if( h_inputImage.data == NULL) { cout<<"[ERROR] Input Image is null"<<endl; } Mat h_transposeImage; h_transposeImage.create( h_inputImage.cols, h_inputImage.rows, CV_8UC3); // ## 2. STUB function invocation hipError_t cudaStatus = transposeRGBImageOnGPU( &h_inputImage, &h_transposeImage); if (cudaStatus != hipSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } imshow("TransposedImage", h_transposeImage); waitKey(0); return 0; } // Helper function for using CUDA to add vectors in parallel. hipError_t transposeRGBImageOnGPU(Mat *inputImage, Mat *transposeImage) { hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); } const int iWIDTH = inputImage->rows; const int iHEIGHT = inputImage->cols; const int SIZE_IMAGE = iWIDTH * iHEIGHT; //[OPTIONAL] convert HOST images to float //inputImage->create( inputImage->rows, inputImage->cols, CV_32FC3); vector<uchar> h_Input_Channels; split( *inputImage, h_Input_Channels); uchar *h_Transposed_BLUE, *h_Transposed_RED, *h_Transposed_GREEN; // Device Variables uchar *d_BLUE, *d_GREEN, *d_RED; uchar *d_Transposed_BLUE, *d_Transposed_GREEN, *d_Transposed_RED; // Calculating dim BLOCK and GRID int threadX = 32; int threadY = 32; int blockX = iWIDTH/threadX; int blockY = iHEIGHT/threadY; dim3 dimBlock( threadX, threadY, 1); dim3 dimGrid( blockY, blockX, 1); //## A. KERNEL 1 i.e. 
BLUE //# A.1 Mem-Alloc for KERNEL BLUE cudaStatus = hipMalloc( &d_BLUE, sizeof(uchar) * SIZE_IMAGE); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); } cudaStatus = hipMalloc( &d_Transposed_BLUE, sizeof(uchar) * SIZE_IMAGE); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); } //# A.2 Mem-CPY for KERNEL BLUE cudaStatus = hipMemcpy( d_BLUE, &h_Input_Channels[0], sizeof(uchar) * SIZE_IMAGE, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); } //# A.3 KERNEL LAUNCH for BLUE hipLaunchKernelGGL(( transposeKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_BLUE, d_Transposed_BLUE, iWIDTH, iHEIGHT); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); } //## B. KERNEL 1 i.e. GREEN //# A.1 Mem-Alloc for KERNEL GREEN hipMalloc( &d_GREEN, sizeof(uchar) * SIZE_IMAGE); hipMalloc( &d_Transposed_GREEN, sizeof(uchar) * SIZE_IMAGE); //# A.2 Mem-CPY for KERNEL GREEN hipMemcpy( d_GREEN, &h_Input_Channels[1], sizeof(uchar) * SIZE_IMAGE, hipMemcpyHostToDevice); //# A.3 KERNEL LAUNCH for GREEN hipLaunchKernelGGL(( transposeKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_GREEN, d_Transposed_GREEN, iWIDTH, iHEIGHT); //## C. KERNEL 1 i.e. RED //# A.1 Mem-Alloc for KERNEL RED hipMalloc( &d_RED, sizeof(uchar) * SIZE_IMAGE); hipMalloc( &d_Transposed_RED, sizeof(uchar) * SIZE_IMAGE); //# A.2 Mem-CPY for KERNEL RED hipMemcpy( d_RED, &h_Input_Channels[2], sizeof(uchar) * SIZE_IMAGE, hipMemcpyHostToDevice); //# A.3 KERNEL LAUNCH for RED hipLaunchKernelGGL(( transposeKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_RED, d_Transposed_RED, iWIDTH, iHEIGHT); // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); } //memcpy to device cudaStatus = hipMemcpy( h_Transposed_BLUE, d_Transposed_BLUE, sizeof(uchar) * iWIDTH * iHEIGHT, hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); } cudaStatus = hipMemcpy( h_Transposed_GREEN, d_Transposed_GREEN, sizeof(uchar) * iWIDTH * iHEIGHT, hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); } cudaStatus = hipMemcpy( h_Transposed_RED, d_Transposed_RED, sizeof(uchar) * iWIDTH * iHEIGHT, hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); } vector<uchar> hh; hh.push_back(*h_Transposed_RED); hh.push_back( *h_Transposed_GREEN); hh.push_back(*h_Transposed_BLUE); /*Mat i[3]; Mat image(Size(iWIDTH, iHEIGHT), CV_8UC1, h_Transposed_BLUE, Mat::AUTO_STEP); Mat image2(Size(iWIDTH, iHEIGHT), CV_8UC1, h_Transposed_BLUE, Mat::AUTO_STEP); Mat image3(Size(iWIDTH, iHEIGHT), CV_8UC1, h_Transposed_BLUE, Mat::AUTO_STEP); i[0] = image; i[1] = image2; i[2] =image3;*/ merge( hh, *transposeImage); //merge( i,3, *transposeImage); //Error: hipFree(d_BLUE); hipFree(d_GREEN); hipFree(d_RED); hipFree(d_Transposed_BLUE); hipFree(d_Transposed_GREEN); hipFree(d_Transposed_RED); return cudaStatus; }
0fad5be07728c27a81b8adaeaeeb200b4e3023b2.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> // Standard C/C++ Directives #include <iostream> #include <conio.h> // OpenCV directives #include <highgui\highgui.hpp> using namespace std; using namespace cv; cudaError_t transposeRGBImageOnGPU(Mat *input, Mat *transpose); __global__ void transposeKernel(uchar *iChannel, uchar *tChannel, const int WIDTH, const int HEIGHT) { /*thred index along x-dimension*/ int tIdx = (blockIdx.x * blockDim.x) + threadIdx.x; /*thread index along y dimension*/ int tIdy = (blockIdx.y * blockDim.y) + threadIdx.y; int i_input, i_transpose; i_transpose = tIdx + ( HEIGHT * tIdy); i_input = tIdy + ( WIDTH * tIdx); int SIZE = WIDTH * HEIGHT; /*check that the index does not exceed the bound of the image in the memory*/ if( i_transpose < SIZE && i_input < SIZE) { tChannel[i_transpose] = iChannel[i_input];//4; } } void windowSetting( char* title, int width, int height); int main() { //Windows Creation and Display Size settings /*namedWindow("InputImage", WINDOW_NORMAL); resizeWindow("InputImage", 300, 200); namedWindow("TransposedImage", WINDOW_NORMAL); resizeWindow("TransposedImage", 300, 200)*/; windowSetting( "InputImage", 300, 200); // ## 1. Memory Allocation and Initilization on HOST Mat h_inputImage = imread("D:\\input1.png",CV_LOAD_IMAGE_ANYCOLOR); if( h_inputImage.data == NULL) { cout<<"[ERROR] Input Image is null"<<endl; } Mat h_transposeImage; h_transposeImage.create( h_inputImage.cols, h_inputImage.rows, CV_8UC3); // ## 2. STUB function invocation cudaError_t cudaStatus = transposeRGBImageOnGPU( &h_inputImage, &h_transposeImage); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } imshow("TransposedImage", h_transposeImage); waitKey(0); return 0; } // Helper function for using CUDA to add vectors in parallel. cudaError_t transposeRGBImageOnGPU(Mat *inputImage, Mat *transposeImage) { cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); } const int iWIDTH = inputImage->rows; const int iHEIGHT = inputImage->cols; const int SIZE_IMAGE = iWIDTH * iHEIGHT; //[OPTIONAL] convert HOST images to float //inputImage->create( inputImage->rows, inputImage->cols, CV_32FC3); vector<uchar> h_Input_Channels; split( *inputImage, h_Input_Channels); uchar *h_Transposed_BLUE, *h_Transposed_RED, *h_Transposed_GREEN; // Device Variables uchar *d_BLUE, *d_GREEN, *d_RED; uchar *d_Transposed_BLUE, *d_Transposed_GREEN, *d_Transposed_RED; // Calculating dim BLOCK and GRID int threadX = 32; int threadY = 32; int blockX = iWIDTH/threadX; int blockY = iHEIGHT/threadY; dim3 dimBlock( threadX, threadY, 1); dim3 dimGrid( blockY, blockX, 1); //## A. KERNEL 1 i.e. 
BLUE //# A.1 Mem-Alloc for KERNEL BLUE cudaStatus = cudaMalloc( &d_BLUE, sizeof(uchar) * SIZE_IMAGE); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); } cudaStatus = cudaMalloc( &d_Transposed_BLUE, sizeof(uchar) * SIZE_IMAGE); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); } //# A.2 Mem-CPY for KERNEL BLUE cudaStatus = cudaMemcpy( d_BLUE, &h_Input_Channels[0], sizeof(uchar) * SIZE_IMAGE, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); } //# A.3 KERNEL LAUNCH for BLUE transposeKernel<<< dimGrid, dimBlock>>>( d_BLUE, d_Transposed_BLUE, iWIDTH, iHEIGHT); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); } //## B. KERNEL 1 i.e. GREEN //# A.1 Mem-Alloc for KERNEL GREEN cudaMalloc( &d_GREEN, sizeof(uchar) * SIZE_IMAGE); cudaMalloc( &d_Transposed_GREEN, sizeof(uchar) * SIZE_IMAGE); //# A.2 Mem-CPY for KERNEL GREEN cudaMemcpy( d_GREEN, &h_Input_Channels[1], sizeof(uchar) * SIZE_IMAGE, cudaMemcpyHostToDevice); //# A.3 KERNEL LAUNCH for GREEN transposeKernel<<< dimGrid, dimBlock>>>( d_GREEN, d_Transposed_GREEN, iWIDTH, iHEIGHT); //## C. KERNEL 1 i.e. RED //# A.1 Mem-Alloc for KERNEL RED cudaMalloc( &d_RED, sizeof(uchar) * SIZE_IMAGE); cudaMalloc( &d_Transposed_RED, sizeof(uchar) * SIZE_IMAGE); //# A.2 Mem-CPY for KERNEL RED cudaMemcpy( d_RED, &h_Input_Channels[2], sizeof(uchar) * SIZE_IMAGE, cudaMemcpyHostToDevice); //# A.3 KERNEL LAUNCH for RED transposeKernel<<< dimGrid, dimBlock>>>( d_RED, d_Transposed_RED, iWIDTH, iHEIGHT); // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); } //memcpy to device cudaStatus = cudaMemcpy( h_Transposed_BLUE, d_Transposed_BLUE, sizeof(uchar) * iWIDTH * iHEIGHT, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); } cudaStatus = cudaMemcpy( h_Transposed_GREEN, d_Transposed_GREEN, sizeof(uchar) * iWIDTH * iHEIGHT, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); } cudaStatus = cudaMemcpy( h_Transposed_RED, d_Transposed_RED, sizeof(uchar) * iWIDTH * iHEIGHT, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); } vector<uchar> hh; hh.push_back(*h_Transposed_RED); hh.push_back( *h_Transposed_GREEN); hh.push_back(*h_Transposed_BLUE); /*Mat i[3]; Mat image(Size(iWIDTH, iHEIGHT), CV_8UC1, h_Transposed_BLUE, Mat::AUTO_STEP); Mat image2(Size(iWIDTH, iHEIGHT), CV_8UC1, h_Transposed_BLUE, Mat::AUTO_STEP); Mat image3(Size(iWIDTH, iHEIGHT), CV_8UC1, h_Transposed_BLUE, Mat::AUTO_STEP); i[0] = image; i[1] = image2; i[2] =image3;*/ merge( hh, *transposeImage); //merge( i,3, *transposeImage); //Error: cudaFree(d_BLUE); cudaFree(d_GREEN); cudaFree(d_RED); cudaFree(d_Transposed_BLUE); cudaFree(d_Transposed_GREEN); cudaFree(d_Transposed_RED); return cudaStatus; }
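// The host buffers h_Transposed_BLUE/GREEN/RED above are used as cudaMemcpy
// destinations without ever being allocated, so the device-to-host copies write
// through uninitialized pointers. A minimal sketch of the missing allocation step,
// reusing the file's own names (hypothetical placement: right after SIZE_IMAGE is
// computed in transposeRGBImageOnGPU, with matching free() calls after merge()):
//
//     h_Transposed_BLUE  = (uchar*)malloc(sizeof(uchar) * SIZE_IMAGE);
//     h_Transposed_GREEN = (uchar*)malloc(sizeof(uchar) * SIZE_IMAGE);
//     h_Transposed_RED   = (uchar*)malloc(sizeof(uchar) * SIZE_IMAGE);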
541f23c087b7d5bf99bab2bd7fa55b75a7330297.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

__global__ void kernel(int* a_d, int* b_d, int* c_d){
    *c_d = *a_d + *b_d;
    return;
}

int main(){
    int a = 1, b = 2;
    int *a_d, *b_d, *c_d;

    hipMalloc((void**) &a_d, sizeof(int));
    hipMalloc((void**) &b_d, sizeof(int));
    hipMalloc((void**) &c_d, sizeof(int));

    hipMemcpy(a_d, &a, sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(b_d, &b, sizeof(int), hipMemcpyHostToDevice);

    int c;
    hipLaunchKernelGGL(( kernel), dim3(1), dim3(1), 0, 0, a_d, b_d, c_d);
    hipMemcpy(&c, c_d, sizeof(int), hipMemcpyDeviceToHost);

    // hipFree takes the device pointer itself
    hipFree(a_d);
    hipFree(b_d);
    hipFree(c_d);

    printf("%d\n", c);
    return 0;
}
541f23c087b7d5bf99bab2bd7fa55b75a7330297.cu
#include <stdio.h>

__global__ void kernel(int* a_d, int* b_d, int* c_d){
    *c_d = *a_d + *b_d;
    return;
}

int main(){
    int a = 1, b = 2;
    int *a_d, *b_d, *c_d;

    cudaMalloc((void**) &a_d, sizeof(int));
    cudaMalloc((void**) &b_d, sizeof(int));
    cudaMalloc((void**) &c_d, sizeof(int));

    cudaMemcpy(a_d, &a, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, &b, sizeof(int), cudaMemcpyHostToDevice);

    int c;
    kernel<<<1, 1>>>(a_d, b_d, c_d);
    cudaMemcpy(&c, c_d, sizeof(int), cudaMemcpyDeviceToHost);

    // cudaFree takes the device pointer itself
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);

    printf("%d\n", c);
    return 0;
}
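// The pair above performs no error checking at all. A minimal standalone sketch of
// the same one-element addition with a CUDA_CHECK macro around every runtime call;
// only the standard CUDA runtime is assumed and the kernel/macro names are illustrative.
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

#define CUDA_CHECK(call)                                                    \
    do {                                                                    \
        cudaError_t err_ = (call);                                          \
        if (err_ != cudaSuccess) {                                          \
            fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,              \
                    cudaGetErrorString(err_));                              \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)

__global__ void addInts(const int* a, const int* b, int* c) { *c = *a + *b; }

int main()
{
    int a = 1, b = 2, c = 0;
    int *a_d, *b_d, *c_d;
    CUDA_CHECK(cudaMalloc((void**)&a_d, sizeof(int)));
    CUDA_CHECK(cudaMalloc((void**)&b_d, sizeof(int)));
    CUDA_CHECK(cudaMalloc((void**)&c_d, sizeof(int)));
    CUDA_CHECK(cudaMemcpy(a_d, &a, sizeof(int), cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(b_d, &b, sizeof(int), cudaMemcpyHostToDevice));
    addInts<<<1, 1>>>(a_d, b_d, c_d);
    CUDA_CHECK(cudaGetLastError());   // catches launch-configuration errors
    CUDA_CHECK(cudaMemcpy(&c, c_d, sizeof(int), cudaMemcpyDeviceToHost));
    CUDA_CHECK(cudaFree(a_d));
    CUDA_CHECK(cudaFree(b_d));
    CUDA_CHECK(cudaFree(c_d));
    printf("%d\n", c);   // expected: 3
    return 0;
}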
db2a2e58a28f3e7626ce85fab20056fb0a93ba21.hip
// !!! This is a file automatically generated by hipify!!!
//adding two arrays and storing the results in a third array using CUDA
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>

__global__ void add(int *a, int *b, int *c, int Num)
{
    // int idx = threadIdx.x; // for a grid with one block of threads
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    while(idx < Num){ //checking bounds
        c[idx] = a[idx] + b[idx];
        idx += blockDim.x * gridDim.x; //increment thread index
    }
}

int main()
{
    int Num = 50;
    int h_a[Num], h_b[Num], h_c[Num]; //declaring host variables
    int *d_a, *d_b, *d_c; //declaring device variables

    //Memory allocation of device variables
    hipMalloc((void**)&d_a, Num*sizeof(int));
    hipMalloc((void**)&d_b, Num*sizeof(int));
    hipMalloc((void**)&d_c, Num*sizeof(int));

    //initializing host array variables
    for(int i = 1; i <= Num; ++i){
        h_a[i-1] = i;
        h_b[i-1] = i;
    }

    //Copy Host memory to Device memory
    hipMemcpy(d_a, h_a, Num*sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_b, h_b, Num*sizeof(int), hipMemcpyHostToDevice);

    //Launch Kernel
    // add<<<1,Num>>>(d_a,d_b,d_c,Num); //A grid with one block and Num=50 threads
    hipLaunchKernelGGL(( add), dim3(2),dim3(Num/2), 0, 0, d_a, d_b, d_c, Num); //A grid with two blocks, 50/2 threads per block

    //copy device results to host results
    hipMemcpy(h_c, d_c, Num*sizeof(int), hipMemcpyDeviceToHost);

    for(int i = 0; i < Num; i++){ //print results
        printf("%d\n", h_c[i]);
    }

    //free device memory
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);

    return 0;
}
db2a2e58a28f3e7626ce85fab20056fb0a93ba21.cu
//adding two arrays and storing the results in a third array using CUDA
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

__global__ void add(int *a, int *b, int *c, int Num)
{
    // int idx = threadIdx.x; // for a grid with one block of threads
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    while(idx < Num){ //checking bounds
        c[idx] = a[idx] + b[idx];
        idx += blockDim.x * gridDim.x; //increment thread index
    }
}

int main()
{
    int Num = 50;
    int h_a[Num], h_b[Num], h_c[Num]; //declaring host variables
    int *d_a, *d_b, *d_c; //declaring device variables

    //Memory allocation of device variables
    cudaMalloc((void**)&d_a, Num*sizeof(int));
    cudaMalloc((void**)&d_b, Num*sizeof(int));
    cudaMalloc((void**)&d_c, Num*sizeof(int));

    //initializing host array variables
    for(int i = 1; i <= Num; ++i){
        h_a[i-1] = i;
        h_b[i-1] = i;
    }

    //Copy Host memory to Device memory
    cudaMemcpy(d_a, h_a, Num*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, Num*sizeof(int), cudaMemcpyHostToDevice);

    //Launch Kernel
    // add<<<1,Num>>>(d_a,d_b,d_c,Num); //A grid with one block and Num=50 threads
    add<<<2,Num/2>>>(d_a, d_b, d_c, Num); //A grid with two blocks, 50/2 threads per block

    //copy device results to host results
    cudaMemcpy(h_c, d_c, Num*sizeof(int), cudaMemcpyDeviceToHost);

    for(int i = 0; i < Num; i++){ //print results
        printf("%d\n", h_c[i]);
    }

    //free device memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    return 0;
}
5d96fdd6f7cbba92e57b0bc7c96b9e41e1dd1af8.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include "hip/hip_runtime.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" __global__ void reduction_unroll_block2(int * arr, int * temp, int l) { int tid = threadIdx.x; int BLOCK_OFFSET = blockIdx.x * blockDim.x * 2; int index = BLOCK_OFFSET + tid; int * i_data = arr + BLOCK_OFFSET; if((index + blockDim.x) < l) { arr[index] += arr[index + blockDim.x]; } __syncthreads(); for(int offset=blockDim.x/2;offset>0;offset/=2) { if(tid<offset) { i_data[tid]+=i_data[tid+offset]; } __syncthreads(); } if(tid==0) { temp[blockIdx.x]=i_data[0]; } } int cpu_summer(int * arr, int l) { int s=0; for(int i=0;i<l;i++) { s+=arr[i]; } return s; } int main() { int shape=1<<27; int size=shape*sizeof(int); int block_size=128; dim3 block(block_size); dim3 grid(shape/block.x/2); int * arr; arr=(int *)malloc(size); int temp_size=sizeof(int)*grid.x; int * tarr; tarr=(int *)malloc(temp_size); int * d_arr, * d_temp; hipMalloc((void**)&d_arr, size); hipMalloc((void**)&d_temp, temp_size); hipMemset(d_temp, 0, temp_size); for(int i=0; i< shape; i++) { arr[i]=(int)(rand() & 0x0f); } clock_t ct1,ct2,gt1,gt2; ct1=clock(); int cpu=cpu_summer(arr, shape); ct2=clock(); gt1=clock(); hipMemcpy(d_arr, arr, size, hipMemcpyHostToDevice); hipLaunchKernelGGL(( reduction_unroll_block2), dim3(grid), dim3(block), 0, 0, d_arr, d_temp, shape); hipDeviceSynchronize(); hipMemcpy(tarr, d_temp, temp_size, hipMemcpyDeviceToHost); int gpu=0; for(int i=0;i<grid.x;i++) { gpu+=tarr[i]; } gt2=clock(); printf(cpu==gpu?"CPU and GPU values Match\n":"CPU and GPU values do not match\n"); printf("GPU time : %lf sec\n",(double)((gt2-gt1)/(double)CLOCKS_PER_SEC)); printf("CPU time : %lf sec\n",(double)((ct2-ct1)/(double)CLOCKS_PER_SEC)); hipFree(d_arr); hipFree(d_temp); free(arr); free(tarr); hipDeviceReset(); return 0; }
5d96fdd6f7cbba92e57b0bc7c96b9e41e1dd1af8.cu
#include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include "cuda.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" __global__ void reduction_unroll_block2(int * arr, int * temp, int l) { int tid = threadIdx.x; int BLOCK_OFFSET = blockIdx.x * blockDim.x * 2; int index = BLOCK_OFFSET + tid; int * i_data = arr + BLOCK_OFFSET; if((index + blockDim.x) < l) { arr[index] += arr[index + blockDim.x]; } __syncthreads(); for(int offset=blockDim.x/2;offset>0;offset/=2) { if(tid<offset) { i_data[tid]+=i_data[tid+offset]; } __syncthreads(); } if(tid==0) { temp[blockIdx.x]=i_data[0]; } } int cpu_summer(int * arr, int l) { int s=0; for(int i=0;i<l;i++) { s+=arr[i]; } return s; } int main() { int shape=1<<27; int size=shape*sizeof(int); int block_size=128; dim3 block(block_size); dim3 grid(shape/block.x/2); int * arr; arr=(int *)malloc(size); int temp_size=sizeof(int)*grid.x; int * tarr; tarr=(int *)malloc(temp_size); int * d_arr, * d_temp; cudaMalloc((void**)&d_arr, size); cudaMalloc((void**)&d_temp, temp_size); cudaMemset(d_temp, 0, temp_size); for(int i=0; i< shape; i++) { arr[i]=(int)(rand() & 0x0f); } clock_t ct1,ct2,gt1,gt2; ct1=clock(); int cpu=cpu_summer(arr, shape); ct2=clock(); gt1=clock(); cudaMemcpy(d_arr, arr, size, cudaMemcpyHostToDevice); reduction_unroll_block2<<<grid, block>>>(d_arr, d_temp, shape); cudaDeviceSynchronize(); cudaMemcpy(tarr, d_temp, temp_size, cudaMemcpyDeviceToHost); int gpu=0; for(int i=0;i<grid.x;i++) { gpu+=tarr[i]; } gt2=clock(); printf(cpu==gpu?"CPU and GPU values Match\n":"CPU and GPU values do not match\n"); printf("GPU time : %lf sec\n",(double)((gt2-gt1)/(double)CLOCKS_PER_SEC)); printf("CPU time : %lf sec\n",(double)((ct2-ct1)/(double)CLOCKS_PER_SEC)); cudaFree(d_arr); cudaFree(d_temp); free(arr); free(tarr); cudaDeviceReset(); return 0; }
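// A common further optimization for the block reduction above is to finish the last
// 64 elements with warp shuffles instead of more __syncthreads() rounds. A sketch,
// assuming CUDA 9+ (__shfl_down_sync), 32-thread warps, and a power-of-two block
// size of at least 64 (block_size is 128 in this file):
__device__ int warpReduceSum(int val)
{
    for (int offset = warpSize / 2; offset > 0; offset /= 2)
        val += __shfl_down_sync(0xffffffff, val, offset);
    return val;
}

// Possible use inside reduction_unroll_block2, replacing the tail of the offset loop:
//   for (int offset = blockDim.x / 2; offset > 32; offset /= 2) {
//       if (tid < offset) i_data[tid] += i_data[tid + offset];
//       __syncthreads();
//   }
//   if (tid < 32) {
//       int val = i_data[tid] + i_data[tid + 32];
//       val = warpReduceSum(val);
//       if (tid == 0) temp[blockIdx.x] = val;
//   }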
2dd2520199df6332e8609a69244e47fe991b4f58.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void kAddDiagonal(float* mat, float* vec, float* tgtMat, unsigned int width) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < width; i += numThreads) { tgtMat[width*i + i] = mat[width*i + i] + vec[i]; } }
2dd2520199df6332e8609a69244e47fe991b4f58.cu
#include "includes.h" __global__ void kAddDiagonal(float* mat, float* vec, float* tgtMat, unsigned int width) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < width; i += numThreads) { tgtMat[width*i + i] = mat[width*i + i] + vec[i]; } }
cf6eb5fa6f933fdcc7ca1ab8d4f89dbca40dda82.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /************************************************************** Lokman A. Abbas-Turki code reused by Ly Yannick and Prugniaud Melchior for GPU Programming course of ENSAE 2020 Those who re-use this code should mention in their code the name of the author above. ***************************************************************/ #include "RNG.h" #define nt 15 #define nk 6 __constant__ float Tg[nt]; __constant__ float rg[nt]; __constant__ float Kg[nk]; __constant__ float Cg[16*(nt-1)*(nk-1)]; float *Cgc, *Kgc, *Tgc, *rgc; // Allocate parameters void VarMalloc() { Kgc = (float *)calloc(nk, sizeof(float)); Tgc = (float *)calloc(nt, sizeof(float)); rgc = (float *)calloc(nt, sizeof(float)); Cgc = (float *)calloc(16*(nk-1)*(nt-1), sizeof(float)); } // Free parameters void FreeVar() { free(Cgc); free(Kgc); free(Tgc); free(rgc); } // Time parameters void parameters() { Kgc[0] = 20.f; Kgc[1] = 70.f; Kgc[2] = 120.f; Kgc[3] = 160.f; Kgc[4] = 200.f; Kgc[5] = 250.0f; float d, w, m, y; d = 1.0f / 360.0f; w = 7.0f * d; m = 30.0f * d; y = 12.0f * m; Tgc[0] = d; Tgc[1] = 2.f*d; Tgc[2] = w; Tgc[3] = 2.f*w; Tgc[4] = m; Tgc[5] = 2.f*m; Tgc[6] = 3.f*m; Tgc[7] = 6.f*m; Tgc[8] = y; Tgc[9] = y + 3.f*m; Tgc[10] =y + 6.f*m; Tgc[11] = 2.f*y; Tgc[12] = 2.f*y + 6.f*m; Tgc[13] = 3.f*y; Tgc[14] = 3.f*y + 6.f*m; rgc[0] = 0.05f; rgc[1] = 0.07f; rgc[2] = 0.08f; rgc[3] = 0.06f; rgc[4] = 0.07f; rgc[5] = 0.1f; rgc[6] = 0.11f; rgc[7] = 0.13f; rgc[8] = 0.12f; rgc[9] = 0.14f; rgc[10] = 0.145f; rgc[11] = 0.14f; rgc[12] = 0.135f; rgc[13] = 0.13f; rgc[14] = 0.f*y; int k; FILE *ParFp; string TmpString; //Spline Volatility parameters------------------------------ // - Read values from input file on CPU TmpString = "Cg.txt"; ParFp = fopen(TmpString.c_str(),"r"); if (ParFp == NULL) { fprintf(stderr,"File '%s' unreachable!\n",TmpString.c_str()); exit(EXIT_FAILURE); } // - Store values in input data tables on CPU for (k = 0; k < 1120; k++) { if (fscanf(ParFp,"%f",&Cgc[k]) <= 0) { fprintf(stderr,"Error while reading file '%s'!\n",TmpString.c_str()); exit(EXIT_FAILURE); } } fclose(ParFp); hipMemcpyToSymbol(Kg, Kgc, nk*sizeof(float)); hipMemcpyToSymbol(Tg, Tgc, nt*sizeof(float)); hipMemcpyToSymbol(rg, rgc, nt*sizeof(float)); hipMemcpyToSymbol(Cg, Cgc, 16*(nt-1)*(nk-1)*sizeof(float)); } // Time index __device__ int timeIdx(float t) { int i, I; for (i=14; i>=0; i--) { if(t<Tg[i]){ I = i; } } return I; } // Interest rate time integral __device__ float rt_int(float t, float T, int i, int j) { float res; int k; if(i==j){ res = (T-t)*rg[i]; }else{ res = (T-Tg[j-1])*rg[j] + (Tg[i]-t)*rg[i]; for(k=i+1; k<j; k++){ res += (Tg[k]-Tg[k-1])*rg[k]; } } return res; } // Monomials till third degree __device__ float mon(float x, int i){return 1.0f*(i==0) + x*(i==1) + x*x*(i==2) + x*x*x*(i==3);} // Local volatility from bicubic interpolation of implied volatility __device__ void vol_d(float x, float x0, float t, float *V, int q){ float u1 = 0.0f; float u2 = 0.0f; float d1, d2, d_1; float y = 0.0f; float y_1 = 0.0f, y_2 = 0.0f, y_22 = 0.0f; int k = 0; if (x >= Kg[5]){ k = 4; d2 = 1.0f /(Kg[k + 1] - Kg[k]); u2 = 1.0f; }else{ if (x <= Kg[0]){ k = 0; d2 = 1.0f/(Kg[k + 1] - Kg[k]); u2 = 1.0f; }else{ while (Kg[k+1] < x){ k++; } d2 = 1.0f/(Kg[k+1] - Kg[k]); u2 = (x - Kg[k])/(Kg[k+1] - Kg[k]); } } d1 = 1.0f/(Tg[q + 1] - Tg[q]); u1 = (t - Tg[q])/(Tg[q + 1] - Tg[q]); for (int i = 0; i < 4; i++){ for (int j = 0; j < 4; j++){ y += Cg[k * 14 * 16 + q * 16 + j + i * 4] * mon(u1, i)*mon(u2, 
j); y_1 += i *Cg[k * 14 * 16 + q * 16 + i * 4 + j] * mon(u1, i-1)*mon(u2, j)*d1; y_2 += j*Cg[k * 14 * 16 + q * 16 + i * 4 + j] * mon(u1, i)*mon(u2, j-1)*d2; y_22 += j *(j - 1)*Cg[k * 14 * 16 + q * 16 + i * 4 + j] * mon(u1, i)*mon(u2, j-2)*d2*d2; } } d_1 = (logf(x0/x) + rt_int(0.0f, t, 0, q))/(y*sqrtf(t)) + 0.5f*y*sqrtf(t); u1 = x*x*(y_22 - d_1*sqrtf(t)*y_2*y_2 + (1.0f/y)*((1.0f/(x*sqrtf(t))) + d_1*y_2)*((1.0f /(x*sqrtf(t))) + d_1*y_2)); u2 = 2.0f*y_1 + y /t + 2.0f*x*rg[q]*y_2; *V = sqrtf(fminf(fmaxf(u2/u1,0.0001f),0.5f)); } // Set the new RNG seed __device__ void CMRG_set_d(int *a0, int *a1, int *a2, int *a3, int *a4, int *a5, int *CMRG_Out){ CMRG_Out[0] = *a0; CMRG_Out[1] = *a1; CMRG_Out[2] = *a2; CMRG_Out[3] = *a3; CMRG_Out[4] = *a4; CMRG_Out[5] = *a5; } // Get the RNG Seed __device__ void CMRG_get_d(int *a0, int *a1, int *a2, int *a3, int *a4, int *a5, int *CMRG_In){ *a0 = CMRG_In[0]; *a1 = CMRG_In[1]; *a2 = CMRG_In[2]; *a3 = CMRG_In[3]; *a4 = CMRG_In[4]; *a5 = CMRG_In[5]; } // Generate uniformly distributed random variables __device__ void CMRG_d(int *a0, int *a1, int *a2, int *a3, int *a4, int *a5, float *g0, float *g1, int nb){ const int m1 = 2147483647;// Requested for the simulation const int m2 = 2145483479;// Requested for the simulation int h, p12, p13, p21, p23, k, loc;// Requested local parameters for(k=0; k<nb; k++){ // First Component h = *a0/q13; p13 = a13*(h*q13-*a0)-h*r13; h = *a1/q12; p12 = a12*(*a1-h*q12)-h*r12; if (p13 < 0) { p13 = p13 + m1; } if (p12 < 0) { p12 = p12 + m1; } *a0 = *a1; *a1 = *a2; if( (p12 - p13) < 0){ *a2 = p12 - p13 + m1; } else { *a2 = p12 - p13; } // Second Component h = *a3/q23; p23 = a23*(h*q23-*a3)-h*r23; h = *a5/q21; p21 = a21*(*a5-h*q21)-h*r21; if (p23 < 0){ p23 = p23 + m2; } if (p12 < 0){ p21 = p21 + m2; } *a3 = *a4; *a4 = *a5; if ( (p21 - p23) < 0) { *a5 = p21 - p23 + m2; } else { *a5 = p21 - p23; } // Combines the two MRGs if(*a2 < *a5){ loc = *a2 - *a5 + m1; }else{loc = *a2 - *a5;} if(k){ if(loc == 0){ *g1 = Invmp*m1; }else{*g1 = Invmp*loc;} }else{ *g1 = 0.0f; if(loc == 0){ *g0 = Invmp*m1; }else{*g0 = Invmp*loc;} } } } // Generates Gaussian distribution from a uniform one (Box-Muller) __device__ void BoxMuller_d(float *g0, float *g1){ float loc; if (*g1 < 1.45e-6f){ loc = sqrtf(-2.0f*logf(0.00001f))*cosf(*g0*2.0f*MoPI); } else { if (*g1 > 0.99999f){ loc = 0.0f; } else {loc = sqrtf(-2.0f*logf(*g1))*cosf(*g0*2.0f*MoPI);} } *g0 = loc; } // Euler for local volatility __device__ void Euler_d(float *S2, float S1, float r0, float sigma, float dt, float e){ *S2 = S1*(1.0f + r0*dt*dt + sigma*dt*e); } // MC for inner trajectories __global__ void MC_inner_k(int P1, int P2, float St, float _t, int It, float dt, float B, float K, int L, int M, int Ntraj, TabSeedCMRG_t *pt_cmrg, float *option_price, int index_rng){ int gb_index_x = threadIdx.x + blockIdx.x*blockDim.x; int a0, a1, a2, a3, a4, a5, k, i, q, P; float g0, g1, Sk, Skp1, t, v; extern __shared__ float Z[]; Sk = St; P = It; CMRG_get_d(&a0, &a1, &a2, &a3, &a4, &a5, pt_cmrg[0][index_rng][gb_index_x]); for (k=int(_t * M); k<M; k++){ for (i=1; i<=L; i++){ t = dt*dt*(i+L*k); q = timeIdx(t); vol_d(Sk, St, t, &v, q); CMRG_d(&a0, &a1, &a2, &a3, &a4, &a5, &g0, &g1, 2); BoxMuller_d(&g0, &g1); Euler_d(&Skp1, Sk, rg[q], v, dt, g0); Sk = Skp1; } P += (Sk<B); } // Reduction phase Z[threadIdx.x] = expf(-rt_int(_t, t, 0, q))*fmaxf(0.0f, Sk-K)*((P<=P2)&&(P>=P1))/Ntraj; Z[threadIdx.x + blockDim.x] = Ntraj*Z[threadIdx.x]*Z[threadIdx.x]; __syncthreads(); i = blockDim.x/2; while (i != 0) { if (threadIdx.x < i){ 
Z[threadIdx.x] += Z[threadIdx.x + i]; } __syncthreads(); i /= 2; } if (threadIdx.x == 0){ atomicAdd(option_price, Z[0]); } CMRG_set_d(&a0, &a1, &a2, &a3, &a4, &a5, pt_cmrg[0][index_rng][gb_index_x]); } // MC for outer trajectories __global__ void MC_outer_k(int P1, int P2, float x_0, float dt, float B, float K, int L, int M, int Ntraj, TabSeedCMRG_t *pt_cmrg, float *option_price, float *sum, float *option_prices, float *stocks, int *It){ int gb_index_x = threadIdx.x + blockIdx.x*blockDim.x; int a0, a1, a2, a3, a4, a5, k, i, q, P, increment; float g0, g1, Sk, Skp1, t, v; extern __shared__ float H[]; Sk = x_0; P = 0; CMRG_get_d(&a0, &a1, &a2, &a3, &a4, &a5, pt_cmrg[0][gb_index_x][0]); for (k=1; k<=M; k++){ for (i=1; i<=L; i++){ t = dt*dt*(i+L*k); q = timeIdx(t); vol_d(Sk, x_0, t, &v, q); CMRG_d(&a0, &a1, &a2, &a3, &a4, &a5, &g0, &g1, 2); BoxMuller_d(&g0, &g1); Euler_d(&Skp1, Sk, rg[q], v, dt, g0); Sk = Skp1; } P += (Sk<B); increment = k + M * gb_index_x - 1; stocks[increment] = Sk; It[increment] = P; hipLaunchKernelGGL(( MC_inner_k), dim3(32),dim3(32),2*32*sizeof(float), 0, P1, P2, Sk, k*dt*dt, P, dt, B, K, L, M, Ntraj, pt_cmrg, option_prices + increment, gb_index_x); } // Reduction phase H[threadIdx.x] = expf(-rt_int(0.0f, t, 0, q))*fmaxf(0.0f, Sk-K)*((P<=P2)&&(P>=P1))/Ntraj; H[threadIdx.x + blockDim.x] = Ntraj*H[threadIdx.x]*H[threadIdx.x]; __syncthreads(); i = blockDim.x/2; while (i != 0) { if (threadIdx.x < i){ H[threadIdx.x] += H[threadIdx.x + i]; H[threadIdx.x + blockDim.x] += H[threadIdx.x + blockDim.x + i]; } __syncthreads(); i /= 2; } if (threadIdx.x == 0){ atomicAdd(option_price, H[0]); atomicAdd(sum, H[blockDim.x]); } CMRG_set_d(&a0, &a1, &a2, &a3, &a4, &a5, pt_cmrg[0][gb_index_x][0]); } int main() { int ti = 1; float T = 1.0f; float K = 100.0f; float S0 = 100.0f; float B = 120.0f; int M = 100; int P1 = 10; int P2 = 49; int Nt = 200; float dt = sqrtf(T/Nt); int leng = Nt/M; float option_price_CPU = 0.0f; float sum_CPU = 0.0f; float Tim; // GPU timer instructions hipEvent_t start, stop; // GPU timer instructions float *option_price_GPU, *sum_GPU, *option_prices_GPU, *option_prices_CPU, *St_GPU, *St_CPU; int *It_GPU, *It_CPU; int Ntraj = 32*32; // Allocation of memories inside GPU hipMalloc(&option_price_GPU, sizeof(float)); hipMalloc(&sum_GPU, sizeof(float)); hipMalloc(&St_GPU, Ntraj * M * sizeof(float)); hipMalloc(&It_GPU, Ntraj * M * sizeof(int)); hipMalloc(&option_prices_GPU, Ntraj * M * sizeof(float)); VarMalloc(); // Init values hipMemset(option_price_GPU, 0.0f, sizeof(float)); hipMemset(sum_GPU, 0.0f, sizeof(float)); hipMemset(option_prices_GPU, 0, Ntraj * M * sizeof(float)); // Allocation of memories inside CPU option_prices_CPU = (float*)malloc(Ntraj * M * sizeof(float)); It_CPU = (int*)malloc(Ntraj*M*sizeof(int)); St_CPU = (float*)malloc(Ntraj*M*sizeof(float)); // Init CMRG PostInitDataCMRG(); parameters(); // GPU timer instructions initialization hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); hipLaunchKernelGGL(( MC_outer_k), dim3(32),dim3(32),2*32*sizeof(float), 0, P1, P2, S0, dt, B, K, leng, M, Ntraj, pt_CMRG, option_price_GPU, sum_GPU, option_prices_GPU, St_GPU, It_GPU); // Transfer data from device (GPU) to host (CPU) hipMemcpy(option_prices_CPU, option_prices_GPU, Ntraj * M * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(St_CPU, St_GPU, Ntraj * M * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(It_CPU, It_GPU, Ntraj * M * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(&option_price_CPU, option_price_GPU, sizeof(float), 
hipMemcpyDeviceToHost); hipMemcpy(&sum_CPU, sum_GPU, sizeof(float), hipMemcpyDeviceToHost); // GPU timer instructions stop the record hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&Tim,start,stop); hipEventDestroy(start); hipEventDestroy(stop); // Result of outer trajectories printf("The price is equal to %f\n", option_price_CPU); printf("error associated to a confidence interval of 95%% = %f\n", 1.96*sqrt((double)(1.0f/(Ntraj-1))*(Ntraj*sum_CPU-(option_price_CPU*option_price_CPU)))/sqrt((double)Ntraj)); printf("Execution time %f ms\n", Tim); // Writing results is CSV file of inner trajectories printf("===================================\nWriting into a csv file inside the current folder\n==================================="); FILE *outfile = fopen("data_generated.csv","w"); fprintf(outfile,"Temps, It, Stocks, Prix\n"); for (int k = 0; k < Ntraj*M; ++k){ fprintf(outfile,"%i,%i,%f,%f\n", ti, It_CPU[k], St_CPU[k], option_prices_CPU[k]); if (ti >= 100) { ti=0; } ti++; } fclose(outfile); // Free memory FreeVar(); hipFree(option_prices_GPU) ; hipFree(St_GPU); hipFree(It_GPU); hipFree(option_price_GPU); hipFree(sum_GPU); free(St_CPU); free(It_CPU); free(option_prices_CPU); return 0; } // nvcc -arch=sm_35 -rdc=true MC.cu RNG.cu -o MC
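A note on the Gaussian sampling used by `BoxMuller_d` in the file above (a reading aid based on the standard identity, not a statement from the original authors; it assumes the constant `MoPI` defined in `RNG.h` is $\pi$): the kernel draws two CMRG uniforms $g_0, g_1 \in (0,1)$, clamps $g_1$ away from $0$ and $1$ so that `logf` stays finite, and overwrites $g_0$ with one standard normal draw via

$$
g_0 \leftarrow \sqrt{-2\ln g_1}\,\cos(2\pi g_0), \qquad g_0 \sim \mathcal{N}(0,1).
$$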
cf6eb5fa6f933fdcc7ca1ab8d4f89dbca40dda82.cu
/************************************************************** Lokman A. Abbas-Turki code reused by Ly Yannick and Prugniaud Melchior for GPU Programming course of ENSAE 2020 Those who re-use this code should mention in their code the name of the author above. ***************************************************************/ #include "RNG.h" #define nt 15 #define nk 6 __constant__ float Tg[nt]; __constant__ float rg[nt]; __constant__ float Kg[nk]; __constant__ float Cg[16*(nt-1)*(nk-1)]; float *Cgc, *Kgc, *Tgc, *rgc; // Allocate parameters void VarMalloc() { Kgc = (float *)calloc(nk, sizeof(float)); Tgc = (float *)calloc(nt, sizeof(float)); rgc = (float *)calloc(nt, sizeof(float)); Cgc = (float *)calloc(16*(nk-1)*(nt-1), sizeof(float)); } // Free parameters void FreeVar() { free(Cgc); free(Kgc); free(Tgc); free(rgc); } // Time parameters void parameters() { Kgc[0] = 20.f; Kgc[1] = 70.f; Kgc[2] = 120.f; Kgc[3] = 160.f; Kgc[4] = 200.f; Kgc[5] = 250.0f; float d, w, m, y; d = 1.0f / 360.0f; w = 7.0f * d; m = 30.0f * d; y = 12.0f * m; Tgc[0] = d; Tgc[1] = 2.f*d; Tgc[2] = w; Tgc[3] = 2.f*w; Tgc[4] = m; Tgc[5] = 2.f*m; Tgc[6] = 3.f*m; Tgc[7] = 6.f*m; Tgc[8] = y; Tgc[9] = y + 3.f*m; Tgc[10] =y + 6.f*m; Tgc[11] = 2.f*y; Tgc[12] = 2.f*y + 6.f*m; Tgc[13] = 3.f*y; Tgc[14] = 3.f*y + 6.f*m; rgc[0] = 0.05f; rgc[1] = 0.07f; rgc[2] = 0.08f; rgc[3] = 0.06f; rgc[4] = 0.07f; rgc[5] = 0.1f; rgc[6] = 0.11f; rgc[7] = 0.13f; rgc[8] = 0.12f; rgc[9] = 0.14f; rgc[10] = 0.145f; rgc[11] = 0.14f; rgc[12] = 0.135f; rgc[13] = 0.13f; rgc[14] = 0.f*y; int k; FILE *ParFp; string TmpString; //Spline Volatility parameters------------------------------ // - Read values from input file on CPU TmpString = "Cg.txt"; ParFp = fopen(TmpString.c_str(),"r"); if (ParFp == NULL) { fprintf(stderr,"File '%s' unreachable!\n",TmpString.c_str()); exit(EXIT_FAILURE); } // - Store values in input data tables on CPU for (k = 0; k < 1120; k++) { if (fscanf(ParFp,"%f",&Cgc[k]) <= 0) { fprintf(stderr,"Error while reading file '%s'!\n",TmpString.c_str()); exit(EXIT_FAILURE); } } fclose(ParFp); cudaMemcpyToSymbol(Kg, Kgc, nk*sizeof(float)); cudaMemcpyToSymbol(Tg, Tgc, nt*sizeof(float)); cudaMemcpyToSymbol(rg, rgc, nt*sizeof(float)); cudaMemcpyToSymbol(Cg, Cgc, 16*(nt-1)*(nk-1)*sizeof(float)); } // Time index __device__ int timeIdx(float t) { int i, I; for (i=14; i>=0; i--) { if(t<Tg[i]){ I = i; } } return I; } // Interest rate time integral __device__ float rt_int(float t, float T, int i, int j) { float res; int k; if(i==j){ res = (T-t)*rg[i]; }else{ res = (T-Tg[j-1])*rg[j] + (Tg[i]-t)*rg[i]; for(k=i+1; k<j; k++){ res += (Tg[k]-Tg[k-1])*rg[k]; } } return res; } // Monomials till third degree __device__ float mon(float x, int i){return 1.0f*(i==0) + x*(i==1) + x*x*(i==2) + x*x*x*(i==3);} // Local volatility from bicubic interpolation of implied volatility __device__ void vol_d(float x, float x0, float t, float *V, int q){ float u1 = 0.0f; float u2 = 0.0f; float d1, d2, d_1; float y = 0.0f; float y_1 = 0.0f, y_2 = 0.0f, y_22 = 0.0f; int k = 0; if (x >= Kg[5]){ k = 4; d2 = 1.0f /(Kg[k + 1] - Kg[k]); u2 = 1.0f; }else{ if (x <= Kg[0]){ k = 0; d2 = 1.0f/(Kg[k + 1] - Kg[k]); u2 = 1.0f; }else{ while (Kg[k+1] < x){ k++; } d2 = 1.0f/(Kg[k+1] - Kg[k]); u2 = (x - Kg[k])/(Kg[k+1] - Kg[k]); } } d1 = 1.0f/(Tg[q + 1] - Tg[q]); u1 = (t - Tg[q])/(Tg[q + 1] - Tg[q]); for (int i = 0; i < 4; i++){ for (int j = 0; j < 4; j++){ y += Cg[k * 14 * 16 + q * 16 + j + i * 4] * mon(u1, i)*mon(u2, j); y_1 += i *Cg[k * 14 * 16 + q * 16 + i * 4 + j] * mon(u1, i-1)*mon(u2, j)*d1; 
y_2 += j*Cg[k * 14 * 16 + q * 16 + i * 4 + j] * mon(u1, i)*mon(u2, j-1)*d2; y_22 += j *(j - 1)*Cg[k * 14 * 16 + q * 16 + i * 4 + j] * mon(u1, i)*mon(u2, j-2)*d2*d2; } } d_1 = (logf(x0/x) + rt_int(0.0f, t, 0, q))/(y*sqrtf(t)) + 0.5f*y*sqrtf(t); u1 = x*x*(y_22 - d_1*sqrtf(t)*y_2*y_2 + (1.0f/y)*((1.0f/(x*sqrtf(t))) + d_1*y_2)*((1.0f /(x*sqrtf(t))) + d_1*y_2)); u2 = 2.0f*y_1 + y /t + 2.0f*x*rg[q]*y_2; *V = sqrtf(fminf(fmaxf(u2/u1,0.0001f),0.5f)); } // Set the new RNG seed __device__ void CMRG_set_d(int *a0, int *a1, int *a2, int *a3, int *a4, int *a5, int *CMRG_Out){ CMRG_Out[0] = *a0; CMRG_Out[1] = *a1; CMRG_Out[2] = *a2; CMRG_Out[3] = *a3; CMRG_Out[4] = *a4; CMRG_Out[5] = *a5; } // Get the RNG Seed __device__ void CMRG_get_d(int *a0, int *a1, int *a2, int *a3, int *a4, int *a5, int *CMRG_In){ *a0 = CMRG_In[0]; *a1 = CMRG_In[1]; *a2 = CMRG_In[2]; *a3 = CMRG_In[3]; *a4 = CMRG_In[4]; *a5 = CMRG_In[5]; } // Generate uniformly distributed random variables __device__ void CMRG_d(int *a0, int *a1, int *a2, int *a3, int *a4, int *a5, float *g0, float *g1, int nb){ const int m1 = 2147483647;// Requested for the simulation const int m2 = 2145483479;// Requested for the simulation int h, p12, p13, p21, p23, k, loc;// Requested local parameters for(k=0; k<nb; k++){ // First Component h = *a0/q13; p13 = a13*(h*q13-*a0)-h*r13; h = *a1/q12; p12 = a12*(*a1-h*q12)-h*r12; if (p13 < 0) { p13 = p13 + m1; } if (p12 < 0) { p12 = p12 + m1; } *a0 = *a1; *a1 = *a2; if( (p12 - p13) < 0){ *a2 = p12 - p13 + m1; } else { *a2 = p12 - p13; } // Second Component h = *a3/q23; p23 = a23*(h*q23-*a3)-h*r23; h = *a5/q21; p21 = a21*(*a5-h*q21)-h*r21; if (p23 < 0){ p23 = p23 + m2; } if (p12 < 0){ p21 = p21 + m2; } *a3 = *a4; *a4 = *a5; if ( (p21 - p23) < 0) { *a5 = p21 - p23 + m2; } else { *a5 = p21 - p23; } // Combines the two MRGs if(*a2 < *a5){ loc = *a2 - *a5 + m1; }else{loc = *a2 - *a5;} if(k){ if(loc == 0){ *g1 = Invmp*m1; }else{*g1 = Invmp*loc;} }else{ *g1 = 0.0f; if(loc == 0){ *g0 = Invmp*m1; }else{*g0 = Invmp*loc;} } } } // Generates Gaussian distribution from a uniform one (Box-Muller) __device__ void BoxMuller_d(float *g0, float *g1){ float loc; if (*g1 < 1.45e-6f){ loc = sqrtf(-2.0f*logf(0.00001f))*cosf(*g0*2.0f*MoPI); } else { if (*g1 > 0.99999f){ loc = 0.0f; } else {loc = sqrtf(-2.0f*logf(*g1))*cosf(*g0*2.0f*MoPI);} } *g0 = loc; } // Euler for local volatility __device__ void Euler_d(float *S2, float S1, float r0, float sigma, float dt, float e){ *S2 = S1*(1.0f + r0*dt*dt + sigma*dt*e); } // MC for inner trajectories __global__ void MC_inner_k(int P1, int P2, float St, float _t, int It, float dt, float B, float K, int L, int M, int Ntraj, TabSeedCMRG_t *pt_cmrg, float *option_price, int index_rng){ int gb_index_x = threadIdx.x + blockIdx.x*blockDim.x; int a0, a1, a2, a3, a4, a5, k, i, q, P; float g0, g1, Sk, Skp1, t, v; extern __shared__ float Z[]; Sk = St; P = It; CMRG_get_d(&a0, &a1, &a2, &a3, &a4, &a5, pt_cmrg[0][index_rng][gb_index_x]); for (k=int(_t * M); k<M; k++){ for (i=1; i<=L; i++){ t = dt*dt*(i+L*k); q = timeIdx(t); vol_d(Sk, St, t, &v, q); CMRG_d(&a0, &a1, &a2, &a3, &a4, &a5, &g0, &g1, 2); BoxMuller_d(&g0, &g1); Euler_d(&Skp1, Sk, rg[q], v, dt, g0); Sk = Skp1; } P += (Sk<B); } // Reduction phase Z[threadIdx.x] = expf(-rt_int(_t, t, 0, q))*fmaxf(0.0f, Sk-K)*((P<=P2)&&(P>=P1))/Ntraj; Z[threadIdx.x + blockDim.x] = Ntraj*Z[threadIdx.x]*Z[threadIdx.x]; __syncthreads(); i = blockDim.x/2; while (i != 0) { if (threadIdx.x < i){ Z[threadIdx.x] += Z[threadIdx.x + i]; } __syncthreads(); i /= 2; } if (threadIdx.x == 
0){ atomicAdd(option_price, Z[0]); } CMRG_set_d(&a0, &a1, &a2, &a3, &a4, &a5, pt_cmrg[0][index_rng][gb_index_x]); } // MC for outer trajectories __global__ void MC_outer_k(int P1, int P2, float x_0, float dt, float B, float K, int L, int M, int Ntraj, TabSeedCMRG_t *pt_cmrg, float *option_price, float *sum, float *option_prices, float *stocks, int *It){ int gb_index_x = threadIdx.x + blockIdx.x*blockDim.x; int a0, a1, a2, a3, a4, a5, k, i, q, P, increment; float g0, g1, Sk, Skp1, t, v; extern __shared__ float H[]; Sk = x_0; P = 0; CMRG_get_d(&a0, &a1, &a2, &a3, &a4, &a5, pt_cmrg[0][gb_index_x][0]); for (k=1; k<=M; k++){ for (i=1; i<=L; i++){ t = dt*dt*(i+L*k); q = timeIdx(t); vol_d(Sk, x_0, t, &v, q); CMRG_d(&a0, &a1, &a2, &a3, &a4, &a5, &g0, &g1, 2); BoxMuller_d(&g0, &g1); Euler_d(&Skp1, Sk, rg[q], v, dt, g0); Sk = Skp1; } P += (Sk<B); increment = k + M * gb_index_x - 1; stocks[increment] = Sk; It[increment] = P; MC_inner_k<<<32,32,2*32*sizeof(float)>>>(P1, P2, Sk, k*dt*dt, P, dt, B, K, L, M, Ntraj, pt_cmrg, option_prices + increment, gb_index_x); } // Reduction phase H[threadIdx.x] = expf(-rt_int(0.0f, t, 0, q))*fmaxf(0.0f, Sk-K)*((P<=P2)&&(P>=P1))/Ntraj; H[threadIdx.x + blockDim.x] = Ntraj*H[threadIdx.x]*H[threadIdx.x]; __syncthreads(); i = blockDim.x/2; while (i != 0) { if (threadIdx.x < i){ H[threadIdx.x] += H[threadIdx.x + i]; H[threadIdx.x + blockDim.x] += H[threadIdx.x + blockDim.x + i]; } __syncthreads(); i /= 2; } if (threadIdx.x == 0){ atomicAdd(option_price, H[0]); atomicAdd(sum, H[blockDim.x]); } CMRG_set_d(&a0, &a1, &a2, &a3, &a4, &a5, pt_cmrg[0][gb_index_x][0]); } int main() { int ti = 1; float T = 1.0f; float K = 100.0f; float S0 = 100.0f; float B = 120.0f; int M = 100; int P1 = 10; int P2 = 49; int Nt = 200; float dt = sqrtf(T/Nt); int leng = Nt/M; float option_price_CPU = 0.0f; float sum_CPU = 0.0f; float Tim; // GPU timer instructions cudaEvent_t start, stop; // GPU timer instructions float *option_price_GPU, *sum_GPU, *option_prices_GPU, *option_prices_CPU, *St_GPU, *St_CPU; int *It_GPU, *It_CPU; int Ntraj = 32*32; // Allocation of memories inside GPU cudaMalloc(&option_price_GPU, sizeof(float)); cudaMalloc(&sum_GPU, sizeof(float)); cudaMalloc(&St_GPU, Ntraj * M * sizeof(float)); cudaMalloc(&It_GPU, Ntraj * M * sizeof(int)); cudaMalloc(&option_prices_GPU, Ntraj * M * sizeof(float)); VarMalloc(); // Init values cudaMemset(option_price_GPU, 0.0f, sizeof(float)); cudaMemset(sum_GPU, 0.0f, sizeof(float)); cudaMemset(option_prices_GPU, 0, Ntraj * M * sizeof(float)); // Allocation of memories inside CPU option_prices_CPU = (float*)malloc(Ntraj * M * sizeof(float)); It_CPU = (int*)malloc(Ntraj*M*sizeof(int)); St_CPU = (float*)malloc(Ntraj*M*sizeof(float)); // Init CMRG PostInitDataCMRG(); parameters(); // GPU timer instructions initialization cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); MC_outer_k<<<32,32,2*32*sizeof(float)>>>(P1, P2, S0, dt, B, K, leng, M, Ntraj, pt_CMRG, option_price_GPU, sum_GPU, option_prices_GPU, St_GPU, It_GPU); // Transfer data from device (GPU) to host (CPU) cudaMemcpy(option_prices_CPU, option_prices_GPU, Ntraj * M * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(St_CPU, St_GPU, Ntraj * M * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(It_CPU, It_GPU, Ntraj * M * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(&option_price_CPU, option_price_GPU, sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(&sum_CPU, sum_GPU, sizeof(float), cudaMemcpyDeviceToHost); // GPU timer instructions stop the record 
cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&Tim,start,stop); cudaEventDestroy(start); cudaEventDestroy(stop); // Result of outer trajectories printf("The price is equal to %f\n", option_price_CPU); printf("error associated to a confidence interval of 95%% = %f\n", 1.96*sqrt((double)(1.0f/(Ntraj-1))*(Ntraj*sum_CPU-(option_price_CPU*option_price_CPU)))/sqrt((double)Ntraj)); printf("Execution time %f ms\n", Tim); // Writing results is CSV file of inner trajectories printf("===================================\nWriting into a csv file inside the current folder\n==================================="); FILE *outfile = fopen("data_generated.csv","w"); fprintf(outfile,"Temps, It, Stocks, Prix\n"); for (int k = 0; k < Ntraj*M; ++k){ fprintf(outfile,"%i,%i,%f,%f\n", ti, It_CPU[k], St_CPU[k], option_prices_CPU[k]); if (ti >= 100) { ti=0; } ti++; } fclose(outfile); // Free memory FreeVar(); cudaFree(option_prices_GPU) ; cudaFree(St_GPU); cudaFree(It_GPU); cudaFree(option_price_GPU); cudaFree(sum_GPU); free(St_CPU); free(It_CPU); free(option_prices_CPU); return 0; } // nvcc -arch=sm_35 -rdc=true MC.cu RNG.cu -o MC
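Both versions of `Euler_d` update the stock as `S2 = S1*(1.0f + r0*dt*dt + sigma*dt*e)`, and `main` sets `dt = sqrtf(T/Nt)`. Read that way (an interpretation of the code, not a claim from its authors), `dt` stores $\sqrt{\Delta t}$ and the scheme is the usual Euler–Maruyama step for the local-volatility dynamics:

$$
S_{k+1} = S_k\Bigl(1 + r_k\,\Delta t + \sigma(S_k, t_k)\,\sqrt{\Delta t}\,\varepsilon_k\Bigr),
\qquad \Delta t = T/N_t,\quad \varepsilon_k \sim \mathcal{N}(0,1),
$$

with $r_k$ the piecewise-constant rate `rg[q]` and $\sigma$ the interpolated local volatility returned by `vol_d`.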
1c928009ae91c7ca7891dda9610e594789ee40c2.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/detail/TensorInfo.cuh>
#include <hiprand/hiprand_kernel.h>

#include <THH/THHGeneral.h>
#include <THH/THHTensorRandom.h>
#include <THH/THHGenerator.hpp>

THCGenerator* THCRandom_getGenerator(THCState* state);

namespace at{
namespace native{

namespace {

// philox generates 128 bits of randomness at a time. Kernel uses this explicitly by putting suitably transformed result into float4
// for all members of float4 to be consumed UNROLL has to be 4. Don't change!
const int UNROLL = 4;

std::pair<uint64_t, uint64_t> next_philox_seed(at::Generator* gen, uint64_t increment) {
  auto gen_ = THCRandom_getGenerator(at::globalContext().getTHCState());
  uint64_t offset = gen_->state.philox_seed_offset.fetch_add(increment);
  return std::make_pair(gen_->state.initial_seed, offset);
}

template <
          typename scalar_t,
          typename accscalar_t,
          typename IndexType,
          int ADims>
#if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__
__launch_bounds__(256,8)
#endif
__global__ void fused_dropout_kernel(cuda::detail::TensorInfo<scalar_t, IndexType> a,
                                     cuda::detail::TensorInfo<scalar_t, IndexType> b,
                                     cuda::detail::TensorInfo<uint8_t, IndexType> c,
                                     IndexType totalElements, accscalar_t p,
                                     std::pair<uint64_t, uint64_t> seeds) {
  accscalar_t pinv = accscalar_t(1)/p;
  IndexType idx = blockIdx.x * blockDim.x + threadIdx.x;
  hiprandStatePhilox4_32_10_t state;
  hiprand_init(seeds.first, idx, seeds.second, &state);
  IndexType rounded_size = ((totalElements - 1)/(blockDim.x * gridDim.x * UNROLL)+1) *
      blockDim.x * gridDim.x * UNROLL;
  for (IndexType linearIndex = idx;
       linearIndex < rounded_size;
       linearIndex += gridDim.x * blockDim.x*UNROLL) {
    //hiprand_uniform_double was pure evil anyway, not doing what it promises, and there's nothing for halfs, so generate float for everything
    float4 rand = hiprand_uniform4(&state);
    scalar_t src[UNROLL];
    rand.x = rand.x < p;
    rand.y = rand.y < p;
    rand.z = rand.z < p;
    rand.w = rand.w < p;
    for (int ii = 0; ii < UNROLL; ii++) {
      IndexType li = linearIndex + blockDim.x * gridDim.x * ii;
      if (li < totalElements) {
        // Convert `linearIndex` into an offset of `a`
        const IndexType aOffset =
            cuda::detail::IndexToOffset<scalar_t, IndexType, ADims>::get(li, a);
        src[ii] = a.data[aOffset];
      }
    }
    for (int ii = 0; ii < UNROLL; ii++) {
      IndexType li = linearIndex + blockDim.x * gridDim.x * ii;
      if (li < totalElements) {
        // Convert `linearIndex` into an offset of `b`
        const IndexType bOffset =
            cuda::detail::IndexToOffset<scalar_t, IndexType, 1>::get(li, b);
        b.data[bOffset] = src[ii]*(&rand.x)[ii]*pinv;
        c.data[bOffset] = (uint8_t)(&rand.x)[ii];
      }
    }
    __syncthreads();
  }
}

template<typename scalar_t, typename accscalar_t>
void masked_scale_kernel(at::Tensor& ret, const at::Tensor src, const at::Tensor mask, accscalar_t scale){
  at::cuda::CUDA_tensor_apply3<scalar_t, scalar_t, uint8_t>(
      ret, src, mask,
      [scale]__device__(scalar_t& ret_val, const scalar_t& src_val, const uint8_t mask_val){
        ret_val = (float)mask_val * src_val * scale;
      });
}

} //anonymous namespace

std::tuple<Tensor,Tensor> fused_dropout_cuda(const Tensor& self, double p, Generator * gen){
  Tensor ret = at::empty_like(self);
  Tensor mask = at::empty(self.sizes(), self.options().dtype(kByte));
  const int64_t nelem = self.numel();
  const int64_t block_size = 256;
  unsigned int blocks_per_sm = at::cuda::getCurrentDeviceProperties()->maxThreadsPerMultiProcessor/block_size;
  dim3 dim_block(block_size);
  dim3 grid((nelem + block_size -1)/block_size);
  grid.x = ::min((unsigned int)at::cuda::getCurrentDeviceProperties()->multiProcessorCount * blocks_per_sm, grid.x);
  //number of times random will be generated per thread, to offset philox counter in thc random state
  int64_t counter_offset = ((nelem - 1)/(block_size*grid.x*UNROLL)+1)*UNROLL;
  if (cuda::detail::canUse32BitIndexMath(self)){
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.type(), "fused_dropout", [&] {
      using accscalar_t = acc_type<scalar_t, true>;
      accscalar_t pa = (accscalar_t)(p);
      auto self_info = cuda::detail::getTensorInfo<scalar_t, unsigned int>(self);
      auto ret_info = cuda::detail::getTensorInfo<scalar_t, unsigned int>(ret);
      auto mask_info = cuda::detail::getTensorInfo<uint8_t, unsigned int>(mask);
      self_info.collapseDims();
      ret_info.collapseDims();
      mask_info.collapseDims(); //ret and mask are collapsed to 1d contiguous tensor
      switch (self_info.dims) {
        case 1:
          hipLaunchKernelGGL((fused_dropout_kernel<scalar_t, accscalar_t, unsigned int, 1>), dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self_info, ret_info, mask_info, nelem, pa, next_philox_seed(gen,counter_offset));
          break;
        default:
          hipLaunchKernelGGL((fused_dropout_kernel<scalar_t, accscalar_t, unsigned int, -1>), dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self_info, ret_info, mask_info, nelem, pa, next_philox_seed(gen,counter_offset));
      }
    });
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.type(), "fused_dropout", [&] {
      using accscalar_t = acc_type<scalar_t, true>;
      accscalar_t pa = (accscalar_t)(p);
      auto self_info = cuda::detail::getTensorInfo<scalar_t, uint64_t>(self);
      auto ret_info = cuda::detail::getTensorInfo<scalar_t, uint64_t>(ret);
      auto mask_info = cuda::detail::getTensorInfo<uint8_t, uint64_t>(mask);
      self_info.collapseDims();
      ret_info.collapseDims();
      mask_info.collapseDims(); //ret and mask are collapsed to 1d contiguous tensor
      switch (self_info.dims) {
        case 1:
          hipLaunchKernelGGL((fused_dropout_kernel<scalar_t, accscalar_t, uint64_t, 1>), dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self_info, ret_info, mask_info, nelem, pa, next_philox_seed(gen,counter_offset));
          break;
        default:
          hipLaunchKernelGGL((fused_dropout_kernel<scalar_t, accscalar_t, uint64_t, -1>), dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self_info, ret_info, mask_info, nelem, pa, next_philox_seed(gen,counter_offset));
      }
    });
  }
  THCudaCheck(hipGetLastError());
  return std::tuple<Tensor,Tensor>(ret, mask);
}

Tensor masked_scale_cuda(const Tensor& self, const Tensor& mask, double scale){
  Tensor ret = at::empty_like(self);
  AT_CHECK(mask.type().scalarType() == at::ScalarType::Byte, "mask should be torch.uint8 dtype");
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.type(), "masked_scale", [&] {
    using accscalar_t = acc_type<scalar_t, true>;
    accscalar_t pa = (accscalar_t)(scale);
    masked_scale_kernel<scalar_t>(ret, self, mask, pa);
  });
  return ret;
}

}
}
1c928009ae91c7ca7891dda9610e594789ee40c2.cu
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/detail/TensorInfo.cuh>
#include <curand_kernel.h>

#include <THC/THCGeneral.h>
#include <THC/THCTensorRandom.h>
#include <THC/THCGenerator.hpp>

THCGenerator* THCRandom_getGenerator(THCState* state);

namespace at{
namespace native{

namespace {

// philox generates 128 bits of randomness at a time. Kernel uses this explicitly by putting suitably transformed result into float4
// for all members of float4 to be consumed UNROLL has to be 4. Don't change!
const int UNROLL = 4;

std::pair<uint64_t, uint64_t> next_philox_seed(at::Generator* gen, uint64_t increment) {
  auto gen_ = THCRandom_getGenerator(at::globalContext().getTHCState());
  uint64_t offset = gen_->state.philox_seed_offset.fetch_add(increment);
  return std::make_pair(gen_->state.initial_seed, offset);
}

template <
          typename scalar_t,
          typename accscalar_t,
          typename IndexType,
          int ADims>
#if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__
__launch_bounds__(256,8)
#endif
__global__ void fused_dropout_kernel(cuda::detail::TensorInfo<scalar_t, IndexType> a,
                                     cuda::detail::TensorInfo<scalar_t, IndexType> b,
                                     cuda::detail::TensorInfo<uint8_t, IndexType> c,
                                     IndexType totalElements, accscalar_t p,
                                     std::pair<uint64_t, uint64_t> seeds) {
  accscalar_t pinv = accscalar_t(1)/p;
  IndexType idx = blockIdx.x * blockDim.x + threadIdx.x;
  curandStatePhilox4_32_10_t state;
  curand_init(seeds.first, idx, seeds.second, &state);
  IndexType rounded_size = ((totalElements - 1)/(blockDim.x * gridDim.x * UNROLL)+1) *
      blockDim.x * gridDim.x * UNROLL;
  for (IndexType linearIndex = idx;
       linearIndex < rounded_size;
       linearIndex += gridDim.x * blockDim.x*UNROLL) {
    //curand_uniform_double was pure evil anyway, not doing what it promises, and there's nothing for halfs, so generate float for everything
    float4 rand = curand_uniform4(&state);
    scalar_t src[UNROLL];
    rand.x = rand.x < p;
    rand.y = rand.y < p;
    rand.z = rand.z < p;
    rand.w = rand.w < p;
    for (int ii = 0; ii < UNROLL; ii++) {
      IndexType li = linearIndex + blockDim.x * gridDim.x * ii;
      if (li < totalElements) {
        // Convert `linearIndex` into an offset of `a`
        const IndexType aOffset =
            cuda::detail::IndexToOffset<scalar_t, IndexType, ADims>::get(li, a);
        src[ii] = a.data[aOffset];
      }
    }
    for (int ii = 0; ii < UNROLL; ii++) {
      IndexType li = linearIndex + blockDim.x * gridDim.x * ii;
      if (li < totalElements) {
        // Convert `linearIndex` into an offset of `b`
        const IndexType bOffset =
            cuda::detail::IndexToOffset<scalar_t, IndexType, 1>::get(li, b);
        b.data[bOffset] = src[ii]*(&rand.x)[ii]*pinv;
        c.data[bOffset] = (uint8_t)(&rand.x)[ii];
      }
    }
    __syncthreads();
  }
}

template<typename scalar_t, typename accscalar_t>
void masked_scale_kernel(at::Tensor& ret, const at::Tensor src, const at::Tensor mask, accscalar_t scale){
  at::cuda::CUDA_tensor_apply3<scalar_t, scalar_t, uint8_t>(
      ret, src, mask,
      [scale]__device__(scalar_t& ret_val, const scalar_t& src_val, const uint8_t mask_val){
        ret_val = (float)mask_val * src_val * scale;
      });
}

} //anonymous namespace

std::tuple<Tensor,Tensor> fused_dropout_cuda(const Tensor& self, double p, Generator * gen){
  Tensor ret = at::empty_like(self);
  Tensor mask = at::empty(self.sizes(), self.options().dtype(kByte));
  const int64_t nelem = self.numel();
  const int64_t block_size = 256;
  unsigned int blocks_per_sm = at::cuda::getCurrentDeviceProperties()->maxThreadsPerMultiProcessor/block_size;
  dim3 dim_block(block_size);
  dim3 grid((nelem + block_size -1)/block_size);
  grid.x = std::min((unsigned int)at::cuda::getCurrentDeviceProperties()->multiProcessorCount * blocks_per_sm, grid.x);
  //number of times random will be generated per thread, to offset philox counter in thc random state
  int64_t counter_offset = ((nelem - 1)/(block_size*grid.x*UNROLL)+1)*UNROLL;
  if (cuda::detail::canUse32BitIndexMath(self)){
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.type(), "fused_dropout", [&] {
      using accscalar_t = acc_type<scalar_t, true>;
      accscalar_t pa = (accscalar_t)(p);
      auto self_info = cuda::detail::getTensorInfo<scalar_t, unsigned int>(self);
      auto ret_info = cuda::detail::getTensorInfo<scalar_t, unsigned int>(ret);
      auto mask_info = cuda::detail::getTensorInfo<uint8_t, unsigned int>(mask);
      self_info.collapseDims();
      ret_info.collapseDims();
      mask_info.collapseDims(); //ret and mask are collapsed to 1d contiguous tensor
      switch (self_info.dims) {
        case 1:
          fused_dropout_kernel<scalar_t, accscalar_t, unsigned int, 1><<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(self_info, ret_info, mask_info, nelem, pa, next_philox_seed(gen,counter_offset));
          break;
        default:
          fused_dropout_kernel<scalar_t, accscalar_t, unsigned int, -1><<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(self_info, ret_info, mask_info, nelem, pa, next_philox_seed(gen,counter_offset));
      }
    });
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.type(), "fused_dropout", [&] {
      using accscalar_t = acc_type<scalar_t, true>;
      accscalar_t pa = (accscalar_t)(p);
      auto self_info = cuda::detail::getTensorInfo<scalar_t, uint64_t>(self);
      auto ret_info = cuda::detail::getTensorInfo<scalar_t, uint64_t>(ret);
      auto mask_info = cuda::detail::getTensorInfo<uint8_t, uint64_t>(mask);
      self_info.collapseDims();
      ret_info.collapseDims();
      mask_info.collapseDims(); //ret and mask are collapsed to 1d contiguous tensor
      switch (self_info.dims) {
        case 1:
          fused_dropout_kernel<scalar_t, accscalar_t, uint64_t, 1><<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(self_info, ret_info, mask_info, nelem, pa, next_philox_seed(gen,counter_offset));
          break;
        default:
          fused_dropout_kernel<scalar_t, accscalar_t, uint64_t, -1><<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(self_info, ret_info, mask_info, nelem, pa, next_philox_seed(gen,counter_offset));
      }
    });
  }
  THCudaCheck(cudaGetLastError());
  return std::tuple<Tensor,Tensor>(ret, mask);
}

Tensor masked_scale_cuda(const Tensor& self, const Tensor& mask, double scale){
  Tensor ret = at::empty_like(self);
  AT_CHECK(mask.type().scalarType() == at::ScalarType::Byte, "mask should be torch.uint8 dtype");
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.type(), "masked_scale", [&] {
    using accscalar_t = acc_type<scalar_t, true>;
    accscalar_t pa = (accscalar_t)(scale);
    masked_scale_kernel<scalar_t>(ret, self, mask, pa);
  });
  return ret;
}

}
}
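The kernel above implements "inverted" dropout: each element is kept with probability `p`, kept values are multiplied by `pinv = 1/p` so the output matches the input in expectation, and the byte mask is saved for the backward pass. A minimal stand-alone CUDA sketch of the same idea, with hypothetical names and a plain `curand_uniform` call instead of the `float4`/`UNROLL` vectorisation and TensorInfo indexing used by the real kernel:

```cpp
#include <curand_kernel.h>

// Inverted dropout: keep x[i] with probability p, scale kept values by 1/p,
// and record the Bernoulli mask so a backward pass can reuse it.
__global__ void simple_fused_dropout(const float* x, float* y, uint8_t* mask,
                                     int n, float p,
                                     uint64_t seed, uint64_t offset) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n) return;
  curandStatePhilox4_32_10_t state;
  curand_init(seed, i, offset, &state);       // one Philox subsequence per thread
  uint8_t keep = curand_uniform(&state) < p;  // 1 with probability ~p
  mask[i] = keep;
  y[i] = keep ? x[i] / p : 0.0f;              // so that E[y[i]] == x[i]
}
```

The `seed`/`offset` pair plays the same role as `next_philox_seed` above: advancing the offset between launches keeps successive kernels from replaying the same random stream.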
92f6ff4296e175544791428f1ba4909f710455d3.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <thrust\device_vector.h>
#include <thrust\host_vector.h>
#include <stdio.h>
#include <iostream>

using namespace std;

const int N = 3;

__global__ void add(int *a, int N)
{
    int i = blockDim.x*blockIdx.x + threadIdx.x;
    if (i < N) {
        a[i] = a[i] + N;
    }
}

void showArray(int a[][3], const int N)
{
    cout << "show array: ";
    for (size_t i = 0; i < N; i++)
        for (size_t j = 0; j < N; j++) {
            cout << a[i][j] << ", ";
        }
    cout << endl;
}

void initStreams(thrust::host_vector<hipStream_t> & streams, const int N)
{
    for (size_t i = 0; i < N; i++) {
        hipStream_t stream;
        hipStreamCreate(&stream);
        streams.push_back(stream);
    }
}

void run(hipStream_t & stream, int *a, const int N)
{
    int *d_a;
    int size = N * sizeof(int);

    // blocks and threads
    dim3 blocks(N);
    dim3 threads(N);

    hipMalloc((void **)&d_a, size);

    hipStream_t copy;
    hipStreamCreateWithFlags(&copy, hipStreamNonBlocking);
    hipMemcpyAsync(d_a, a, size, hipMemcpyHostToDevice, copy); // asynchronously copy data to device
    while (hipStreamQuery(copy) != hipSuccess) {}
    /* the same as waiting with this API:
       hipStreamSynchronize(copy);
    */

    hipLaunchKernelGGL(add, dim3(blocks), dim3(threads), 0, stream, d_a, N);
    hipStreamSynchronize(stream); // wait for the stream to finish

    hipMemcpy(a, d_a, size, hipMemcpyDeviceToHost);

    hipFree(d_a);
    hipStreamDestroy(copy);
}

int main()
{
    thrust::host_vector<hipStream_t> streams;
    initStreams(streams, N);

    int a[N][N] = { {1,2,3},{4,5,6},{7,8,9} };
    for (size_t i = 0; i < streams.size(); i++) {
        run(streams[i], a[i], N);
    }
    showArray(a, N);

    // destroy all streams
    for (size_t i = 0; i < streams.size(); i++) {
        hipStreamDestroy(streams[i]);
    }

    system("pause");
    return 0;
}
92f6ff4296e175544791428f1ba4909f710455d3.cu
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <thrust\device_vector.h>
#include <thrust\host_vector.h>
#include <stdio.h>
#include <iostream>

using namespace std;

const int N = 3;

__global__ void add(int *a, int N)
{
    int i = blockDim.x*blockIdx.x + threadIdx.x;
    if (i < N) {
        a[i] = a[i] + N;
    }
}

void showArray(int a[][3], const int N)
{
    cout << "show array: ";
    for (size_t i = 0; i < N; i++)
        for (size_t j = 0; j < N; j++) {
            cout << a[i][j] << ", ";
        }
    cout << endl;
}

void initStreams(thrust::host_vector<cudaStream_t> & streams, const int N)
{
    for (size_t i = 0; i < N; i++) {
        cudaStream_t stream;
        cudaStreamCreate(&stream);
        streams.push_back(stream);
    }
}

void run(cudaStream_t & stream, int *a, const int N)
{
    int *d_a;
    int size = N * sizeof(int);

    // blocks and threads
    dim3 blocks(N);
    dim3 threads(N);

    cudaMalloc((void **)&d_a, size);

    cudaStream_t copy;
    cudaStreamCreateWithFlags(&copy, cudaStreamNonBlocking);
    cudaMemcpyAsync(d_a, a, size, cudaMemcpyHostToDevice, copy); // asynchronously copy data to device
    while (cudaStreamQuery(copy) != cudaSuccess) {}
    /* the same as waiting with this API:
       cudaStreamSynchronize(copy);
    */

    add<<<blocks, threads, 0, stream>>>(d_a, N);
    cudaStreamSynchronize(stream); // wait for the stream to finish

    cudaMemcpy(a, d_a, size, cudaMemcpyDeviceToHost);

    cudaFree(d_a);
    cudaStreamDestroy(copy);
}

int main()
{
    thrust::host_vector<cudaStream_t> streams;
    initStreams(streams, N);

    int a[N][N] = { {1,2,3},{4,5,6},{7,8,9} };
    for (size_t i = 0; i < streams.size(); i++) {
        run(streams[i], a[i], N);
    }
    showArray(a, N);

    // destroy all streams
    for (size_t i = 0; i < streams.size(); i++) {
        cudaStreamDestroy(streams[i]);
    }

    system("pause");
    return 0;
}
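One caveat worth noting for this example: `cudaMemcpyAsync` can only overlap with other work when the host buffer is page-locked; with an ordinary stack array like `a[i]` the copy generally does not overlap with the host. A small illustrative sketch of a pinned-memory variant (the `run_pinned` helper is hypothetical, not part of the original file; it reuses the `add` kernel defined above):

```cpp
#include <cstring>
#include "cuda_runtime.h"

// Variant of run(): stage the data in page-locked host memory so the
// host-to-device copy can genuinely run asynchronously on `stream`.
void run_pinned(cudaStream_t stream, const int *src, const int n)
{
    int *h_a = nullptr, *d_a = nullptr;
    const size_t bytes = n * sizeof(int);

    cudaMallocHost((void **)&h_a, bytes); // pinned (page-locked) host buffer
    cudaMalloc((void **)&d_a, bytes);
    memcpy(h_a, src, bytes);              // stage input into the pinned buffer

    cudaMemcpyAsync(d_a, h_a, bytes, cudaMemcpyHostToDevice, stream);
    add<<<1, n, 0, stream>>>(d_a, n);     // kernel defined in the file above
    cudaMemcpyAsync(h_a, d_a, bytes, cudaMemcpyDeviceToHost, stream);
    cudaStreamSynchronize(stream);        // wait for copy + kernel + copy-back

    cudaFree(d_a);
    cudaFreeHost(h_a);
}
```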